/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_flow_tunnel.h"
#include "sfc_mae_counter.h"
#include "sfc_switch.h"
#include "sfc_service.h"
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}

static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
			      uint32_t nb_counters_max)
{
	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}

static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
	sfc_mae_counters_fini(&registry->counters);
}

static int
sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
				      struct sfc_mae_rule **rule)
{
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
	unsigned int entry;
	int rc;

	for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
		if (internal_rules->rules[entry].spec == NULL)
			break;
	}

	if (entry == SFC_MAE_NB_RULES_MAX) {
		rc = ENOSPC;
		sfc_err(sa, "failed: too many rules (%u rules used)", entry);
		goto fail_too_many_rules;
	}

	*rule = &internal_rules->rules[entry];

	return 0;

fail_too_many_rules:
	return rc;
}
int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
				     const efx_mport_sel_t *mport_match,
				     const efx_mport_sel_t *mport_deliver,
				     int prio, struct sfc_mae_rule **rulep)
{
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_rule *rule;
	int rc;

	sfc_log_init(sa, "entry");

	if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
		rc = EINVAL;
		sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
			mae->nb_action_rule_prios_max);
		goto fail_invalid_prio;
	}
	if (prio < 0)
		prio = mae->nb_action_rule_prios_max - 1;

	rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
	if (rc != 0)
		goto fail_find_empty_slot;

	sfc_log_init(sa, "init MAE match spec");
	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     (uint32_t)prio, &rule->spec);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE match spec");
		goto fail_match_init;
	}

	rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
	if (rc != 0) {
		sfc_err(sa, "failed to set MAE match mport selector");
		goto fail_mport_set;
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE action set");
		goto fail_action_init;
	}

	rc = efx_mae_action_set_populate_deliver(rule->actions,
						 mport_deliver);
	if (rc != 0) {
		sfc_err(sa, "failed to populate deliver action");
		goto fail_populate_deliver;
	}

	rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
				      &rule->action_set);
	if (rc != 0) {
		sfc_err(sa, "failed to allocate action set");
		goto fail_action_set_alloc;
	}

	rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
					&rule->action_set,
					&rule->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to insert action rule");
		goto fail_rule_insert;
	}

	*rulep = rule;

	sfc_log_init(sa, "done");

	return 0;

fail_rule_insert:
	efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
	efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
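/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a MAE-admin caller could pair the helpers above to set up a
 * unidirectional match-and-deliver path between two m-ports. The
 * physical port index used here is hypothetical.
 *
 *	efx_mport_sel_t from;
 *	efx_mport_sel_t to;
 *	struct sfc_mae_rule *rule;
 *	int rc;
 *
 *	rc = efx_mae_mport_by_phy_port(0, &from);
 *	if (rc == 0)
 *		rc = sfc_mae_assign_entity_mport(sa, &to);
 *	if (rc == 0)
 *		rc = sfc_mae_rule_add_mport_match_deliver(sa, &from, &to,
 *							  -1, &rule);
 *
 * A negative priority requests the lowest-priority level
 * (nb_action_rule_prios_max - 1), as handled above.
 */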
void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
	if (rule == NULL || rule->spec == NULL)
		return;

	efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
	efx_mae_action_set_free(sa->nic, &rule->action_set);
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);
	efx_mae_match_spec_fini(sa->nic, rule->spec);

	rule->spec = NULL;
}

int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "init MAE counter registry");
	rc = sfc_mae_counter_registry_init(&mae->counter_registry,
					   limits.eml_max_n_counters);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
			limits.eml_max_n_counters, rte_strerror(rc));
		goto fail_counter_registry_init;
	}

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/* RTE ethdev MPORT matches that of the entity for independent ports. */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

	sfc_log_init(sa, "allocate encap. header bounce buffer");
	bounce_eh->buf_size = limits.eml_encap_header_size_limit;
	bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
				    bounce_eh->buf_size, 0);
	if (bounce_eh->buf == NULL) {
		rc = ENOMEM;
		goto fail_mae_alloc_bounce_eh;
	}

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->encap_headers);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
	sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}
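/*
 * Illustrative sketch (an assumption about the callers, which live
 * outside this file): the attach path is expected to run once per
 * adapter, with flow offload support gated on the status field:
 *
 *	rc = sfc_mae_attach(sa);
 *	if (rc != 0)
 *		goto fail_mae_attach;
 *	if (sa->mae.status == SFC_MAE_STATUS_SUPPORTED)
 *		... expose transfer (MAE) flow rules ...
 *
 * sfc_mae_detach() below is the symmetric teardown.
 */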
void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	rte_free(mae->bounce_eh.buf);
	sfc_mae_counter_registry_fini(&mae->counter_registry);

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}

static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			sfc_dbg(sa, "attaching to outer_rule=%p", rule);
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	sfc_dbg(sa, "added outer_rule=%p", rule);

	return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    rule->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
			rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
	}

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);

	sfc_dbg(sa, "deleted outer_rule=%p", rule);
}
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable outer_rule=%p: %s",
				rule, strerror(rc));
			return rc;
		}
	}

	if (match_spec_action == NULL)
		goto skip_action_rule;

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

		return rc;
	}

skip_action_rule:
	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
			rule, fw_rsrc->rule_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
				rule, fw_rsrc->rule_id.id);
		} else {
			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
				rule, fw_rsrc->rule_id.id, strerror(rc));
		}
		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
			    const struct sfc_mae_bounce_eh *bounce_eh)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
		if (encap_header->size == bounce_eh->size &&
		    memcmp(encap_header->buf, bounce_eh->buf,
			   bounce_eh->size) == 0) {
			sfc_dbg(sa, "attaching to encap_header=%p",
				encap_header);
			++(encap_header->refcnt);
			return encap_header;
		}
	}

	return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
			 const struct sfc_mae_bounce_eh *bounce_eh,
			 struct sfc_mae_encap_header **encap_headerp)
{
	struct sfc_mae_encap_header *encap_header;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	encap_header = rte_zmalloc("sfc_mae_encap_header",
				   sizeof(*encap_header), 0);
	if (encap_header == NULL)
		return ENOMEM;

	encap_header->size = bounce_eh->size;

	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
				       encap_header->size, 0);
	if (encap_header->buf == NULL) {
		rte_free(encap_header);
		return ENOMEM;
	}

	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

	encap_header->refcnt = 1;
	encap_header->type = bounce_eh->type;
	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

	*encap_headerp = encap_header;

	sfc_dbg(sa, "added encap_header=%p", encap_header);

	return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    encap_header->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
			encap_header, encap_header->fw_rsrc.eh_id.id,
			encap_header->fw_rsrc.refcnt);
	}

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);

	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable encap_header=%p: %s",
				encap_header, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
			encap_header, fw_rsrc->eh_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
				encap_header, fw_rsrc->eh_id.id);
		} else {
			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
				encap_header, fw_rsrc->eh_id.id, strerror(rc));
		}
		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counters,
			unsigned int n_counters,
			efx_mae_actions_t *action_set_spec)
{
	int rc;

	sfc_log_init(sa, "entry");

	if (n_counters == 0) {
		sfc_log_init(sa, "no counters - skip");
		return 0;
	}

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	rc = sfc_mae_counter_enable(sa, &counters[0]);
	if (rc != 0) {
		sfc_err(sa, "failed to enable MAE counter %u: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_counter_add;
	}

	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
						   &counters[0].mae_id);
	if (rc != 0) {
		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_fill_in_id;
	}

	sfc_log_init(sa, "done");

	return 0;

fail_fill_in_id:
	(void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
			 struct sfc_mae_counter_id *counters,
			 unsigned int n_counters)
{
	if (n_counters == 0)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
		sfc_err(sa, "failed to disable: already disabled");
		return EALREADY;
	}

	return sfc_mae_counter_disable(sa, &counters[0]);
}
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const struct sfc_mae_encap_header *encap_header,
			  unsigned int n_count,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		/*
		 * Shared counters are not supported, hence action sets with
		 * COUNT are not attachable.
		 */
		if (action_set->encap_header == encap_header &&
		    n_count == 0 &&
		    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			sfc_dbg(sa, "attaching to action_set=%p", action_set);
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       unsigned int n_counters,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL) {
		sfc_err(sa, "failed to alloc action set");
		return ENOMEM;
	}

	if (n_counters > 0) {
		const struct rte_flow_action *action;

		action_set->counters = rte_malloc("sfc_mae_counter_ids",
			sizeof(action_set->counters[0]) * n_counters, 0);
		if (action_set->counters == NULL) {
			rte_free(action_set);
			sfc_err(sa, "failed to alloc counters");
			return ENOMEM;
		}

		for (action = actions, i = 0;
		     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
		     ++action) {
			const struct rte_flow_action_count *conf;

			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
				continue;

			conf = action->conf;

			action_set->counters[i].mae_id.id =
				EFX_MAE_RSRC_ID_INVALID;
			action_set->counters[i].rte_id = conf->id;
			i++;
		}
		action_set->n_counters = n_counters;
	}

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	sfc_dbg(sa, "added action_set=%p", action_set);

	return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    action_set->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
			action_set, action_set->fw_rsrc.aset_id.id,
			action_set->fw_rsrc.refcnt);
	}

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	if (action_set->n_counters > 0) {
		SFC_ASSERT(action_set->n_counters == 1);
		SFC_ASSERT(action_set->counters[0].mae_id.id ==
			   EFX_MAE_RSRC_ID_INVALID);
		rte_free(action_set->counters);
	}
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);

	sfc_dbg(sa, "deleted action_set=%p", action_set);
}
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
	struct sfc_mae_counter_id *counters = action_set->counters;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = sfc_mae_encap_header_enable(sa, encap_header,
						 action_set->spec);
		if (rc != 0)
			return rc;

		rc = sfc_mae_counters_enable(sa, counters,
					     action_set->n_counters,
					     action_set->spec);
		if (rc != 0) {
			sfc_err(sa, "failed to enable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));

			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable action_set=%p: %s",
				action_set, strerror(rc));

			(void)sfc_mae_counters_disable(sa, counters,
						       action_set->n_counters);
			sfc_mae_encap_header_disable(sa, encap_header);
			return rc;
		}

		sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
			action_set, fw_rsrc->aset_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
			action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
				action_set, fw_rsrc->aset_id.id);
		} else {
			sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
				action_set, fw_rsrc->aset_id.id, strerror(rc));
		}
		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

		rc = sfc_mae_counters_disable(sa, action_set->counters,
					      action_set->n_counters);
		if (rc != 0) {
			sfc_err(sa, "failed to disable %u MAE counters: %s",
				action_set->n_counters, rte_strerror(rc));
		}

		sfc_mae_encap_header_disable(sa, action_set->encap_header);
	}

	--(fw_rsrc->refcnt);
}
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;

	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	if (spec_mae->ft != NULL) {
		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
			spec_mae->ft->jump_rule_is_set = B_FALSE;

		SFC_ASSERT(spec_mae->ft->refcnt != 0);
		--(spec_mae->ft->refcnt);
	}

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is a L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}
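/*
 * Worked example (illustrative): for pattern ETH / VLAN / IPv4 parsed
 * into pattern data, nb_vlan_tags is 1, so the function above sets
 * ETHER_TYPE_BE from ethertypes[1] (0x0800, as restricted by the IPv4
 * item) and VLAN0_PROTO_BE from ethertypes[0] (the TPID taken from
 * "type" in item ETH, e.g. 0x8100).
 */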
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by a L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
		unsigned int tpid_idx;

		/*
		 * This loop can have only two iterations. On the second one,
		 * drop outer tag presence enforcement bit because the inner
		 * tag presence automatically assumes that for the outer tag.
		 */
		enforce_tag_presence[0] = B_FALSE;

		if (tpid_m == RTE_BE16(0)) {
			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
				enforce_tag_presence[ethertype_idx] = B_TRUE;

			/* No match on this field, and no value check. */
			nb_supported_tpids = 1;
			continue;
		}

		/* Exact match is supported only. */
		if (tpid_m != RTE_BE16(0xffff)) {
			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
				rte_be_to_cpu_16(tpid_m));
			rc = EINVAL;
			goto fail;
		}

		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (tpid_v == supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
				rte_be_to_cpu_16(tpid_v));
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
		rte_be16_t enforced_et;

		enforced_et = pdata->innermost_ethertype_restriction.value;

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value = enforced_et;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value != enforced_et) {
			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
				rte_be_to_cpu_16(enforced_et),
				rte_be_to_cpu_16(et->value),
				rte_be_to_cpu_16(et->mask));
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now, when the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is either a valid L3 EtherType (or 0x0000/0x0000),
	 * and the last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
				pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
				pdata->l3_next_proto_restriction_value,
				pdata->l3_next_proto_value,
				pdata->l3_next_proto_mask);
			rc = EINVAL;
			goto fail;
		}
	}

	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_OVLAN],
						enforce_tag_presence[0] ||
						pdata->has_ovlan_value);
		if (rc != 0)
			goto fail;
	}

	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_IVLAN],
						enforce_tag_presence[1] ||
						pdata->has_ivlan_value);
		if (rc != 0)
			goto fail;
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}
static int
sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_mark *spec = item->spec;
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;

	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"NULL spec in item MARK");
	}

	/*
	 * This item is used in tunnel offload support only.
	 * It must go before any network header items. This
	 * way, sfc_mae_rule_preparse_item_mark() must have
	 * already parsed it. Only one item MARK is allowed.
	 */
	if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
	    spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "invalid item MARK");
	}

	return 0;
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}
/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}
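/*
 * Illustrative sketch: given the flocs_eth locators below, the helper
 * above turns a zeroed struct rte_flow_item_eth into a supported-fields
 * mask with "dst", "src" and "type" set to all-ones bytes, i.e. the
 * equivalent of:
 *
 *	struct rte_flow_item_eth supp_mask;
 *
 *	memset(&supp_mask, 0, sizeof(supp_mask));
 *	memset(&supp_mask.dst, 0xff, sizeof(supp_mask.dst));
 *	memset(&supp_mask.src, 0xff, sizeof(supp_mask.src));
 *	memset(&supp_mask.type, 0xff, sizeof(supp_mask.type));
 */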
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}
static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth override_mask;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));
	supp_mask.has_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
		/*
		 * The HW/FW hasn't got support for match on MAC addresses in
		 * outer rules yet (this will change). Match on VLAN presence
		 * isn't supported either. Ignore these match criteria.
		 */
		memcpy(&override_mask, mask, sizeof(override_mask));
		memset(&override_mask.hdr.dst_addr, 0,
		       sizeof(override_mask.hdr.dst_addr));
		memset(&override_mask.hdr.src_addr, 0,
		       sizeof(override_mask.hdr.src_addr));
		override_mask.has_vlan = 0;

		mask = (const uint8_t *)&override_mask;
	}

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
		if (item_mask->has_vlan) {
			pdata->has_ovlan_mask = B_TRUE;
			if (item_spec->has_vlan)
				pdata->has_ovlan_value = B_TRUE;
		}
	} else {
		/*
		 * The specification is empty. The overall pattern
		 * validity will be enforced at the end of parsing.
		 * See sfc_mae_rule_process_pattern_data().
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_mask,
		&pdata->has_ivlan_mask,
	};
	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_value,
		&pdata->has_ivlan_value,
	};
	boolean_t *cur_tag_presence_bit_mp;
	boolean_t *cur_tag_presence_bit_vp;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

	if (*cur_tag_presence_bit_mp == B_TRUE &&
	    *cur_tag_presence_bit_vp == B_FALSE) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"The previous item enforces no (more) VLAN, "
				"so the current item (VLAN) must not exist");
	}

	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));
	/*
	 * This only means that the field is supported by the driver and libefx.
	 * Support on NIC level will be checked when all items have been parsed.
	 */
	supp_mask.has_more_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *et = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 */
		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
		if (item_mask->has_more_vlan) {
			if (pdata->nb_vlan_tags ==
			    SFC_MAE_MATCH_VLAN_MAX_NTAGS - 1) {
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Can't use 'has_more_vlan' in "
					"the second item VLAN");
			}
			pdata->has_ivlan_mask = B_TRUE;
			if (item_spec->has_more_vlan)
				pdata->has_ivlan_value = B_TRUE;
		}

		/* Convert TCI to MAE representation right now. */
		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
					ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	++(pdata->nb_vlan_tags);

	return 0;
}
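/*
 * Worked example (illustrative): pattern ETH / VLAN / VLAN / END
 * invokes this parser twice. The first call consumes flocs_vlan[0..1]
 * (VLAN0_TCI_BE plus the deferred TPID slot), the second one consumes
 * flocs_vlan[2..3] (VLAN1_TCI_BE), and nb_vlan_tags ends up being 2.
 */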
static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}
static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};
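/*
 * Byte-layout note (illustrative): hdr.data_off (1 byte) immediately
 * precedes hdr.tcp_flags (1 byte) in struct rte_tcp_hdr, so the locator
 * above covers both as one 2-byte big-endian value. For instance, a
 * spec with tcp_flags = 0x02 (SYN) and a zero data_off contributes the
 * 16-bit value 0x0002 to the oversize TCP_FLAGS_BE MAE field.
 */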
static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};

static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}
static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};

/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
	FIELD_ID_NO_REMAP(HAS_OVLAN),
	FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};

/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};
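/*
 * Illustrative sketch: the parsers index these registries through
 * ctx->field_ids_remap, so the same flocs_* tables serve both rule
 * types. For example, with field_ids_remap_to_encap installed,
 * fremap[EFX_MAE_FIELD_ETH_SADDR_BE] resolves to
 * EFX_MAE_FIELD_ENC_ETH_SADDR_BE, whereas field_ids_no_remap maps
 * every field ID to itself.
 */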
static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size, error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}
2175 static const struct sfc_flow_item sfc_flow_items[] = {
2177 .type = RTE_FLOW_ITEM_TYPE_MARK,
2179 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2180 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2181 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2182 .parse = sfc_mae_rule_parse_item_mark,
2185 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2188 * In terms of RTE flow, this item is a META one,
2189 * and its position in the pattern is don't care.
2191 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2192 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2193 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2194 .parse = sfc_mae_rule_parse_item_port_id,
2197 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2200 * In terms of RTE flow, this item is a META one,
2201 * and its position in the pattern is don't care.
2203 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2204 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2205 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2206 .parse = sfc_mae_rule_parse_item_phy_port,
2209 .type = RTE_FLOW_ITEM_TYPE_PF,
2212 * In terms of RTE flow, this item is a META one,
2213 * and its position in the pattern is don't care.
2215 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2216 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2217 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2218 .parse = sfc_mae_rule_parse_item_pf,
2221 .type = RTE_FLOW_ITEM_TYPE_VF,
2224 * In terms of RTE flow, this item is a META one,
2225 * and its position in the pattern is a don't-care.
2227 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2228 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2229 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2230 .parse = sfc_mae_rule_parse_item_vf,
2233 .type = RTE_FLOW_ITEM_TYPE_ETH,
2235 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2236 .layer = SFC_FLOW_ITEM_L2,
2237 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2238 .parse = sfc_mae_rule_parse_item_eth,
2241 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2243 .prev_layer = SFC_FLOW_ITEM_L2,
2244 .layer = SFC_FLOW_ITEM_L2,
2245 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2246 .parse = sfc_mae_rule_parse_item_vlan,
2249 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2251 .prev_layer = SFC_FLOW_ITEM_L2,
2252 .layer = SFC_FLOW_ITEM_L3,
2253 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2254 .parse = sfc_mae_rule_parse_item_ipv4,
2257 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2259 .prev_layer = SFC_FLOW_ITEM_L2,
2260 .layer = SFC_FLOW_ITEM_L3,
2261 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2262 .parse = sfc_mae_rule_parse_item_ipv6,
2265 .type = RTE_FLOW_ITEM_TYPE_TCP,
2267 .prev_layer = SFC_FLOW_ITEM_L3,
2268 .layer = SFC_FLOW_ITEM_L4,
2269 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2270 .parse = sfc_mae_rule_parse_item_tcp,
2273 .type = RTE_FLOW_ITEM_TYPE_UDP,
2275 .prev_layer = SFC_FLOW_ITEM_L3,
2276 .layer = SFC_FLOW_ITEM_L4,
2277 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2278 .parse = sfc_mae_rule_parse_item_udp,
2281 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2283 .prev_layer = SFC_FLOW_ITEM_L4,
2284 .layer = SFC_FLOW_ITEM_START_LAYER,
2285 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2286 .parse = sfc_mae_rule_parse_item_tunnel,
2289 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2291 .prev_layer = SFC_FLOW_ITEM_L4,
2292 .layer = SFC_FLOW_ITEM_START_LAYER,
2293 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2294 .parse = sfc_mae_rule_parse_item_tunnel,
2297 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2299 .prev_layer = SFC_FLOW_ITEM_L3,
2300 .layer = SFC_FLOW_ITEM_START_LAYER,
2301 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2302 .parse = sfc_mae_rule_parse_item_tunnel,
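/*
* (Editor's note) The tunnel items above (VXLAN, GENEVE, NVGRE) set
* .layer to SFC_FLOW_ITEM_START_LAYER on purpose: once a tunnel header
* has been matched, the inner frame is parsed from a fresh L2 layer.
* NVGRE follows L3 directly since, unlike VXLAN and GENEVE, it has no
* UDP header.
*/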
2307 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2308 struct sfc_mae_parse_ctx *ctx,
2309 struct sfc_mae_outer_rule **rulep,
2310 struct rte_flow_error *error)
2312 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2315 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2320 SFC_ASSERT(ctx->match_spec_outer != NULL);
2322 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2323 return rte_flow_error_set(error, ENOTSUP,
2324 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2325 "Inconsistent pattern (outer)");
2328 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
ctx->encap_type);
2330 if (*rulep != NULL) {
2331 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
} else {
2333 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2334 ctx->encap_type, rulep);
if (rc != 0) {
2336 return rte_flow_error_set(error, rc,
2337 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2338 "Failed to process the pattern");
}
}
2342 /* The spec has now been tracked by the outer rule entry. */
2343 ctx->match_spec_outer = NULL;
2346 switch (ctx->ft_rule_type) {
2347 case SFC_FT_RULE_NONE:
break;
2349 case SFC_FT_RULE_JUMP:
2350 /* No action rule */
return 0;
2352 case SFC_FT_RULE_GROUP:
/*
2354 * Match on recirculation ID rather than
2355 * on the outer rule allocation handle.
*/
2357 rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
2358 SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
if (rc != 0) {
2360 return rte_flow_error_set(error, rc,
2361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2362 "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
}

return 0;
default:
2366 SFC_ASSERT(B_FALSE);
}
2370 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2371 * inner parse (when some outer rule is hit) and action rule lookup.
2372 * If the currently processed flow does not come with an outer rule,
2373 * its action rule must be available only for packets which miss in
2374 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2375 * in the action rule specification; this ensures correct behaviour.
2377 * If, on the other hand, this flow does have an outer rule, its ID
2378 * may be unknown at the moment (not yet allocated), but OR_ID mask
2379 * has to be set to 0xffffffff anyway for correct class comparisons.
2380 * When the outer rule has been allocated, this match field will be
2381 * overridden by sfc_mae_outer_rule_enable() to use the right value.
2383 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
&invalid_rule_id);
if (rc != 0) {
if (*rulep != NULL)
2387 sfc_mae_outer_rule_del(sa, *rulep);

*rulep = NULL;

2391 return rte_flow_error_set(error, rc,
2392 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2393 "Failed to process the pattern");
}

return 0;
2400 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2401 struct sfc_mae_parse_ctx *ctx)
2403 struct sfc_flow_tunnel *ft;
2407 sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2411 ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2413 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2417 if (ft->refcnt == 0) {
2418 sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2423 user_mark = SFC_FT_GET_USER_MARK(spec->id);
2424 if (user_mark != 0) {
2425 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2429 sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2431 ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2438 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2439 const struct rte_flow_item pattern[],
2440 struct sfc_mae_parse_ctx *ctx,
2441 struct rte_flow_error *error)
2443 struct sfc_mae *mae = &sa->mae;
2444 uint8_t recirc_id = 0;
2447 if (pattern == NULL) {
2448 rte_flow_error_set(error, EINVAL,
2449 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
"NULL pattern");
return -rte_errno;
}
2455 switch (pattern->type) {
2456 case RTE_FLOW_ITEM_TYPE_MARK:
2457 rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
ctx);
if (rc != 0) {
2460 return rte_flow_error_set(error, rc,
2461 RTE_FLOW_ERROR_TYPE_ITEM,
2462 pattern, "tunnel offload: GROUP: invalid item MARK");
2466 case RTE_FLOW_ITEM_TYPE_VXLAN:
2467 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2468 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2469 ctx->tunnel_def_mask_size =
2470 sizeof(rte_flow_item_vxlan_mask);
break;
2472 case RTE_FLOW_ITEM_TYPE_GENEVE:
2473 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2474 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2475 ctx->tunnel_def_mask_size =
2476 sizeof(rte_flow_item_geneve_mask);
break;
2478 case RTE_FLOW_ITEM_TYPE_NVGRE:
2479 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2480 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2481 ctx->tunnel_def_mask_size =
2482 sizeof(rte_flow_item_nvgre_mask);
break;
2484 case RTE_FLOW_ITEM_TYPE_END:
2494 switch (ctx->ft_rule_type) {
2495 case SFC_FT_RULE_NONE:
2496 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
return 0;
break;
2499 case SFC_FT_RULE_JUMP:
2500 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
2501 return rte_flow_error_set(error, ENOTSUP,
2502 RTE_FLOW_ERROR_TYPE_ITEM,
2503 pattern, "tunnel offload: JUMP: invalid item");
}
2505 ctx->encap_type = ctx->ft->encap_type;
break;
2507 case SFC_FT_RULE_GROUP:
2508 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2509 return rte_flow_error_set(error, EINVAL,
2510 RTE_FLOW_ERROR_TYPE_ITEM,
2511 NULL, "tunnel offload: GROUP: missing tunnel item");
2512 } else if (ctx->encap_type != ctx->ft->encap_type) {
2513 return rte_flow_error_set(error, EINVAL,
2514 RTE_FLOW_ERROR_TYPE_ITEM,
2515 pattern, "tunnel offload: GROUP: tunnel type mismatch");
}
break;
default:
2519 SFC_ASSERT(B_FALSE);
}
2523 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2524 return rte_flow_error_set(error, ENOTSUP,
2525 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2526 "OR: unsupported tunnel type");
2529 switch (ctx->ft_rule_type) {
2530 case SFC_FT_RULE_JUMP:
2531 recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
/* FALLTHROUGH */
2533 case SFC_FT_RULE_NONE:
2534 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2535 return rte_flow_error_set(error, ENOTSUP,
2536 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2537 NULL, "OR: unsupported priority level");
}

2540 rc = efx_mae_match_spec_init(sa->nic,
2541 EFX_MAE_RULE_OUTER, ctx->priority,
2542 &ctx->match_spec_outer);
if (rc != 0) {
2544 return rte_flow_error_set(error, rc,
2545 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2546 "OR: failed to initialise the match specification");
}

/*
2550 * Outermost items comprise a match
2551 * specification of type OUTER.
*/
2553 ctx->match_spec = ctx->match_spec_outer;

2555 /* Outermost items use "ENC" EFX MAE field IDs. */
2556 ctx->field_ids_remap = field_ids_remap_to_encap;

2558 rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
recirc_id);
if (rc != 0) {
2561 return rte_flow_error_set(error, rc,
2562 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2563 "OR: failed to initialise RECIRC_ID");
}
break;
2566 case SFC_FT_RULE_GROUP:
2567 /* Outermost items -> "ENC" match fields in the action rule. */
2568 ctx->field_ids_remap = field_ids_remap_to_encap;
2569 ctx->match_spec = ctx->match_spec_action;

2571 /* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
2572 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
break;
default:
2575 SFC_ASSERT(B_FALSE);
}

return 0;
2583 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2584 struct sfc_mae_parse_ctx *ctx)
2586 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
return;
2589 if (ctx->match_spec_outer != NULL)
2590 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2594 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2595 const struct rte_flow_item pattern[],
2596 struct sfc_flow_spec_mae *spec,
2597 struct rte_flow_error *error)
2599 struct sfc_mae_parse_ctx ctx_mae;
2600 unsigned int priority_shift = 0;
2601 struct sfc_flow_parse_ctx ctx;
2604 memset(&ctx_mae, 0, sizeof(ctx_mae));
2605 ctx_mae.ft_rule_type = spec->ft_rule_type;
2606 ctx_mae.priority = spec->priority;
2607 ctx_mae.ft = spec->ft;
ctx_mae.sa = sa;
2610 switch (ctx_mae.ft_rule_type) {
2611 case SFC_FT_RULE_JUMP:
2613 * By design, this flow should be represented solely by the
2614 * outer rule. But the HW/FW hasn't got support for setting
2615 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
2616 * does it support outer rule counters. As a workaround, an
2617 * action rule of lower priority is used to do the job.
*/
priority_shift = 1;

/* FALLTHROUGH */
2622 case SFC_FT_RULE_GROUP:
2623 if (ctx_mae.priority != 0) {
2625 * Because of the above workaround, deny the
2626 * use of priorities to JUMP and GROUP rules.
2628 rc = rte_flow_error_set(error, ENOTSUP,
2629 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2630 "tunnel offload: priorities are not supported");
2631 goto fail_priority_check;
}
/* FALLTHROUGH */
2635 case SFC_FT_RULE_NONE:
2636 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2637 spec->priority + priority_shift,
2638 &ctx_mae.match_spec_action);
if (rc != 0) {
2640 rc = rte_flow_error_set(error, rc,
2641 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2642 "AR: failed to initialise the match specification");
2643 goto fail_init_match_spec_action;
}
break;
default:
2647 SFC_ASSERT(B_FALSE);
}
2652 * As a preliminary setting, assume that there is no encapsulation
2653 * in the pattern. That is, pattern items are about to comprise a
2654 * match specification of type ACTION and use non-encap. field IDs.
2656 * sfc_mae_rule_encap_parse_init() below may override this.
2658 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2659 ctx_mae.match_spec = ctx_mae.match_spec_action;
2660 ctx_mae.field_ids_remap = field_ids_no_remap;
2662 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
ctx.mae = &ctx_mae;
2665 rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
if (rc != 0)
2667 goto fail_encap_parse_init;
2670 * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
2671 * GROUP rule. Remember its properties for later use.
2673 spec->ft_rule_type = ctx_mae.ft_rule_type;
2674 spec->ft = ctx_mae.ft;
2676 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2677 pattern, &ctx, error);
if (rc != 0)
2679 goto fail_parse_pattern;
2681 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
if (rc != 0)
2683 goto fail_process_pattern_data;
2685 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
if (rc != 0)
2687 goto fail_process_outer;
2689 if (ctx_mae.match_spec_action != NULL &&
2690 !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2691 rc = rte_flow_error_set(error, ENOTSUP,
2692 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2693 "Inconsistent pattern");
2694 goto fail_validate_match_spec_action;
2697 spec->match_spec = ctx_mae.match_spec_action;

return 0;

2701 fail_validate_match_spec_action:
fail_process_outer:
2703 fail_process_pattern_data:
fail_parse_pattern:
2705 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

2707 fail_encap_parse_init:
2708 if (ctx_mae.match_spec_action != NULL)
2709 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

2711 fail_init_match_spec_action:
2712 fail_priority_check:
return rc;
2717 * An action supported by MAE may correspond to a bundle of RTE flow actions,
2718 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2719 * That is, related RTE flow actions need to be tracked as parts of a whole
2720 * so that they can be combined into a single action and submitted to MAE
2721 * representation of a given rule's action set.
2723 * Each RTE flow action provided by an application gets classified as
2724 * one belonging to some bundle type. If an action is not supposed to
2725 * belong to any bundle, or if this action is END, it is described as
2726 * one belonging to a dummy bundle of type EMPTY.
2728 * A currently tracked bundle will be submitted if a repeating
2729 * action or an action of different bundle type follows.
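*
* (Editor's note) Example: given the action list OF_PUSH_VLAN,
* OF_SET_VLAN_VID, OF_SET_VLAN_PCP, PORT_ID, END, the first three
* actions accumulate in one VLAN_PUSH bundle; PORT_ID maps to the
* EMPTY bundle type, so the pending VLAN_PUSH bundle is submitted as
* a single MAE action before PORT_ID itself is processed.
*/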
2732 enum sfc_mae_actions_bundle_type {
2733 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2734 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2737 struct sfc_mae_actions_bundle {
2738 enum sfc_mae_actions_bundle_type type;
2740 /* Indicates actions already tracked by the current bundle */
2741 uint64_t actions_mask;
2743 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2744 rte_be16_t vlan_push_tpid;
2745 rte_be16_t vlan_push_tci;
2749 * Combine configuration of RTE flow actions tracked by the bundle into a
2750 * single action and submit the result to MAE action set specification.
2751 * Do nothing in the case of dummy action bundle.
2754 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2755 efx_mae_actions_t *spec)
int rc = 0;

2759 switch (bundle->type) {
2760 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
break;
2762 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2763 rc = efx_mae_action_set_populate_vlan_push(
2764 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
break;
default:
2767 SFC_ASSERT(B_FALSE);
break;
}

return rc;
2775 * Given the type of the next RTE flow action in the line, decide
2776 * whether a new bundle is about to start, and, if this is the case,
2777 * submit and reset the current bundle.
2780 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2781 struct sfc_mae_actions_bundle *bundle,
2782 efx_mae_actions_t *spec,
2783 struct rte_flow_error *error)
2785 enum sfc_mae_actions_bundle_type bundle_type_new;
2788 switch (action->type) {
2789 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2790 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2791 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2792 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
break;
default:
2796 * Self-sufficient actions, including END, are handled in this
2797 * case. No checks for unsupported actions are needed here
2798 * because parsing doesn't occur at this point.
2800 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
break;
}
2804 if (bundle_type_new != bundle->type ||
2805 (bundle->actions_mask & (1ULL << action->type)) != 0) {
2806 rc = sfc_mae_actions_bundle_submit(bundle, spec);
if (rc != 0)
goto fail_submit;

2810 memset(bundle, 0, sizeof(*bundle));
}

2813 bundle->type = bundle_type_new;

return 0;

fail_submit:
2818 return rte_flow_error_set(error, rc,
2819 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2820 "Failed to request the (group of) action(s)");
2824 sfc_mae_rule_parse_action_of_push_vlan(
2825 const struct rte_flow_action_of_push_vlan *conf,
2826 struct sfc_mae_actions_bundle *bundle)
2828 bundle->vlan_push_tpid = conf->ethertype;
2832 sfc_mae_rule_parse_action_of_set_vlan_vid(
2833 const struct rte_flow_action_of_set_vlan_vid *conf,
2834 struct sfc_mae_actions_bundle *bundle)
2836 bundle->vlan_push_tci |= (conf->vlan_vid &
2837 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2841 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2842 const struct rte_flow_action_of_set_vlan_pcp *conf,
2843 struct sfc_mae_actions_bundle *bundle)
2845 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2846 RTE_LEN2MASK(3, uint8_t)) << 13;
2848 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
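/*
* (Editor's note) The two helpers above assemble the 16-bit VLAN TCI:
* PCP in the top three bits (hence the shift by 13), then DEI, then
* the 12-bit VID. E.g. vlan_pcp=5 with vlan_vid=0x123 yields TCI
* 0xa123.
*/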
2851 struct sfc_mae_parsed_item {
2852 const struct rte_flow_item *item;
2853 size_t proto_header_ofst;
2854 size_t proto_header_size;
2858 * For each 16-bit word of the given header, override
2859 * bits enforced by the corresponding 16-bit mask.
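*
* (Editor's note) E.g. with a built header word of 0x4011, an item
* spec word of 0x0006 and a mask word of 0x00ff, the result is
* 0x4006: bits covered by the mask come from the spec, the rest keep
* the previously built value (defaults included).
*/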
2862 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2863 const struct sfc_mae_parsed_item *parsed_items,
2864 unsigned int nb_parsed_items)
2866 unsigned int item_idx;
2868 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2869 const struct sfc_mae_parsed_item *parsed_item;
2870 const struct rte_flow_item *item;
2871 size_t proto_header_size;
2874 parsed_item = &parsed_items[item_idx];
2875 proto_header_size = parsed_item->proto_header_size;
2876 item = parsed_item->item;
2878 for (ofst = 0; ofst < proto_header_size;
2879 ofst += sizeof(rte_be16_t)) {
2880 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2881 const rte_be16_t *w_maskp;
2882 const rte_be16_t *w_specp;
2884 w_maskp = RTE_PTR_ADD(item->mask, ofst);
2885 w_specp = RTE_PTR_ADD(item->spec, ofst);
*wp &= ~(*w_maskp);
2888 *wp |= (*w_specp & *w_maskp);
2891 header_buf += proto_header_size;
2895 #define SFC_IPV4_TTL_DEF 0x40
2896 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
2897 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2898 #define SFC_VXLAN_FLAGS_DEF 0x08000000
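/*
* (Editor's note) On the defaults above: 0x08000000 in the VXLAN flags
* word is the "VNI valid" (I) flag in the first header byte;
* 0x60000000 in the IPv6 vtc_flow word is just version 6 in the top
* nibble; TTL 0x40 is 64 in decimal.
*/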
2901 sfc_mae_rule_parse_action_vxlan_encap(
2902 struct sfc_mae *mae,
2903 const struct rte_flow_action_vxlan_encap *conf,
2904 efx_mae_actions_t *spec,
2905 struct rte_flow_error *error)
2907 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2908 struct rte_flow_item *pattern = conf->definition;
2909 uint8_t *buf = bounce_eh->buf;
2911 /* This array will keep track of non-VOID pattern items. */
2912 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2 /* VLAN tags */ +
2914 1 /* IPv4 or IPv6 */ +
1 /* UDP */ +
1 /* VXLAN */];
2917 unsigned int nb_parsed_items = 0;
2919 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2920 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2921 sizeof(struct rte_ipv6_hdr))];
2922 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2923 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2924 struct rte_vxlan_hdr *vxlan = NULL;
2925 struct rte_udp_hdr *udp = NULL;
2926 unsigned int nb_vlan_tags = 0;
2927 size_t next_proto_ofst = 0;
2928 size_t ethertype_ofst = 0;
uint64_t exp_items;
int rc;
2932 if (pattern == NULL) {
2933 return rte_flow_error_set(error, EINVAL,
2934 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2935 "The encap. header definition is NULL");
2938 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2939 bounce_eh->size = 0;
2942 * Process pattern items and remember non-VOID ones.
2943 * Defer applying masks until after the complete header
2944 * has been built from the pattern items.
2946 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2948 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2949 struct sfc_mae_parsed_item *parsed_item;
2950 const uint64_t exp_items_extra_vlan[] = {
2951 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2953 size_t proto_header_size;
2954 rte_be16_t *ethertypep;
2955 uint8_t *next_protop;
2958 if (pattern->spec == NULL) {
2959 return rte_flow_error_set(error, EINVAL,
2960 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2961 "NULL item spec in the encap. header");
2964 if (pattern->mask == NULL) {
2965 return rte_flow_error_set(error, EINVAL,
2966 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2967 "NULL item mask in the encap. header");
2970 if (pattern->last != NULL) {
2971 /* This is not a match pattern, so disallow range. */
2972 return rte_flow_error_set(error, EINVAL,
2973 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2974 "Range item in the encap. header");
2977 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2978 /* Handle VOID separately, for clarity. */
continue;
}
2982 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2983 return rte_flow_error_set(error, ENOTSUP,
2984 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2985 "Unexpected item in the encap. header");
2988 parsed_item = &parsed_items[nb_parsed_items];
2989 buf_cur = buf + bounce_eh->size;
2991 switch (pattern->type) {
2992 case RTE_FLOW_ITEM_TYPE_ETH:
2993 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2995 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2998 proto_header_size = sizeof(struct rte_ether_hdr);
3000 ethertype_ofst = eth_ethertype_ofst;
3002 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
3003 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3004 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
break;
3006 case RTE_FLOW_ITEM_TYPE_VLAN:
3007 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
3009 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
3012 proto_header_size = sizeof(struct rte_vlan_hdr);
3014 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
3015 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
3017 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3018 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
ethertype_ofst = bounce_eh->size +
3022 offsetof(struct rte_vlan_hdr, eth_proto);
3024 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3025 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3026 exp_items |= exp_items_extra_vlan[nb_vlan_tags];
++nb_vlan_tags;
break;
3030 case RTE_FLOW_ITEM_TYPE_IPV4:
3031 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
3033 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
3036 proto_header_size = sizeof(struct rte_ipv4_hdr);
3038 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3039 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
next_proto_ofst = bounce_eh->size +
3043 offsetof(struct rte_ipv4_hdr, next_proto_id);
3045 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
3047 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
break;
3049 case RTE_FLOW_ITEM_TYPE_IPV6:
3050 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
3052 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
3055 proto_header_size = sizeof(struct rte_ipv6_hdr);
3057 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3058 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3060 next_proto_ofst = bounce_eh->size +
3061 offsetof(struct rte_ipv6_hdr, proto);
3063 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
3065 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
break;
3067 case RTE_FLOW_ITEM_TYPE_UDP:
3068 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
3070 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
3073 proto_header_size = sizeof(struct rte_udp_hdr);
3075 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
3076 *next_protop = IPPROTO_UDP;
3078 udp = (struct rte_udp_hdr *)buf_cur;
3080 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
break;
3082 case RTE_FLOW_ITEM_TYPE_VXLAN:
3083 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
3085 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
3088 proto_header_size = sizeof(struct rte_vxlan_hdr);
3090 vxlan = (struct rte_vxlan_hdr *)buf_cur;
3092 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
3093 udp->dgram_len = RTE_BE16(sizeof(*udp) +
sizeof(*vxlan));
3095 udp->dgram_cksum = 0;

exp_items = 0;
break;
default:
3100 return rte_flow_error_set(error, ENOTSUP,
3101 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3102 "Unknown item in the encap. header");
3105 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
3106 return rte_flow_error_set(error, E2BIG,
3107 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3108 "The encap. header is too big");
3111 if ((proto_header_size & 1) != 0) {
3112 return rte_flow_error_set(error, EINVAL,
3113 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3114 "Odd layer size in the encap. header");
3117 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
3118 bounce_eh->size += proto_header_size;
3120 parsed_item->item = pattern;
3121 parsed_item->proto_header_size = proto_header_size;
++nb_parsed_items;
}
3125 if (exp_items != 0) {
3126 /* Parsing item VXLAN would have reset exp_items to 0. */
3127 return rte_flow_error_set(error, ENOTSUP,
3128 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3129 "No item VXLAN in the encap. header");
3132 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
3133 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
3134 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
3135 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
sizeof(*vxlan));
3137 /* The HW cannot compute this checksum. */
3138 ipv4->hdr_checksum = 0;
3139 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
3141 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
3142 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
3143 ipv6->payload_len = udp->dgram_len;
3145 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
3147 /* Take care of the masks. */
3148 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
3150 rc = efx_mae_action_set_populate_encap(spec);
if (rc != 0) {
3152 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3153 NULL, "failed to request action ENCAP");
}

return rc;
3160 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3161 const struct rte_flow_action_mark *conf,
3162 const struct sfc_flow_spec_mae *spec_mae,
3163 efx_mae_actions_t *spec)
3167 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3168 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3169 } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3170 sfc_err(sa, "the mark value is too large");
3174 rc = efx_mae_action_set_populate_mark(spec, conf->id);
3176 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3182 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3183 const struct rte_flow_action_count *conf
__rte_unused,
3185 efx_mae_actions_t *spec)
3189 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3191 "counter queue is not configured for COUNT action");
3193 goto fail_counter_queue_uninit;
3196 if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
rc = ENOTSUP;
3198 goto fail_no_service_core;
}
3201 rc = efx_mae_action_set_populate_count(spec);
if (rc != 0) {
sfc_err(sa,
3204 "failed to populate counters in MAE action set: %s",
strerror(rc));
3206 goto fail_populate_count;
}

return 0;
3211 fail_populate_count:
3212 fail_no_service_core:
3213 fail_counter_queue_uninit:
return rc;
3219 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3220 const struct rte_flow_action_phy_port *conf,
3221 efx_mae_actions_t *spec)
3223 efx_mport_sel_t mport;
3227 if (conf->original != 0)
3228 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3230 phy_port = conf->index;
3232 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3234 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3235 phy_port, strerror(rc));
3239 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3241 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3242 mport.sel, strerror(rc));
3249 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3250 const struct rte_flow_action_vf *vf_conf,
3251 efx_mae_actions_t *spec)
3253 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3254 efx_mport_sel_t mport;
3258 if (vf_conf == NULL)
3259 vf = EFX_PCI_VF_INVALID;
3260 else if (vf_conf->original != 0)
vf = encp->enc_vf;
else
vf = vf_conf->id;
3265 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3267 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3268 encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3273 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3275 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3276 mport.sel, strerror(rc));
3283 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3284 const struct rte_flow_action_port_id *conf,
3285 efx_mae_actions_t *spec)
3287 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3288 struct sfc_mae *mae = &sa->mae;
3289 efx_mport_sel_t mport;
3293 if (conf->id > UINT16_MAX)
return EOVERFLOW;
3296 port_id = (conf->original != 0) ? sas->port_id : conf->id;
3298 rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
port_id, &mport);
if (rc != 0) {
3301 sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
3302 port_id, strerror(rc));
return rc;
}
3306 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3308 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3309 mport.sel, strerror(rc));
3315 static const char * const action_names[] = {
3316 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3317 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3318 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3319 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3320 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3321 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3322 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3323 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3324 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3325 [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3326 [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3327 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3328 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3329 [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
3333 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3334 const struct rte_flow_action *action,
3335 const struct sfc_flow_spec_mae *spec_mae,
3336 struct sfc_mae_actions_bundle *bundle,
3337 efx_mae_actions_t *spec,
3338 struct rte_flow_error *error)
3340 const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3341 const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3342 bool custom_error = B_FALSE;
3345 switch (action->type) {
3346 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3347 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3348 bundle->actions_mask);
3349 if (outer_rule == NULL ||
3350 outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
rc = EINVAL;
else
3353 rc = efx_mae_action_set_populate_decap(spec);
break;
3355 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3356 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3357 bundle->actions_mask);
3358 rc = efx_mae_action_set_populate_vlan_pop(spec);
break;
3360 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3361 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3362 bundle->actions_mask);
3363 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
break;
3365 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3366 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3367 bundle->actions_mask);
3368 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
break;
3370 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3371 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3372 bundle->actions_mask);
3373 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
break;
3375 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3376 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3377 bundle->actions_mask);
3378 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
action->conf,
spec, error);
3381 custom_error = B_TRUE;
break;
3383 case RTE_FLOW_ACTION_TYPE_COUNT:
3384 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3385 bundle->actions_mask);
3386 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
break;
3388 case RTE_FLOW_ACTION_TYPE_FLAG:
3389 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3390 bundle->actions_mask);
3391 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3392 rc = efx_mae_action_set_populate_flag(spec);
} else {
3394 rc = rte_flow_error_set(error, ENOTSUP,
3395 RTE_FLOW_ERROR_TYPE_ACTION,
3397 "flag delivery has not been negotiated");
3398 custom_error = B_TRUE;
}
break;
3401 case RTE_FLOW_ACTION_TYPE_MARK:
3402 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3403 bundle->actions_mask);
3404 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
3405 spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3406 rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
spec_mae, spec);
} else {
3409 rc = rte_flow_error_set(error, ENOTSUP,
3410 RTE_FLOW_ERROR_TYPE_ACTION,
3412 "mark delivery has not been negotiated");
3413 custom_error = B_TRUE;
}
break;
3416 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3417 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3418 bundle->actions_mask);
3419 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
break;
3421 case RTE_FLOW_ACTION_TYPE_PF:
3422 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3423 bundle->actions_mask);
3424 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
break;
3426 case RTE_FLOW_ACTION_TYPE_VF:
3427 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3428 bundle->actions_mask);
3429 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
break;
3431 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3432 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3433 bundle->actions_mask);
3434 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
break;
3436 case RTE_FLOW_ACTION_TYPE_DROP:
3437 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3438 bundle->actions_mask);
3439 rc = efx_mae_action_set_populate_drop(spec);
break;
3441 case RTE_FLOW_ACTION_TYPE_JUMP:
3442 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3443 /* Workaround. See sfc_flow_parse_rte_to_mae() */
break;
}
/* FALLTHROUGH */
default:
3448 return rte_flow_error_set(error, ENOTSUP,
3449 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3450 "Unsupported action");
if (rc == 0) {
3454 bundle->actions_mask |= (1ULL << action->type);
3455 } else if (!custom_error) {
3456 if (action->type < RTE_DIM(action_names)) {
3457 const char *action_name = action_names[action->type];
3459 if (action_name != NULL) {
3460 sfc_err(sa, "action %s was rejected: %s",
3461 action_name, strerror(rc));
}
}
3464 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3465 NULL, "Failed to request the action");
3472 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
3474 bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
3478 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3479 const struct sfc_mae_bounce_eh *bounce_eh,
3480 struct sfc_mae_encap_header **encap_headerp)
3482 if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3483 *encap_headerp = NULL;
return 0;
}
3487 *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3488 if (*encap_headerp != NULL)
return 0;
3491 return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
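/*
* (Editor's note) The attach-then-add pattern above (first try to reuse
* an existing encap. header entry, create a new one otherwise) mirrors
* how outer rules and action sets are handled elsewhere in this file.
*/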
3495 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3496 const struct rte_flow_action actions[],
3497 struct sfc_flow_spec_mae *spec_mae,
3498 struct rte_flow_error *error)
3500 struct sfc_mae_encap_header *encap_header = NULL;
3501 struct sfc_mae_actions_bundle bundle = {0};
3502 const struct rte_flow_action *action;
3503 struct sfc_mae *mae = &sa->mae;
3504 efx_mae_actions_t *spec;
3505 unsigned int n_count;
3510 if (actions == NULL) {
3511 return rte_flow_error_set(error, EINVAL,
3512 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
"NULL actions");
}
3516 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
if (rc != 0)
3518 goto fail_action_set_spec_init;
3520 if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
3521 /* JUMP rules don't decapsulate packets. GROUP rules do. */
3522 rc = efx_mae_action_set_populate_decap(spec);
if (rc != 0)
3524 goto fail_enforce_ft_decap;
}
3527 /* Cleanup after previous encap. header bounce buffer usage. */
3528 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3530 for (action = actions;
3531 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3532 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
if (rc != 0)
3534 goto fail_rule_parse_action;
3536 rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
3537 &bundle, spec, error);
if (rc != 0)
3539 goto fail_rule_parse_action;
}
3542 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
if (rc != 0)
3544 goto fail_rule_parse_action;
3546 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
if (rc != 0)
3548 goto fail_process_encap_header;
3550 n_count = efx_mae_action_set_get_nb_count(spec);
3553 sfc_err(sa, "too many count actions requested: %u", n_count);
3557 switch (spec_mae->ft_rule_type) {
3558 case SFC_FT_RULE_NONE:
break;
3560 case SFC_FT_RULE_JUMP:
3561 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3562 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
if (rc != 0)
3564 goto fail_workaround_jump_delivery;
break;
3566 case SFC_FT_RULE_GROUP:
3568 * Packets that go to the rule's AR have FT mark set (from the
3569 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
3570 * packets. The user may have provided their own action
3571 * MARK above, so don't check the return value here.
3573 (void)efx_mae_action_set_populate_mark(spec, 0);
break;
default:
3576 SFC_ASSERT(B_FALSE);
}
3579 spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
n_count, spec);
3581 if (spec_mae->action_set != NULL) {
3582 sfc_mae_encap_header_del(sa, encap_header);
3583 efx_mae_action_set_spec_fini(sa->nic, spec);
return 0;
}
3587 rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
3588 &spec_mae->action_set);
if (rc != 0)
3590 goto fail_action_set_add;

return 0;
3594 fail_action_set_add:
3595 fail_workaround_jump_delivery:
3597 sfc_mae_encap_header_del(sa, encap_header);
3599 fail_process_encap_header:
3600 fail_rule_parse_action:
3601 efx_mae_action_set_spec_fini(sa->nic, spec);
3603 fail_enforce_ft_decap:
3604 fail_action_set_spec_init:
3605 if (rc > 0 && rte_errno == 0) {
3606 rc = rte_flow_error_set(error, rc,
3607 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3608 NULL, "Failed to process the action");
3614 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3615 const efx_mae_match_spec_t *left,
3616 const efx_mae_match_spec_t *right)
3618 bool have_same_class;
3621 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
&have_same_class);
3624 return (rc == 0) ? have_same_class : false;
3628 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3629 struct sfc_mae_outer_rule *rule)
3631 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3632 struct sfc_mae_outer_rule *entry;
3633 struct sfc_mae *mae = &sa->mae;
3635 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3636 /* An active rule is reused. Its class is wittingly valid. */
return 0;
}
3640 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3641 sfc_mae_outer_rules, entries) {
3642 const efx_mae_match_spec_t *left = entry->match_spec;
3643 const efx_mae_match_spec_t *right = rule->match_spec;
if (entry == rule)
continue;

3648 if (sfc_mae_rules_class_cmp(sa, left, right))
return 0;
}
3652 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3653 "support for outer frame pattern items is not guaranteed; "
3654 "other than that, the items are valid from SW standpoint");
3659 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3660 struct sfc_flow_spec_mae *spec)
3662 const struct rte_flow *entry;
3664 if (spec->match_spec == NULL)
return 0;
3667 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3668 const struct sfc_flow_spec *entry_spec = &entry->spec;
3669 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3670 const efx_mae_match_spec_t *left = es_mae->match_spec;
3671 const efx_mae_match_spec_t *right = spec->match_spec;
3673 switch (entry_spec->type) {
3674 case SFC_FLOW_SPEC_FILTER:
3675 /* Ignore VNIC-level flows */
break;
3677 case SFC_FLOW_SPEC_MAE:
3678 if (sfc_mae_rules_class_cmp(sa, left, right))
return 0;
break;
default:
SFC_ASSERT(B_FALSE);
}
}
3686 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3687 "support for inner frame pattern items is not guaranteed; "
3688 "other than that, the items are valid from SW standpoint");
3693 * Confirm that a given flow can be accepted by the FW.
* @param sa
3696 * Software adapter context
* @param flow
3698 * Flow to be verified
* @return
3700 * Zero on success and non-zero in the case of error.
3701 * A special value of EAGAIN indicates that the adapter is
3702 * not in started state. This state is compulsory because
3703 * it only makes sense to compare the rule class of the flow
3704 * being validated with classes of the active rules.
3705 * Such classes are wittingly supported by the FW.
3708 sfc_mae_flow_verify(struct sfc_adapter *sa,
3709 struct rte_flow *flow)
3711 struct sfc_flow_spec *spec = &flow->spec;
3712 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3713 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3716 SFC_ASSERT(sfc_adapter_is_locked(sa));
3718 if (sa->state != SFC_ETHDEV_STARTED)
return EAGAIN;
3721 if (outer_rule != NULL) {
3722 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
if (rc != 0)
return rc;
}
3727 return sfc_mae_action_rule_class_verify(sa, spec_mae);
3731 sfc_mae_flow_insert(struct sfc_adapter *sa,
3732 struct rte_flow *flow)
3734 struct sfc_flow_spec *spec = &flow->spec;
3735 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3736 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3737 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3738 struct sfc_mae_fw_rsrc *fw_rsrc;
3741 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
3743 if (outer_rule != NULL) {
3744 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
3745 spec_mae->match_spec);
if (rc != 0)
3747 goto fail_outer_rule_enable;
}
3750 if (action_set == NULL) {
3751 sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
3755 rc = sfc_mae_action_set_enable(sa, action_set);
if (rc != 0)
3757 goto fail_action_set_enable;
3759 if (action_set->n_counters > 0) {
3760 rc = sfc_mae_counter_start(sa);
3762 sfc_err(sa, "failed to start MAE counters support: %s",
3764 goto fail_mae_counter_start;
3768 fw_rsrc = &action_set->fw_rsrc;
3770 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3771 NULL, &fw_rsrc->aset_id,
3772 &spec_mae->rule_id);
if (rc != 0)
3774 goto fail_action_rule_insert;
3776 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3777 flow, spec_mae->rule_id.id);

return 0;
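/*
* (Editor's note) Resources are enabled bottom-up above: the outer
* rule, then the action set, then counter support, and only then is
* the action rule inserted; the failure labels below unwind them in
* reverse order.
*/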
3781 fail_action_rule_insert:
3782 fail_mae_counter_start:
3783 sfc_mae_action_set_disable(sa, action_set);
3785 fail_action_set_enable:
3786 if (outer_rule != NULL)
3787 sfc_mae_outer_rule_disable(sa, outer_rule);
3789 fail_outer_rule_enable:
return rc;
3794 sfc_mae_flow_remove(struct sfc_adapter *sa,
3795 struct rte_flow *flow)
3797 struct sfc_flow_spec *spec = &flow->spec;
3798 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3799 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3800 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3803 if (action_set == NULL) {
3804 sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
3805 goto skip_action_rule;
3808 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3810 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3812 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3813 flow, spec_mae->rule_id.id, strerror(rc));
3815 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3816 flow, spec_mae->rule_id.id);
3817 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3819 sfc_mae_action_set_disable(sa, action_set);

skip_action_rule:
3822 if (outer_rule != NULL)
3823 sfc_mae_outer_rule_disable(sa, outer_rule);
3829 sfc_mae_query_counter(struct sfc_adapter *sa,
3830 struct sfc_flow_spec_mae *spec,
3831 const struct rte_flow_action *action,
3832 struct rte_flow_query_count *data,
3833 struct rte_flow_error *error)
3835 struct sfc_mae_action_set *action_set = spec->action_set;
3836 const struct rte_flow_action_count *conf = action->conf;
3840 if (action_set == NULL || action_set->n_counters == 0) {
3841 return rte_flow_error_set(error, EINVAL,
3842 RTE_FLOW_ERROR_TYPE_ACTION, action,
3843 "Queried flow rule does not have count actions");
3846 for (i = 0; i < action_set->n_counters; i++) {
3848 * Get the first available counter of the flow rule if
3849 * counter ID is not specified.
3851 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
continue;
3854 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
3855 &action_set->counters[i], data);
if (rc != 0) {
3857 return rte_flow_error_set(error, EINVAL,
3858 RTE_FLOW_ERROR_TYPE_ACTION, action,
3859 "Queried flow rule counter action is invalid");
3865 return rte_flow_error_set(error, ENOENT,
3866 RTE_FLOW_ERROR_TYPE_ACTION, action,
3867 "No such flow rule action count ID");
3871 sfc_mae_flow_query(struct rte_eth_dev *dev,
3872 struct rte_flow *flow,
3873 const struct rte_flow_action *action,
void *data,
3875 struct rte_flow_error *error)
3877 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
3878 struct sfc_flow_spec *spec = &flow->spec;
3879 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3881 switch (action->type) {
3882 case RTE_FLOW_ACTION_TYPE_COUNT:
3883 return sfc_mae_query_counter(sa, spec_mae, action,
data, error);
default:
3886 return rte_flow_error_set(error, ENOTSUP,
3887 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3888 "Query for action of this type is not supported");
3893 sfc_mae_switchdev_init(struct sfc_adapter *sa)
3895 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3896 struct sfc_mae *mae = &sa->mae;
efx_mport_sel_t pf;
3898 efx_mport_sel_t phy;
int rc;
3901 sfc_log_init(sa, "entry");
3903 if (!sa->switchdev) {
3904 sfc_log_init(sa, "switchdev is not enabled - skip");
3908 if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
3910 sfc_err(sa, "failed to init switchdev - no MAE support");
3914 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
3917 sfc_err(sa, "failed get PF mport");
3921 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
3923 sfc_err(sa, "failed get PHY mport");
3927 rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
3928 SFC_MAE_RULE_PRIO_LOWEST,
3929 &mae->switchdev_rule_pf_to_ext);
3931 sfc_err(sa, "failed add MAE rule to forward from PF to PHY");
3935 rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
3936 SFC_MAE_RULE_PRIO_LOWEST,
3937 &mae->switchdev_rule_ext_to_pf);
3939 sfc_err(sa, "failed add MAE rule to forward from PHY to PF");
3943 sfc_log_init(sa, "done");
3948 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3954 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
3959 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
3961 struct sfc_mae *mae = &sa->mae;

if (!sa->switchdev)
return;
3966 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3967 sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
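/*
* (Editor's note) The two rules removed above are the PF <-> PHY
* forwarding pair installed by sfc_mae_switchdev_init() at the lowest
* MAE priority; together they implement the default switchdev
* datapath when no user flow matches.
*/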