/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_flow_tunnel.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
                              uint32_t nb_counters_max)
{
        return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}

static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
        sfc_mae_counters_fini(&registry->counters);
}

static int
sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
                                      struct sfc_mae_rule **rule)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
        unsigned int entry;
        int rc;

        for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
                if (internal_rules->rules[entry].spec == NULL)
                        break;
        }

        if (entry == SFC_MAE_NB_RULES_MAX) {
                rc = ENOSPC;
                sfc_err(sa, "failed: too many rules (%u rules used)", entry);
                goto fail_too_many_rules;
        }

        *rule = &internal_rules->rules[entry];

        return 0;

fail_too_many_rules:
        return rc;
}

int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
                                     const efx_mport_sel_t *mport_match,
                                     const efx_mport_sel_t *mport_deliver,
                                     int prio, struct sfc_mae_rule **rulep)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_rule *rule;
        int rc;

        sfc_log_init(sa, "entry");

        if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
                rc = EINVAL;
                sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
                        mae->nb_action_rule_prios_max);
                goto fail_invalid_prio;
        }
        if (prio < 0)
                prio = mae->nb_action_rule_prios_max - 1;

        rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
        if (rc != 0)
                goto fail_find_empty_slot;

        sfc_log_init(sa, "init MAE match spec");
        rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                     (uint32_t)prio, &rule->spec);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE match spec");
                goto fail_match_init;
        }

        rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
        if (rc != 0) {
                sfc_err(sa, "failed to set MAE match mport selector");
                goto fail_mport_set;
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE action set");
                goto fail_action_init;
        }

        rc = efx_mae_action_set_populate_deliver(rule->actions,
                                                 mport_deliver);
        if (rc != 0) {
                sfc_err(sa, "failed to populate deliver action");
                goto fail_populate_deliver;
        }

        rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
                                      &rule->action_set);
        if (rc != 0) {
                sfc_err(sa, "failed to allocate action set");
                goto fail_action_set_alloc;
        }

        rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
                                        &rule->action_set,
                                        &rule->rule_id);
        if (rc != 0) {
                sfc_err(sa, "failed to insert action rule");
                goto fail_rule_insert;
        }

        *rulep = rule;

        sfc_log_init(sa, "done");

        return 0;

fail_rule_insert:
        efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
        efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}

void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
        if (rule == NULL || rule->spec == NULL)
                return;

        efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
        efx_mae_action_set_free(sa->nic, &rule->action_set);
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);
        efx_mae_match_spec_fini(sa->nic, rule->spec);

        rule->spec = NULL;
}
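
/*
 * Usage sketch (illustrative only, not part of the driver): pairing
 * sfc_mae_rule_add_mport_match_deliver() with sfc_mae_rule_del() to
 * forward all traffic from one MPORT to another. The particular mport
 * selectors and the negative priority (lowest level) are assumptions
 * of this example, not requirements of the API.
 *
 *	struct sfc_mae_rule *rule = NULL;
 *	efx_mport_sel_t from;
 *	efx_mport_sel_t to;
 *	int rc;
 *
 *	rc = efx_mae_mport_by_phy_port(0, &from);
 *	if (rc == 0)
 *		rc = efx_mae_mport_by_pcie_function(0, EFX_PCI_VF_INVALID,
 *						    &to);
 *	if (rc == 0)
 *		rc = sfc_mae_rule_add_mport_match_deliver(sa, &from, &to,
 *							  -1, &rule);
 *	...
 *	sfc_mae_rule_del(sa, rule);
 */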

int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "init MAE counter registry");
        rc = sfc_mae_counter_registry_init(&mae->counter_registry,
                                           limits.eml_max_n_counters);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
                        limits.eml_max_n_counters, rte_strerror(rc));
                goto fail_counter_registry_init;
        }

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /* RTE ethdev MPORT matches that of the entity for independent ports. */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        sfc_log_init(sa, "allocate encap. header bounce buffer");
        bounce_eh->buf_size = limits.eml_encap_header_size_limit;
        bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                    bounce_eh->buf_size, 0);
        if (bounce_eh->buf == NULL) {
                rc = ENOMEM;
                goto fail_mae_alloc_bounce_eh;
        }

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        mae->encap_types_supported = limits.eml_encap_types_supported;
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
        sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        rte_free(mae->bounce_eh.buf);
        sfc_mae_counter_registry_fini(&mae->counter_registry);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}
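
/*
 * Lifecycle sketch (illustrative only): sfc_mae_attach() is expected to
 * run once during device probe and sfc_mae_detach() once during
 * shutdown. Callers are assumed to check mae->status before using any
 * MAE facility.
 *
 *	rc = sfc_mae_attach(sa);
 *	if (rc != 0)
 *		goto fail_mae_attach;
 *	...
 *	if (sa->mae.status == SFC_MAE_STATUS_SUPPORTED)
 *		... offload flows via MAE ...
 *	...
 *	sfc_mae_detach(sa);
 */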

static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        sfc_dbg(sa, "attaching to outer_rule=%p", rule);
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        sfc_dbg(sa, "added outer_rule=%p", rule);

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
            rule->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
                        rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
        }

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);

        sfc_dbg(sa, "deleted outer_rule=%p", rule);
}
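
/*
 * The attach/add/del trio above implements a simple deduplication
 * scheme: a caller first tries to attach to an existing entry and only
 * adds a new one on a miss. A sketch of the intended call pattern
 * (illustrative only; see sfc_mae_rule_process_outer() for real usage):
 *
 *	rule = sfc_mae_outer_rule_attach(sa, match_spec, encap_type);
 *	if (rule != NULL) {
 *		efx_mae_match_spec_fini(sa->nic, match_spec);
 *	} else {
 *		rc = sfc_mae_outer_rule_add(sa, match_spec, encap_type,
 *					    &rule);
 *	}
 *	...
 *	sfc_mae_outer_rule_del(sa, rule);
 */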

static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable outer_rule=%p: %s",
                                rule, strerror(rc));
                        return rc;
                }
        }

        if (match_spec_action == NULL)
                goto skip_action_rule;

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

                return rc;
        }

skip_action_rule:
        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
                        rule, fw_rsrc->rule_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
                        rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
                                rule, fw_rsrc->rule_id.id);
                } else {
                        sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
                                rule, fw_rsrc->rule_id.id, strerror(rc));
                }
                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}
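
/*
 * Note on the two reference counters involved here: rule->refcnt tracks
 * software users of the entry, while rule->fw_rsrc.refcnt tracks how
 * many enabled flows use the FW resource. The FW rule is inserted on
 * the first enable and removed on the last disable (schematic):
 *
 *	sfc_mae_outer_rule_enable(sa, rule, spec);  FW refcnt 0 -> 1: insert
 *	sfc_mae_outer_rule_enable(sa, rule, spec);  FW refcnt 1 -> 2: reuse
 *	sfc_mae_outer_rule_disable(sa, rule);       FW refcnt 2 -> 1: keep
 *	sfc_mae_outer_rule_disable(sa, rule);       FW refcnt 1 -> 0: remove
 */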

static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        sfc_dbg(sa, "attaching to encap_header=%p",
                                encap_header);
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        sfc_dbg(sa, "added encap_header=%p", encap_header);

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
            encap_header->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
                        encap_header, encap_header->fw_rsrc.eh_id.id,
                        encap_header->fw_rsrc.refcnt);
        }

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);

        sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}
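
/*
 * Deduplication sketch for encap. headers (illustrative only): the
 * bounce buffer filled in during action parsing is either matched to an
 * existing entry byte-for-byte or copied into a new one.
 *
 *	encap_header = sfc_mae_encap_header_attach(sa, bounce_eh);
 *	if (encap_header == NULL) {
 *		rc = sfc_mae_encap_header_add(sa, bounce_eh, &encap_header);
 *		if (rc != 0)
 *			return rc;
 *	}
 *	...
 *	sfc_mae_encap_header_del(sa, encap_header);
 */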

static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable encap_header=%p: %s",
                                encap_header, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
                        encap_header, fw_rsrc->eh_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
                        encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
                                encap_header, fw_rsrc->eh_id.id);
                } else {
                        sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
                                encap_header, fw_rsrc->eh_id.id, strerror(rc));
                }
                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
                        struct sfc_mae_counter_id *counters,
                        unsigned int n_counters,
                        efx_mae_actions_t *action_set_spec)
{
        int rc;

        sfc_log_init(sa, "entry");

        if (n_counters == 0) {
                sfc_log_init(sa, "no counters - skip");
                return 0;
        }

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        rc = sfc_mae_counter_enable(sa, &counters[0]);
        if (rc != 0) {
                sfc_err(sa, "failed to enable MAE counter %u: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_counter_add;
        }

        rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
                                                   &counters[0].mae_id);
        if (rc != 0) {
                sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_fill_in_id;
        }

        return 0;

fail_fill_in_id:
        (void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}

static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
                         struct sfc_mae_counter_id *counters,
                         unsigned int n_counters)
{
        if (n_counters == 0)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
                sfc_err(sa, "failed to disable: already disabled");
                return EALREADY;
        }

        return sfc_mae_counter_disable(sa, &counters[0]);
}
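
/*
 * Pairing sketch (illustrative only): counters are enabled when the
 * owning action set is enabled and disabled symmetrically. Only one
 * counter per action set is supported at the moment, hence the
 * n_counters == 1 assertions above.
 *
 *	rc = sfc_mae_counters_enable(sa, counters, 1, aset_spec);
 *	...
 *	rc = sfc_mae_counters_disable(sa, counters, 1);
 */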

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          unsigned int n_count,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                /*
                 * Shared counters are not supported, hence action sets with
                 * COUNT are not attachable.
                 */
                if (action_set->encap_header == encap_header &&
                    n_count == 0 &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        sfc_dbg(sa, "attaching to action_set=%p", action_set);
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       uint64_t *ft_group_hit_counter,
                       struct sfc_flow_tunnel *ft,
                       unsigned int n_counters,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;
        unsigned int i;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL) {
                sfc_err(sa, "failed to alloc action set");
                return ENOMEM;
        }

        if (n_counters > 0) {
                const struct rte_flow_action *action;

                action_set->counters = rte_malloc("sfc_mae_counter_ids",
                        sizeof(action_set->counters[0]) * n_counters, 0);
                if (action_set->counters == NULL) {
                        rte_free(action_set);
                        sfc_err(sa, "failed to alloc counters");
                        return ENOMEM;
                }

                for (i = 0; i < n_counters; ++i) {
                        action_set->counters[i].rte_id_valid = B_FALSE;
                        action_set->counters[i].mae_id.id =
                                EFX_MAE_RSRC_ID_INVALID;

                        action_set->counters[i].ft_group_hit_counter =
                                ft_group_hit_counter;
                        action_set->counters[i].ft = ft;
                }

                for (action = actions, i = 0;
                     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
                     ++action) {
                        const struct rte_flow_action_count *conf;

                        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                                continue;

                        conf = action->conf;

                        action_set->counters[i].rte_id_valid = B_TRUE;
                        action_set->counters[i].rte_id = conf->id;
                        i++;
                }
                action_set->n_counters = n_counters;
        }

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        sfc_dbg(sa, "added action_set=%p", action_set);

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
            action_set->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
                        action_set, action_set->fw_rsrc.aset_id.id,
                        action_set->fw_rsrc.refcnt);
        }

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        if (action_set->n_counters > 0) {
                SFC_ASSERT(action_set->n_counters == 1);
                SFC_ASSERT(action_set->counters[0].mae_id.id ==
                           EFX_MAE_RSRC_ID_INVALID);
                rte_free(action_set->counters);
        }
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);

        sfc_dbg(sa, "deleted action_set=%p", action_set);
}
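
/*
 * Deduplication sketch for action sets (illustrative only): as with
 * outer rules and encap. headers, attach is tried first; action sets
 * with COUNT never match because shared counters are not supported.
 *
 *	action_set = sfc_mae_action_set_attach(sa, encap_header,
 *					       n_count, spec);
 *	if (action_set == NULL) {
 *		rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
 *					    ft_group_hit_counter, ft,
 *					    n_counters, &action_set);
 *		if (rc != 0)
 *			return rc;
 *	}
 *	...
 *	sfc_mae_action_set_del(sa, action_set);
 */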

static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_counter_id *counters = action_set->counters;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = sfc_mae_counters_enable(sa, counters,
                                             action_set->n_counters,
                                             action_set->spec);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));

                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable action_set=%p: %s",
                                action_set, strerror(rc));

                        (void)sfc_mae_counters_disable(sa, counters,
                                                       action_set->n_counters);
                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
                        action_set, fw_rsrc->aset_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
                        action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
                                action_set, fw_rsrc->aset_id.id);
                } else {
                        sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
                                action_set, fw_rsrc->aset_id.id, strerror(rc));
                }
                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                rc = sfc_mae_counters_disable(sa, action_set->counters,
                                              action_set->n_counters);
                if (rc != 0) {
                        sfc_err(sa, "failed to disable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));
                }

                sfc_mae_encap_header_disable(sa, action_set->encap_header);
        }

        --(fw_rsrc->refcnt);
}
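
/*
 * Resource ordering note: enable acquires resources in the order
 * encap. header -> counters -> action set, and disable above releases
 * them in exact reverse (action set -> counters -> encap. header), so
 * a failure at any enable step can unwind cleanly.
 */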

void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        if (spec_mae->ft != NULL) {
                if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
                        spec_mae->ft->jump_rule_is_set = B_FALSE;

                SFC_ASSERT(spec_mae->ft->refcnt != 0);
                --(spec_mae->ft->refcnt);
        }

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is a L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}
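
/*
 * Layout example (illustrative only) for a doubly tagged pattern
 * ETH / VLAN / VLAN / IPV4: pdata->ethertypes[] holds 0x88a8 (outer
 * TPID), 0x8100 (inner TPID) and 0x0800 (L3 EtherType), which the
 * function above distributes as follows:
 *
 *	ETHER_TYPE_BE  <- ethertypes[2] (0x0800, the L3 EtherType)
 *	VLAN0_PROTO_BE <- ethertypes[0] (0x88a8, the outer TPID)
 *	VLAN1_PROTO_BE <- ethertypes[1] (0x8100, the inner TPID)
 */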

static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single item VLAN is followed by a L3 item, value
                 * of "type" in item ETH can't be a double-tagging TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
                rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
                unsigned int tpid_idx;

                /*
                 * This loop can have only two iterations. On the second one,
                 * drop outer tag presence enforcement bit because the inner
                 * tag presence automatically assumes that for the outer tag.
                 */
                enforce_tag_presence[0] = B_FALSE;

                if (tpid_m == RTE_BE16(0)) {
                        if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
                                enforce_tag_presence[ethertype_idx] = B_TRUE;

                        /* No match on this field, and no value check. */
                        nb_supported_tpids = 1;
                        continue;
                }

                /* Exact match is supported only. */
                if (tpid_m != RTE_BE16(0xffff)) {
                        sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
                                rte_be_to_cpu_16(tpid_m));
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (tpid_v == supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
                                rte_be_to_cpu_16(tpid_v));
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
                rte_be16_t enforced_et;

                enforced_et = pdata->innermost_ethertype_restriction.value;

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value = enforced_et;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value != enforced_et) {
                        sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
                                rte_be_to_cpu_16(enforced_et),
                                rte_be_to_cpu_16(et->value),
                                rte_be_to_cpu_16(et->mask));
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now, when the number of VLAN tags is known, set fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is either a valid L3 EtherType (or 0x0000/0x0000),
         * and the last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                                pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
                                pdata->l3_next_proto_restriction_value,
                                pdata->l3_next_proto_value,
                                pdata->l3_next_proto_mask);
                        rc = EINVAL;
                        goto fail;
                }
        }

        if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_OVLAN],
                                                enforce_tag_presence[0] ||
                                                pdata->has_ovlan_value);
                if (rc != 0)
                        goto fail;
        }

        if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_IVLAN],
                                                enforce_tag_presence[1] ||
                                                pdata->has_ivlan_value);
                if (rc != 0)
                        goto fail;
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_mark *spec = item->spec;
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;

        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "NULL spec in item MARK");
        }

        /*
         * This item is used in tunnel offload support only.
         * It must go before any network header items. This
         * way, sfc_mae_rule_preparse_item_mark() must have
         * already parsed it. Only one item MARK is allowed.
         */
        if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
            spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "invalid item MARK");
        }

        return 0;
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t		field_id;
        size_t				size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t				ofst;
};

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}
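
/*
 * Usage sketch (illustrative only): build the mask accepted for item
 * ETH from its field locators; any field not covered by a locator is
 * left zero, i.e. not matchable.
 *
 *	struct rte_flow_item_eth supp_mask;
 *
 *	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
 *				     &supp_mask, sizeof(supp_mask));
 */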

static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
                   struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[fl->field_id],
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth override_mask;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));
        supp_mask.has_vlan = 1;

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
                /*
                 * The HW/FW hasn't got support for match on MAC addresses in
                 * outer rules yet (this will change). Match on VLAN presence
                 * isn't supported either. Ignore these match criteria.
                 */
                memcpy(&override_mask, mask, sizeof(override_mask));
                memset(&override_mask.hdr.dst_addr, 0,
                       sizeof(override_mask.hdr.dst_addr));
                memset(&override_mask.hdr.src_addr, 0,
                       sizeof(override_mask.hdr.src_addr));
                override_mask.has_vlan = 0;

                mask = (const uint8_t *)&override_mask;
        }

        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_eth *item_spec;
                const struct rte_flow_item_eth *item_mask;

                item_spec = (const struct rte_flow_item_eth *)spec;
                item_mask = (const struct rte_flow_item_eth *)mask;

                /*
                 * Remember various match criteria in the parsing context.
                 * sfc_mae_rule_process_pattern_data() will consider them
                 * altogether when the rest of the items have been parsed.
                 */
                ethertypes[0].value = item_spec->type;
                ethertypes[0].mask = item_mask->type;
                if (item_mask->has_vlan) {
                        pdata->has_ovlan_mask = B_TRUE;
                        if (item_spec->has_vlan)
                                pdata->has_ovlan_value = B_TRUE;
                }
        } else {
                /*
                 * The specification is empty. The overall pattern
                 * validity will be enforced at the end of parsing.
                 * See sfc_mae_rule_process_pattern_data().
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
        /* Outermost tag */
        {
                EFX_MAE_FIELD_VLAN0_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },

        /* Innermost tag */
        {
                EFX_MAE_FIELD_VLAN1_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
                &pdata->has_ovlan_mask,
                &pdata->has_ivlan_mask,
        };
        boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
                &pdata->has_ovlan_value,
                &pdata->has_ivlan_value,
        };
        boolean_t *cur_tag_presence_bit_mp;
        boolean_t *cur_tag_presence_bit_vp;
        const struct sfc_mae_field_locator *flocs;
        struct rte_flow_item_vlan supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        unsigned int nb_flocs;
        int rc;

        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't match that many VLAN tags");
        }

        cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
        cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

        if (*cur_tag_presence_bit_mp == B_TRUE &&
            *cur_tag_presence_bit_vp == B_FALSE) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "The previous item enforces no (more) VLAN, "
                                "so the current item (VLAN) must not exist");
        }

        nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
        flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

        sfc_mae_item_build_supp_mask(flocs, nb_flocs,
                                     &supp_mask, sizeof(supp_mask));
        /*
         * This only means that the field is supported by the driver and libefx.
         * Support on NIC level will be checked when all items have been parsed.
         */
        supp_mask.has_more_vlan = 1;

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_vlan_mask,
                                 sizeof(struct rte_flow_item_vlan), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_ethertype *et = pdata->ethertypes;
                const struct rte_flow_item_vlan *item_spec;
                const struct rte_flow_item_vlan *item_mask;

                item_spec = (const struct rte_flow_item_vlan *)spec;
                item_mask = (const struct rte_flow_item_vlan *)mask;

                /*
                 * Remember various match criteria in the parsing context.
                 * sfc_mae_rule_process_pattern_data() will consider them
                 * altogether when the rest of the items have been parsed.
                 */
                et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
                et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
                pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
                if (item_mask->has_more_vlan) {
                        if (pdata->nb_vlan_tags ==
                            SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Can't use 'has_more_vlan' in "
                                        "the second item VLAN");
                        }
                        pdata->has_ivlan_mask = B_TRUE;
                        if (item_spec->has_more_vlan)
                                pdata->has_ivlan_value = B_TRUE;
                }

                /* Convert TCI to MAE representation right now. */
                rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
                                        ctx_mae, error);
                if (rc != 0)
                        return rc;
        }

        ++(pdata->nb_vlan_tags);

        return 0;
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
        {
                EFX_MAE_FIELD_SRC_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
                offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
        },
        {
                EFX_MAE_FIELD_IP_TOS,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
                                 hdr.type_of_service),
                offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
                offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
        },
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv4 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv4 *item_spec;
                const struct rte_flow_item_ipv4 *item_mask;

                item_spec = (const struct rte_flow_item_ipv4 *)spec;
                item_mask = (const struct rte_flow_item_ipv4 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
                pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
        } else {
                return 0;
        }

        return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
        {
                EFX_MAE_FIELD_SRC_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
                offsetof(struct rte_flow_item_ipv6, hdr.proto),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
                offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
        },
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv6 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        rte_be32_t vtc_flow_be;
        uint32_t vtc_flow;
        uint8_t tc_value;
        uint8_t tc_mask;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
                                     &supp_mask, sizeof(supp_mask));

        vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
        memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv6 *item_spec;
                const struct rte_flow_item_ipv6 *item_mask;

                item_spec = (const struct rte_flow_item_ipv6 *)spec;
                item_mask = (const struct rte_flow_item_ipv6 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.proto;
                pdata->l3_next_proto_mask = item_mask->hdr.proto;
        } else {
                return 0;
        }

        rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
                                ctx_mae, error);
        if (rc != 0)
                return rc;

        memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
        vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
        tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

        memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
        vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
        tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

        rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_TOS],
                                          sizeof(tc_value), &tc_value,
                                          sizeof(tc_mask), &tc_mask);
        if (rc != 0) {
                return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return 0;
}

static const struct sfc_mae_field_locator flocs_tcp[] = {
        {
                EFX_MAE_FIELD_L4_SPORT_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
                offsetof(struct rte_flow_item_tcp, hdr.src_port),
        },
        {
                EFX_MAE_FIELD_L4_DPORT_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
                offsetof(struct rte_flow_item_tcp, hdr.dst_port),
        },
        {
                EFX_MAE_FIELD_TCP_FLAGS_BE,
                /*
                 * The values have been picked intentionally since the
                 * target MAE field is oversize (16 bit). This mapping
                 * relies on the fact that the MAE field is big-endian.
                 */
                RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
                RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
                offsetof(struct rte_flow_item_tcp, hdr.data_off),
        },
};

static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_tcp supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        /*
         * When encountered among outermost items, item TCP is invalid.
         * Check which match specification is being constructed now.
         */
        if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "TCP in outer frame is invalid");
        }

        sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp), error);
        if (rc != 0)
                return rc;

        pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
        pdata->l3_next_proto_restriction_mask = 0xff;

        if (spec == NULL)
                return 0;

        return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_udp[] = {
        {
                EFX_MAE_FIELD_L4_SPORT_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
                offsetof(struct rte_flow_item_udp, hdr.src_port),
        },
        {
                EFX_MAE_FIELD_L4_DPORT_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
                offsetof(struct rte_flow_item_udp, hdr.dst_port),
        },
};

static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_udp supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp), error);
        if (rc != 0)
                return rc;

        pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
        pdata->l3_next_proto_restriction_mask = 0xff;

        if (spec == NULL)
                return 0;

        return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_tunnel[] = {
        {
                /*
                 * The size and offset values are relevant
                 * for Geneve and NVGRE, too.
                 */
                .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
                .ofst = offsetof(struct rte_flow_item_vxlan, vni),
        },
};

/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

        FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
        FIELD_ID_NO_REMAP(ETH_SADDR_BE),
        FIELD_ID_NO_REMAP(ETH_DADDR_BE),
        FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
        FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
        FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
        FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
        FIELD_ID_NO_REMAP(SRC_IP4_BE),
        FIELD_ID_NO_REMAP(DST_IP4_BE),
        FIELD_ID_NO_REMAP(IP_PROTO),
        FIELD_ID_NO_REMAP(IP_TOS),
        FIELD_ID_NO_REMAP(IP_TTL),
        FIELD_ID_NO_REMAP(SRC_IP6_BE),
        FIELD_ID_NO_REMAP(DST_IP6_BE),
        FIELD_ID_NO_REMAP(L4_SPORT_BE),
        FIELD_ID_NO_REMAP(L4_DPORT_BE),
        FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
        FIELD_ID_NO_REMAP(HAS_OVLAN),
        FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};
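
/*
 * Remap usage example (illustrative only): the same parsing code can
 * serve both specification types because every field access goes
 * through ctx->field_ids_remap. For an ACTION spec the mapping is an
 * identity; for an OUTER spec (see the registry below) it redirects
 * to the "ENC" counterpart:
 *
 *	field_ids_no_remap[EFX_MAE_FIELD_IP_PROTO]
 *		== EFX_MAE_FIELD_IP_PROTO
 *	field_ids_remap_to_encap[EFX_MAE_FIELD_IP_PROTO]
 *		== EFX_MAE_FIELD_ENC_IP_PROTO
 */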

/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

        FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
        FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
        FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
        FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
        FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
        FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
        FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
        FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
        FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
        FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
        FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
        FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
        FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
        FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
        FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
        FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
        FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
        FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};

static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
                               struct sfc_flow_parse_ctx *ctx,
                               struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
        uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
        const struct rte_flow_item_vxlan *vxp;
        uint8_t supp_mask[sizeof(uint64_t)];
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
                /*
                 * As a workaround, pattern processing has started from
                 * this (tunnel) item. No pattern data to process yet.
                 */
        } else {
                /*
                 * We're about to start processing inner frame items.
                 * Process pattern data that has been deferred so far
                 * and reset pattern data storage.
                 */
                rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
                if (rc != 0)
                        return rc;
        }

        memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

        sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
                                     &supp_mask, sizeof(supp_mask));

        /*
         * This tunnel item was preliminarily detected by
         * sfc_mae_rule_encap_parse_init(). Default mask
         * was also picked by that helper. Use it here.
         */
        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 ctx_mae->tunnel_def_mask,
                                 ctx_mae->tunnel_def_mask_size, error);
        if (rc != 0)
                return rc;

        /*
         * This item and later ones comprise a
         * match specification of type ACTION.
         */
        ctx_mae->match_spec = ctx_mae->match_spec_action;

        /* This item and later ones use non-encap. EFX MAE field IDs. */
        ctx_mae->field_ids_remap = field_ids_no_remap;

        if (spec == NULL)
                return 0;

        /*
         * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
         * Copy 24-bit VNI, which is BE, at offset 1 in it.
         * The extra byte is 0 both in the mask and in the value.
         */
        vxp = (const struct rte_flow_item_vxlan *)spec;
        memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

        vxp = (const struct rte_flow_item_vxlan *)mask;
        memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

        rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
                                          EFX_MAE_FIELD_ENC_VNET_ID_BE,
                                          sizeof(vnet_id_v), vnet_id_v,
                                          sizeof(vnet_id_m), vnet_id_m);
        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Failed to set VXLAN VNI");
        }

        return rc;
}
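
/*
 * VNI layout illustration (schematic): EFX_MAE_FIELD_ENC_VNET_ID_BE is
 * 32 bits wide, while the VXLAN VNI is 24 bits, so the copy above
 * leaves byte 0 zero in both the value and the mask:
 *
 *	vnet_id_v[4] = { 0x00, vni[0], vni[1], vni[2] }
 */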

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_MARK,
                .name = "MARK",
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_mark,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .name = "PORT_ID",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern is don't care.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_port_id,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
                .name = "PHY_PORT",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern is don't care.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_phy_port,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PF,
                .name = "PF",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern is don't care.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_pf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VF,
                .name = "VF",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern is don't care.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .name = "ETH",
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .name = "VLAN",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .name = "IPV4",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .name = "IPV6",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .name = "TCP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .name = "UDP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .name = "VXLAN",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .name = "GENEVE",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .name = "NVGRE",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
};
static int
sfc_mae_rule_process_outer(struct sfc_adapter *sa,
			   struct sfc_mae_parse_ctx *ctx,
			   struct sfc_mae_outer_rule **rulep,
			   struct rte_flow_error *error)
{
	efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
	int rc;

	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
		*rulep = NULL;
		goto no_or_id;
	}

	SFC_ASSERT(ctx->match_spec_outer != NULL);

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Inconsistent pattern (outer)");
	}

	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
					   ctx->encap_type);
	if (*rulep != NULL) {
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
	} else {
		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
					    ctx->encap_type, rulep);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Failed to process the pattern");
		}
	}

	/* The spec has now been tracked by the outer rule entry. */
	ctx->match_spec_outer = NULL;

no_or_id:
	switch (ctx->ft_rule_type) {
	case SFC_FT_RULE_NONE:
		break;
	case SFC_FT_RULE_JUMP:
		/* No action rule */
		return 0;
	case SFC_FT_RULE_GROUP:
		/*
		 * Match on recirculation ID rather than
		 * on the outer rule allocation handle.
		 */
		rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
					SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
		}
		return 0;
	default:
		SFC_ASSERT(B_FALSE);
	}

	/*
	 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
	 * inner parse (when some outer rule is hit) and action rule lookup.
	 * If the currently processed flow does not come with an outer rule,
	 * its action rule must be available only for packets which miss in
	 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
	 * in the action rule specification; this ensures correct behaviour.
	 *
	 * If, on the other hand, this flow does have an outer rule, its ID
	 * may be unknown at the moment (not yet allocated), but OR_ID mask
	 * has to be set to 0xffffffff anyway for correct class comparisons.
	 * When the outer rule has been allocated, this match field will be
	 * overridden by sfc_mae_outer_rule_enable() to use the right value.
	 */
	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
						  &invalid_rule_id);
	if (rc != 0) {
		if (*rulep != NULL)
			sfc_mae_outer_rule_del(sa, *rulep);

		*rulep = NULL;

		return rte_flow_error_set(error, rc,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Failed to process the pattern");
	}

	return 0;
}
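/*
 * To illustrate the OR_ID logic above: a flow with no outer rule gets
 * OR_ID match 0xffffffff/0xffffffff, so its action rule only fires for
 * packets that miss in the outer rule table; a flow with an outer rule
 * keeps the all-ones mask, and sfc_mae_outer_rule_enable() later
 * rewrites the value with the ID actually allocated by the FW.
 */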
static int
sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
				struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_flow_tunnel *ft;
	uint32_t user_mark;

	if (spec == NULL) {
		sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
		return EINVAL;
	}

	ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
	if (ft == NULL) {
		sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
		return EINVAL;
	}

	if (ft->refcnt == 0) {
		sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
			ft->id);
		return ENOENT;
	}

	user_mark = SFC_FT_GET_USER_MARK(spec->id);
	if (user_mark != 0) {
		sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
		return EINVAL;
	}

	sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");

	ctx->ft_rule_type = SFC_FT_RULE_GROUP;
	ctx->ft = ft;

	return 0;
}
static int
sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
			      struct sfc_mae_parse_ctx *ctx,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item *pattern = ctx->pattern;
	struct sfc_mae *mae = &sa->mae;
	uint8_t recirc_id = 0;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (;;) {
		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_MARK:
			rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
							     ctx);
			if (rc != 0) {
				return rte_flow_error_set(error, rc,
						RTE_FLOW_ERROR_TYPE_ITEM,
						pattern, "tunnel offload: GROUP: invalid item MARK");
			}
			++pattern;
			continue;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
			ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_vxlan_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
			ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_geneve_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
			ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
			ctx->tunnel_def_mask_size =
				sizeof(rte_flow_item_nvgre_mask);
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			break;
		default:
			++pattern;
			continue;
		}

		break;
	}

	switch (ctx->ft_rule_type) {
	case SFC_FT_RULE_NONE:
		if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
			return 0;
		break;
	case SFC_FT_RULE_JUMP:
		if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  pattern, "tunnel offload: JUMP: invalid item");
		}
		ctx->encap_type = ctx->ft->encap_type;
		break;
	case SFC_FT_RULE_GROUP:
		if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "tunnel offload: GROUP: missing tunnel item");
		} else if (ctx->encap_type != ctx->ft->encap_type) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  pattern, "tunnel offload: GROUP: tunnel type mismatch");
		}

		/*
		 * The HW/FW hasn't got support for the use of "ENC" fields in
		 * action rules (except the VNET_ID one) yet. As a workaround,
		 * start parsing the pattern from the tunnel item.
		 */
		ctx->pattern = pattern;
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "OR: unsupported tunnel type");
	}

	switch (ctx->ft_rule_type) {
	case SFC_FT_RULE_JUMP:
		recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
		/* FALLTHROUGH */
	case SFC_FT_RULE_NONE:
		if (ctx->priority >= mae->nb_outer_rule_prios_max) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					NULL, "OR: unsupported priority level");
		}

		rc = efx_mae_match_spec_init(sa->nic,
					     EFX_MAE_RULE_OUTER, ctx->priority,
					     &ctx->match_spec_outer);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"OR: failed to initialise the match specification");
		}

		/*
		 * Outermost items comprise a match
		 * specification of type OUTER.
		 */
		ctx->match_spec = ctx->match_spec_outer;

		/* Outermost items use "ENC" EFX MAE field IDs. */
		ctx->field_ids_remap = field_ids_remap_to_encap;

		rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
						      recirc_id);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"OR: failed to initialise RECIRC_ID");
		}
		break;
	case SFC_FT_RULE_GROUP:
		/* Outermost items -> "ENC" match fields in the action rule. */
		ctx->field_ids_remap = field_ids_remap_to_encap;
		ctx->match_spec = ctx->match_spec_action;

		/* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
		ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return 0;
}
static void
sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
			      struct sfc_mae_parse_ctx *ctx)
{
	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return;

	if (ctx->match_spec_outer != NULL)
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
}
int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
			   const struct rte_flow_item pattern[],
			   struct sfc_flow_spec_mae *spec,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx ctx_mae;
	unsigned int priority_shift = 0;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	memset(&ctx_mae, 0, sizeof(ctx_mae));
	ctx_mae.ft_rule_type = spec->ft_rule_type;
	ctx_mae.priority = spec->priority;
	ctx_mae.ft = spec->ft;
	ctx_mae.sa = sa;

	switch (ctx_mae.ft_rule_type) {
	case SFC_FT_RULE_JUMP:
		/*
		 * By design, this flow should be represented solely by the
		 * outer rule. But the HW/FW hasn't got support for setting
		 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
		 * does it support outer rule counters. As a workaround, an
		 * action rule of lower priority is used to do the job.
		 */
		priority_shift = 1;

		/* FALLTHROUGH */
	case SFC_FT_RULE_GROUP:
		if (ctx_mae.priority != 0) {
			/*
			 * Because of the above workaround, deny the
			 * use of priorities to JUMP and GROUP rules.
			 */
			rc = rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
				"tunnel offload: priorities are not supported");
			goto fail_priority_check;
		}

		/* FALLTHROUGH */
	case SFC_FT_RULE_NONE:
		rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
					     spec->priority + priority_shift,
					     &ctx_mae.match_spec_action);
		if (rc != 0) {
			rc = rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"AR: failed to initialise the match specification");
			goto fail_init_match_spec_action;
		}
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	/*
	 * As a preliminary setting, assume that there is no encapsulation
	 * in the pattern. That is, pattern items are about to comprise a
	 * match specification of type ACTION and use non-encap. field IDs.
	 *
	 * sfc_mae_rule_encap_parse_init() below may override this.
	 */
	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
	ctx_mae.match_spec = ctx_mae.match_spec_action;
	ctx_mae.field_ids_remap = field_ids_no_remap;
	ctx_mae.pattern = pattern;

	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
	ctx.mae = &ctx_mae;

	rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
	if (rc != 0)
		goto fail_encap_parse_init;

	/*
	 * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
	 * GROUP rule. Remember its properties for later use.
	 */
	spec->ft_rule_type = ctx_mae.ft_rule_type;
	spec->ft = ctx_mae.ft;

	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
				    ctx_mae.pattern, &ctx, error);
	if (rc != 0)
		goto fail_parse_pattern;

	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
	if (rc != 0)
		goto fail_process_pattern_data;

	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
	if (rc != 0)
		goto fail_process_outer;

	if (ctx_mae.match_spec_action != NULL &&
	    !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
		rc = rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Inconsistent pattern");
		goto fail_validate_match_spec_action;
	}

	spec->match_spec = ctx_mae.match_spec_action;

	return 0;

fail_validate_match_spec_action:
fail_process_outer:
fail_process_pattern_data:
fail_parse_pattern:
	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

fail_encap_parse_init:
	if (ctx_mae.match_spec_action != NULL)
		efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
fail_priority_check:
	return rc;
}
/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to MAE
 * representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * one belonging to some bundle type. If an action is not supposed to
 * belong to any bundle, or if this action is END, it is described as
 * one belonging to a dummy bundle of type EMPTY.
 *
 * A currently tracked bundle will be submitted if a repeating
 * action or an action of different bundle type follows.
 */
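/*
 * Example of the bundling scheme (application-provided actions assumed):
 * OF_PUSH_VLAN (ethertype 0x8100), OF_SET_VLAN_VID (vid 5) and
 * OF_SET_VLAN_PCP (pcp 3) all classify as the VLAN_PUSH bundle type and
 * get merged into a single MAE VLAN_PUSH action with TPID 0x8100 and
 * TCI (3 << 13) | 5, submitted once the bundle ends.
 */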
enum sfc_mae_actions_bundle_type {
	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
	enum sfc_mae_actions_bundle_type	type;

	/* Indicates actions already tracked by the current bundle */
	uint64_t				actions_mask;

	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
	rte_be16_t				vlan_push_tpid;
	rte_be16_t				vlan_push_tci;
};
/*
 * Combine configuration of RTE flow actions tracked by the bundle into a
 * single action and submit the result to MAE action set specification.
 * Do nothing in the case of dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
			      efx_mae_actions_t *spec)
{
	int rc = 0;

	switch (bundle->type) {
	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
		break;
	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
		rc = efx_mae_action_set_populate_vlan_push(
			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return rc;
}
/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
			    struct sfc_mae_actions_bundle *bundle,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	enum sfc_mae_actions_bundle_type bundle_type_new;
	int rc;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
		break;
	default:
		/*
		 * Self-sufficient actions, including END, are handled in this
		 * case. No checks for unsupported actions are needed here
		 * because parsing doesn't occur at this point.
		 */
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
		break;
	}

	if (bundle_type_new != bundle->type ||
	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
		rc = sfc_mae_actions_bundle_submit(bundle, spec);
		if (rc != 0)
			goto fail_submit;

		memset(bundle, 0, sizeof(*bundle));
	}

	bundle->type = bundle_type_new;

	return 0;

fail_submit:
	return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Failed to request the (group of) action(s)");
}
2850 sfc_mae_rule_parse_action_of_push_vlan(
2851 const struct rte_flow_action_of_push_vlan *conf,
2852 struct sfc_mae_actions_bundle *bundle)
2854 bundle->vlan_push_tpid = conf->ethertype;
2858 sfc_mae_rule_parse_action_of_set_vlan_vid(
2859 const struct rte_flow_action_of_set_vlan_vid *conf,
2860 struct sfc_mae_actions_bundle *bundle)
2862 bundle->vlan_push_tci |= (conf->vlan_vid &
2863 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2867 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2868 const struct rte_flow_action_of_set_vlan_pcp *conf,
2869 struct sfc_mae_actions_bundle *bundle)
2871 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2872 RTE_LEN2MASK(3, uint8_t)) << 13;
2874 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
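/*
 * The two helpers above fill disjoint parts of the 16-bit TCI:
 * OF_SET_VLAN_VID provides bits 0-11 (VID), OF_SET_VLAN_PCP provides
 * bits 13-15 (PCP). E.g. vid 5 and pcp 3 yield TCI (3 << 13) | 5 =
 * 0x6005, kept in big-endian byte order for the VLAN_PUSH submission.
 */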
struct sfc_mae_parsed_item {
	const struct rte_flow_item	*item;
	size_t				proto_header_ofst;
	size_t				proto_header_size;
};
/*
 * For each 16-bit word of the given header, override
 * bits enforced by the corresponding 16-bit mask.
 */
static void
sfc_mae_header_force_item_masks(uint8_t *header_buf,
				const struct sfc_mae_parsed_item *parsed_items,
				unsigned int nb_parsed_items)
{
	unsigned int item_idx;

	for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
		const struct sfc_mae_parsed_item *parsed_item;
		const struct rte_flow_item *item;
		size_t proto_header_size;
		size_t ofst;

		parsed_item = &parsed_items[item_idx];
		proto_header_size = parsed_item->proto_header_size;
		item = parsed_item->item;

		for (ofst = 0; ofst < proto_header_size;
		     ofst += sizeof(rte_be16_t)) {
			rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
			const rte_be16_t *w_maskp;
			const rte_be16_t *w_specp;

			w_maskp = RTE_PTR_ADD(item->mask, ofst);
			w_specp = RTE_PTR_ADD(item->spec, ofst);

			*wp &= ~(*w_maskp);
			*wp |= (*w_specp & *w_maskp);
		}

		header_buf += proto_header_size;
	}
}
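/*
 * Example of the override above: if an IPv4 item supplies hdr.src_addr
 * 192.0.2.1 with an all-ones mask, those bytes in the bounce buffer are
 * forced to the spec value word by word, while fields left with an
 * all-zeros mask keep the defaults the caller fills in afterwards.
 */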
#define SFC_IPV4_TTL_DEF	0x40
#define SFC_IPV6_VTC_FLOW_DEF	0x60000000
#define SFC_IPV6_HOP_LIMITS_DEF	0xff
#define SFC_VXLAN_FLAGS_DEF	0x08000000

static int
sfc_mae_rule_parse_action_vxlan_encap(
			    struct sfc_mae *mae,
			    const struct rte_flow_action_vxlan_encap *conf,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	struct rte_flow_item *pattern = conf->definition;
	uint8_t *buf = bounce_eh->buf;

	/* This array will keep track of non-VOID pattern items. */
	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
						2 /* VLAN tags */ +
						1 /* IPv4 or IPv6 */ +
						1 /* UDP */ +
						1 /* VXLAN */];
	unsigned int nb_parsed_items = 0;

	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
				  sizeof(struct rte_ipv6_hdr))];
	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_udp_hdr *udp = NULL;
	unsigned int nb_vlan_tags = 0;
	size_t next_proto_ofst = 0;
	size_t ethertype_ofst = 0;
	uint64_t exp_items;
	int rc;

	if (pattern == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"The encap. header definition is NULL");
	}

	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
	bounce_eh->size = 0;

	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
		struct sfc_mae_parsed_item *parsed_item;
		const uint64_t exp_items_extra_vlan[] = {
			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
		};
		size_t proto_header_size;
		rte_be16_t *ethertypep;
		uint8_t *next_protop;
		uint8_t *buf_cur;

		if (pattern->spec == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item spec in the encap. header");
		}

		if (pattern->mask == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item mask in the encap. header");
		}

		if (pattern->last != NULL) {
			/* This is not a match pattern, so disallow range. */
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Range item in the encap. header");
		}

		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
			/* Handle VOID separately, for clarity. */
			continue;
		}

		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unexpected item in the encap. header");
		}

		parsed_item = &parsed_items[nb_parsed_items];
		buf_cur = buf + bounce_eh->size;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ether_hdr);

			ethertype_ofst = eth_ethertype_ofst;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vlan_hdr);

			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);

			ethertype_ofst = bounce_eh->size +
				offsetof(struct rte_vlan_hdr, eth_proto);

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			exp_items |= exp_items_extra_vlan[nb_vlan_tags];

			++nb_vlan_tags;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv4_hdr);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);

			next_proto_ofst = bounce_eh->size +
				offsetof(struct rte_ipv4_hdr, next_proto_id);

			ipv4 = (struct rte_ipv4_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv6_hdr);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);

			next_proto_ofst = bounce_eh->size +
				offsetof(struct rte_ipv6_hdr, proto);

			ipv6 = (struct rte_ipv6_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_udp_hdr);

			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
			*next_protop = IPPROTO_UDP;

			udp = (struct rte_udp_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vxlan_hdr);

			vxlan = (struct rte_vxlan_hdr *)buf_cur;

			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
			udp->dgram_len = RTE_BE16(sizeof(*udp) +
						  sizeof(*vxlan));
			udp->dgram_cksum = 0;

			exp_items = 0;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unknown item in the encap. header");
		}

		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
			return rte_flow_error_set(error, E2BIG,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"The encap. header is too big");
		}

		if ((proto_header_size & 1) != 0) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Odd layer size in the encap. header");
		}

		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
		bounce_eh->size += proto_header_size;

		parsed_item->item = pattern;
		parsed_item->proto_header_size = proto_header_size;
		++nb_parsed_items;
	}

	if (exp_items != 0) {
		/* Parsing item VXLAN would have reset exp_items to 0. */
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"No item VXLAN in the encap. header");
	}

	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
				      sizeof(*vxlan));
	/* The HW cannot compute this checksum. */
	ipv4->hdr_checksum = 0;
	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);

	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
	ipv6->payload_len = udp->dgram_len;

	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);

	/* Take care of the masks. */
	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);

	rc = efx_mae_action_set_populate_encap(spec);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "failed to request action ENCAP");
	}

	return rc;
}
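/*
 * For reference, a typical definition of ETH / IPV4 / UDP / VXLAN items
 * produces a 14 + 20 + 8 + 8 = 50 octet bounce buffer, with ether_type,
 * next_proto_id, the UDP destination port (4789) and the VXLAN flags
 * pre-set by the code above irrespective of the item spec contents.
 */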
3186 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3187 const struct rte_flow_action_mark *conf,
3188 const struct sfc_flow_spec_mae *spec_mae,
3189 efx_mae_actions_t *spec)
3193 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3194 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3195 } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3196 sfc_err(sa, "the mark value is too large");
3200 rc = efx_mae_action_set_populate_mark(spec, conf->id);
3202 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
static int
sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
				const struct rte_flow_action_count *conf
					__rte_unused,
				efx_mae_actions_t *spec)
{
	int rc;

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_err(sa,
			"counter queue is not configured for COUNT action");
		rc = EINVAL;
		goto fail_counter_queue_uninit;
	}

	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
		rc = EINVAL;
		goto fail_no_service_core;
	}

	rc = efx_mae_action_set_populate_count(spec);
	if (rc != 0) {
		sfc_err(sa,
			"failed to populate counters in MAE action set: %s",
			rte_strerror(rc));
		goto fail_populate_count;
	}

	return 0;

fail_populate_count:
fail_no_service_core:
fail_counter_queue_uninit:

	return rc;
}
static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
				   const struct rte_flow_action_phy_port *conf,
				   efx_mae_actions_t *spec)
{
	efx_mport_sel_t mport;
	uint32_t phy_port;
	int rc;

	if (conf->original != 0)
		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
	else
		phy_port = conf->index;

	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
			phy_port, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
				const struct rte_flow_action_vf *vf_conf,
				efx_mae_actions_t *spec)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t mport;
	uint32_t vf;
	int rc;

	if (vf_conf == NULL)
		vf = EFX_PCI_VF_INVALID;
	else if (vf_conf->original != 0)
		vf = encp->enc_vf;
	else
		vf = vf_conf->id;

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
			encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
			strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
				  const struct rte_flow_action_port_id *conf,
				  efx_mae_actions_t *spec)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t mport;
	uint16_t port_id;
	int rc;

	if (conf->id > UINT16_MAX)
		return EOVERFLOW;

	port_id = (conf->original != 0) ? sas->port_id : conf->id;

	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
					   port_id, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
			port_id, strerror(rc));
		return rc;
	}

	rc = efx_mae_action_set_populate_deliver(spec, &mport);
	if (rc != 0) {
		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
			mport.sel, strerror(rc));
	}

	return rc;
}
static const char * const action_names[] = {
	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
	[RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
	[RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
	[RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
	[RTE_FLOW_ACTION_TYPE_PF] = "PF",
	[RTE_FLOW_ACTION_TYPE_VF] = "VF",
	[RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
	[RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
};
static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
			  const struct rte_flow_action *action,
			  const struct sfc_flow_spec_mae *spec_mae,
			  struct sfc_mae_actions_bundle *bundle,
			  efx_mae_actions_t *spec,
			  struct rte_flow_error *error)
{
	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	bool custom_error = B_FALSE;
	int rc = 0;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
				       bundle->actions_mask);
		if (outer_rule == NULL ||
		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
			rc = EINVAL;
		else
			rc = efx_mae_action_set_populate_decap(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_vlan_pop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
							   action->conf,
							   spec, error);
		custom_error = B_TRUE;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
				       bundle->actions_mask);
		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
			rc = efx_mae_action_set_populate_flag(spec);
		} else {
			rc = rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"flag delivery has not been negotiated");
			custom_error = B_TRUE;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
				       bundle->actions_mask);
		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
		    spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
			rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
							    spec_mae, spec);
		} else {
			rc = rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"mark delivery has not been negotiated");
			custom_error = B_TRUE;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_drop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_JUMP:
		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
			/* Workaround. See sfc_flow_parse_rte_to_mae() */
			break;
		}
		/* FALLTHROUGH */
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Unsupported action");
	}

	if (rc == 0) {
		bundle->actions_mask |= (1ULL << action->type);
	} else if (!custom_error) {
		if (action->type < RTE_DIM(action_names)) {
			const char *action_name = action_names[action->type];

			if (action_name != NULL) {
				sfc_err(sa, "action %s was rejected: %s",
					action_name, strerror(rc));
			}
		}
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "Failed to request the action");
	}

	return rc;
}
static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}

static int
sfc_mae_process_encap_header(struct sfc_adapter *sa,
			     const struct sfc_mae_bounce_eh *bounce_eh,
			     struct sfc_mae_encap_header **encap_headerp)
{
	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
		*encap_headerp = NULL;
		return 0;
	}

	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
	if (*encap_headerp != NULL)
		return 0;

	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
}
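/*
 * The attach-or-add pattern above is shared by the driver's reusable MAE
 * resources: an existing encap. header entry with identical contents is
 * reused (presumably with its reference count bumped), and a new entry
 * is registered only on a lookup miss.
 */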
int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			   const struct rte_flow_action actions[],
			   struct sfc_flow_spec_mae *spec_mae,
			   struct rte_flow_error *error)
{
	struct sfc_mae_encap_header *encap_header = NULL;
	struct sfc_mae_actions_bundle bundle = {0};
	struct sfc_flow_tunnel *counter_ft = NULL;
	uint64_t *ft_group_hit_counter = NULL;
	const struct rte_flow_action *action;
	struct sfc_mae *mae = &sa->mae;
	unsigned int n_count = 0;
	efx_mae_actions_t *spec;
	int rc;

	rte_errno = 0;

	if (actions == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL actions");
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
	if (rc != 0)
		goto fail_action_set_spec_init;

	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
			++n_count;
	}

	if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
		/* JUMP rules don't decapsulate packets. GROUP rules do. */
		rc = efx_mae_action_set_populate_decap(spec);
		if (rc != 0)
			goto fail_enforce_ft_decap;

		if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
			/*
			 * The user opted not to use action COUNT in this rule,
			 * but the counter should be enabled implicitly because
			 * packets hitting this rule contribute to the tunnel's
			 * total number of hits. See sfc_mae_counter_get().
			 */
			rc = efx_mae_action_set_populate_count(spec);
			if (rc != 0)
				goto fail_enforce_ft_count;

			n_count = 1;
		}
	}

	/* Cleanup after previous encap. header bounce buffer usage. */
	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);

	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;

		rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
					       &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;
	}

	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
	if (rc != 0)
		goto fail_rule_parse_action;

	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
	if (rc != 0)
		goto fail_process_encap_header;

	if (n_count > 1) {
		rc = ENOTSUP;
		sfc_err(sa, "too many count actions requested: %u", n_count);
		goto fail_nb_count;
	}

	switch (spec_mae->ft_rule_type) {
	case SFC_FT_RULE_NONE:
		break;
	case SFC_FT_RULE_JUMP:
		/* Workaround. See sfc_flow_parse_rte_to_mae() */
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		if (rc != 0)
			goto fail_workaround_jump_delivery;

		counter_ft = spec_mae->ft;
		break;
	case SFC_FT_RULE_GROUP:
		/*
		 * Packets that go to the rule's AR have FT mark set (from the
		 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
		 * packets. The user may have provided their own action
		 * MARK above, so don't check the return value here.
		 */
		(void)efx_mae_action_set_populate_mark(spec, 0);

		ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
		break;
	default:
		SFC_ASSERT(B_FALSE);
	}

	spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
							 n_count, spec);
	if (spec_mae->action_set != NULL) {
		sfc_mae_encap_header_del(sa, encap_header);
		efx_mae_action_set_spec_fini(sa->nic, spec);
		return 0;
	}

	rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
				    ft_group_hit_counter, counter_ft, n_count,
				    &spec_mae->action_set);
	if (rc != 0)
		goto fail_action_set_add;

	return 0;

fail_action_set_add:
fail_workaround_jump_delivery:
fail_nb_count:
	sfc_mae_encap_header_del(sa, encap_header);

fail_process_encap_header:
fail_rule_parse_action:
	efx_mae_action_set_spec_fini(sa->nic, spec);

fail_enforce_ft_count:
fail_enforce_ft_decap:
fail_action_set_spec_init:
	if (rc > 0 && rte_errno == 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "Failed to process the action");
	}

	return rc;
}
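/*
 * Note that sfc_mae_rule_parse_actions() walks the action list twice:
 * the first pass only counts COUNT actions (and lets tunnel offload
 * GROUP rules enforce implicit DECAP/COUNT), while the second pass
 * interleaves sfc_mae_actions_bundle_sync() with per-action parsing so
 * that multi-action bundles are submitted exactly once.
 */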
static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
			const efx_mae_match_spec_t *left,
			const efx_mae_match_spec_t *right)
{
	bool have_same_class;
	int rc;

	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
					   &have_same_class);

	return (rc == 0) ? have_same_class : false;
}
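/*
 * Roughly speaking, two match specifications are of the same class when
 * they match on the same set of fields with the same masks and differ
 * only in the match values; such rules are known to be able to co-exist
 * in one HW table, which is what the verification helpers below rely on.
 */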
static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
				struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	struct sfc_mae_outer_rule *entry;
	struct sfc_mae *mae = &sa->mae;

	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
		/* An active rule is reused. Its class is wittingly valid. */
		return 0;
	}

	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
			      sfc_mae_outer_rules, entries) {
		const efx_mae_match_spec_t *left = entry->match_spec;
		const efx_mae_match_spec_t *right = rule->match_spec;

		if (entry == rule)
			continue;

		if (sfc_mae_rules_class_cmp(sa, left, right))
			return 0;
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for outer frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
				 struct sfc_flow_spec_mae *spec)
{
	const struct rte_flow *entry;

	if (spec->match_spec == NULL)
		return 0;

	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
		const struct sfc_flow_spec *entry_spec = &entry->spec;
		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
		const efx_mae_match_spec_t *left = es_mae->match_spec;
		const efx_mae_match_spec_t *right = spec->match_spec;

		switch (entry_spec->type) {
		case SFC_FLOW_SPEC_FILTER:
			/* Ignore VNIC-level flows */
			break;
		case SFC_FLOW_SPEC_MAE:
			if (sfc_mae_rules_class_cmp(sa, left, right))
				return 0;
			break;
		default:
			SFC_ASSERT(B_FALSE);
		}
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for inner frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success and non-zero in the case of error.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in started state. This state is compulsory because
 *   it only makes sense to compare the rule class of the flow
 *   being validated with classes of the active rules.
 *   Such classes are wittingly supported by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ETHDEV_STARTED)
		return EAGAIN;

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
		if (rc != 0)
			return rc;
	}

	return sfc_mae_action_rule_class_verify(sa, spec_mae);
}
int
sfc_mae_flow_insert(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
					       spec_mae->match_spec);
		if (rc != 0)
			goto fail_outer_rule_enable;
	}

	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
		spec_mae->ft->reset_jump_hit_counter =
			spec_mae->ft->group_hit_counter;
	}

	if (action_set == NULL) {
		sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
		return 0;
	}

	rc = sfc_mae_action_set_enable(sa, action_set);
	if (rc != 0)
		goto fail_action_set_enable;

	if (action_set->n_counters > 0) {
		rc = sfc_mae_counter_start(sa);
		if (rc != 0) {
			sfc_err(sa, "failed to start MAE counters support: %s",
				rte_strerror(rc));
			goto fail_mae_counter_start;
		}
	}

	fw_rsrc = &action_set->fw_rsrc;

	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
					NULL, &fw_rsrc->aset_id,
					&spec_mae->rule_id);
	if (rc != 0)
		goto fail_action_rule_insert;

	sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);

	return 0;

fail_action_rule_insert:
fail_mae_counter_start:
	sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

fail_outer_rule_enable:
	return rc;
}
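/*
 * The rollback above mirrors the setup order: a failed action rule
 * insertion disables the action set, and any failure past
 * sfc_mae_outer_rule_enable() releases the outer rule reference.
 */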
int
sfc_mae_flow_remove(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	if (action_set == NULL) {
		sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
		goto skip_action_rule;
	}

	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);

	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
			flow, spec_mae->rule_id.id, strerror(rc));
	}
	sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);
	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	sfc_mae_action_set_disable(sa, action_set);

skip_action_rule:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

	return 0;
}
static int
sfc_mae_query_counter(struct sfc_adapter *sa,
		      struct sfc_flow_spec_mae *spec,
		      const struct rte_flow_action *action,
		      struct rte_flow_query_count *data,
		      struct rte_flow_error *error)
{
	struct sfc_mae_action_set *action_set = spec->action_set;
	const struct rte_flow_action_count *conf = action->conf;
	unsigned int i;
	int rc;

	if (action_set == NULL || action_set->n_counters == 0) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queried flow rule does not have count actions");
	}

	for (i = 0; i < action_set->n_counters; i++) {
		/*
		 * Get the first available counter of the flow rule if
		 * counter ID is not specified, provided that this
		 * counter is not an automatic (implicit) one.
		 */
		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
			continue;

		rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
					 &action_set->counters[i], data);
		if (rc != 0) {
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"Queried flow rule counter action is invalid");
		}

		return 0;
	}

	return rte_flow_error_set(error, ENOENT,
				  RTE_FLOW_ERROR_TYPE_ACTION, action,
				  "No such flow rule action or count ID");
}
int
sfc_mae_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *action,
		   void *data,
		   struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		return sfc_mae_query_counter(sa, spec_mae, action,
					     data, error);
	default:
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Query for action of this type is not supported");
	}
}
int
sfc_mae_switchdev_init(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t pf;
	efx_mport_sel_t phy;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sa->switchdev) {
		sfc_log_init(sa, "switchdev is not enabled - skip");
		return 0;
	}

	if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
		rc = ENOTSUP;
		sfc_err(sa, "failed to init switchdev - no MAE support");
		goto fail_no_mae;
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &pf);
	if (rc != 0) {
		sfc_err(sa, "failed to get PF m-port");
		goto fail_pf_get;
	}

	rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
	if (rc != 0) {
		sfc_err(sa, "failed to get PHY m-port");
		goto fail_phy_get;
	}

	rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
					SFC_MAE_RULE_PRIO_LOWEST,
					&mae->switchdev_rule_pf_to_ext);
	if (rc != 0) {
		sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
		goto fail_pf_add;
	}

	rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
					SFC_MAE_RULE_PRIO_LOWEST,
					&mae->switchdev_rule_ext_to_pf);
	if (rc != 0) {
		sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
		goto fail_phy_add;
	}

	sfc_log_init(sa, "done");

	return 0;

fail_phy_add:
	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);

fail_pf_add:
fail_phy_get:
fail_pf_get:
fail_no_mae:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
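/*
 * The two rules installed above implement the default switchdev datapath:
 * traffic from the PF m-port is delivered to the physical port and vice
 * versa. Both rules use the lowest priority, so flows inserted via the
 * RTE flow API can take precedence over this default forwarding.
 */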
void
sfc_mae_switchdev_fini(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;

	if (!sa->switchdev)
		return;

	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
	sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
}