1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
19 #include "sfc_flow_tunnel.h"
20 #include "sfc_mae_counter.h"
22 #include "sfc_switch.h"
23 #include "sfc_service.h"
26 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
27 efx_mport_sel_t *mportp)
29 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
31 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
36 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
37 uint32_t nb_counters_max)
39 return sfc_mae_counters_init(&registry->counters, nb_counters_max);
43 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
45 sfc_mae_counters_fini(&registry->counters);
49 sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
50 struct sfc_mae_rule **rule)
52 struct sfc_mae *mae = &sa->mae;
53 struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
57 for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
58 if (internal_rules->rules[entry].spec == NULL)
62 if (entry == SFC_MAE_NB_RULES_MAX) {
64 sfc_err(sa, "failed too many rules (%u rules used)", entry);
65 goto fail_too_many_rules;
68 *rule = &internal_rules->rules[entry];
77 sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
78 const efx_mport_sel_t *mport_match,
79 const efx_mport_sel_t *mport_deliver,
80 int prio, struct sfc_mae_rule **rulep)
82 struct sfc_mae *mae = &sa->mae;
83 struct sfc_mae_rule *rule;
86 sfc_log_init(sa, "entry");
88 if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
90 sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
91 mae->nb_action_rule_prios_max);
92 goto fail_invalid_prio;
95 prio = mae->nb_action_rule_prios_max - 1;
97 rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
99 goto fail_find_empty_slot;
101 sfc_log_init(sa, "init MAE match spec");
102 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
103 (uint32_t)prio, &rule->spec);
105 sfc_err(sa, "failed to init MAE match spec");
106 goto fail_match_init;
109 rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
111 sfc_err(sa, "failed to get MAE match mport selector");
115 rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
117 sfc_err(sa, "failed to init MAE action set");
118 goto fail_action_init;
121 rc = efx_mae_action_set_populate_deliver(rule->actions,
124 sfc_err(sa, "failed to populate deliver action");
125 goto fail_populate_deliver;
128 rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
131 sfc_err(sa, "failed to allocate action set");
132 goto fail_action_set_alloc;
135 rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
139 sfc_err(sa, "failed to insert action rule");
140 goto fail_rule_insert;
145 sfc_log_init(sa, "done");
150 efx_mae_action_set_free(sa->nic, &rule->action_set);
152 fail_action_set_alloc:
153 fail_populate_deliver:
154 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
158 efx_mae_match_spec_fini(sa->nic, rule->spec);
161 fail_find_empty_slot:
163 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
168 sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
170 if (rule == NULL || rule->spec == NULL)
173 efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
174 efx_mae_action_set_free(sa->nic, &rule->action_set);
175 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
176 efx_mae_match_spec_fini(sa->nic, rule->spec);
182 sfc_mae_attach(struct sfc_adapter *sa)
184 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
185 struct sfc_mae_switch_port_request switch_port_request = {0};
186 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
187 efx_mport_sel_t entity_mport;
188 struct sfc_mae *mae = &sa->mae;
189 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
190 efx_mae_limits_t limits;
193 sfc_log_init(sa, "entry");
195 if (!encp->enc_mae_supported) {
196 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
200 if (encp->enc_mae_admin) {
201 sfc_log_init(sa, "init MAE");
202 rc = efx_mae_init(sa->nic);
206 sfc_log_init(sa, "get MAE limits");
207 rc = efx_mae_get_limits(sa->nic, &limits);
209 goto fail_mae_get_limits;
211 sfc_log_init(sa, "init MAE counter registry");
212 rc = sfc_mae_counter_registry_init(&mae->counter_registry,
213 limits.eml_max_n_counters);
215 sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
216 limits.eml_max_n_counters, rte_strerror(rc));
217 goto fail_counter_registry_init;
221 sfc_log_init(sa, "assign entity MPORT");
222 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
224 goto fail_mae_assign_entity_mport;
226 sfc_log_init(sa, "assign RTE switch domain");
227 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
229 goto fail_mae_assign_switch_domain;
231 sfc_log_init(sa, "assign RTE switch port");
232 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
233 switch_port_request.entity_mportp = &entity_mport;
234 /* RTE ethdev MPORT matches that of the entity for independent ports. */
235 switch_port_request.ethdev_mportp = &entity_mport;
236 switch_port_request.ethdev_port_id = sas->port_id;
237 switch_port_request.port_data.indep.mae_admin =
238 encp->enc_mae_admin == B_TRUE;
239 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
240 &switch_port_request,
241 &mae->switch_port_id);
243 goto fail_mae_assign_switch_port;
245 if (encp->enc_mae_admin) {
246 sfc_log_init(sa, "allocate encap. header bounce buffer");
247 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
248 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
249 bounce_eh->buf_size, 0);
250 if (bounce_eh->buf == NULL)
251 goto fail_mae_alloc_bounce_eh;
253 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
254 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
255 mae->encap_types_supported = limits.eml_encap_types_supported;
258 TAILQ_INIT(&mae->outer_rules);
259 TAILQ_INIT(&mae->encap_headers);
260 TAILQ_INIT(&mae->action_sets);
262 if (encp->enc_mae_admin)
263 mae->status = SFC_MAE_STATUS_ADMIN;
265 mae->status = SFC_MAE_STATUS_SUPPORTED;
267 sfc_log_init(sa, "done");
271 fail_mae_alloc_bounce_eh:
272 fail_mae_assign_switch_port:
273 fail_mae_assign_switch_domain:
274 fail_mae_assign_entity_mport:
275 if (encp->enc_mae_admin)
276 sfc_mae_counter_registry_fini(&mae->counter_registry);
278 fail_counter_registry_init:
280 if (encp->enc_mae_admin)
281 efx_mae_fini(sa->nic);
284 sfc_log_init(sa, "failed %d", rc);
290 sfc_mae_detach(struct sfc_adapter *sa)
292 struct sfc_mae *mae = &sa->mae;
293 enum sfc_mae_status status_prev = mae->status;
295 sfc_log_init(sa, "entry");
297 mae->nb_action_rule_prios_max = 0;
298 mae->status = SFC_MAE_STATUS_UNKNOWN;
300 if (status_prev != SFC_MAE_STATUS_ADMIN)
303 rte_free(mae->bounce_eh.buf);
304 sfc_mae_counter_registry_fini(&mae->counter_registry);
306 efx_mae_fini(sa->nic);
308 sfc_log_init(sa, "done");
311 static struct sfc_mae_outer_rule *
312 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
313 const efx_mae_match_spec_t *match_spec,
314 efx_tunnel_protocol_t encap_type)
316 struct sfc_mae_outer_rule *rule;
317 struct sfc_mae *mae = &sa->mae;
319 SFC_ASSERT(sfc_adapter_is_locked(sa));
321 TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
322 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
323 rule->encap_type == encap_type) {
324 sfc_dbg(sa, "attaching to outer_rule=%p", rule);
334 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
335 efx_mae_match_spec_t *match_spec,
336 efx_tunnel_protocol_t encap_type,
337 struct sfc_mae_outer_rule **rulep)
339 struct sfc_mae_outer_rule *rule;
340 struct sfc_mae *mae = &sa->mae;
342 SFC_ASSERT(sfc_adapter_is_locked(sa));
344 rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
349 rule->match_spec = match_spec;
350 rule->encap_type = encap_type;
352 rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
354 TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
358 sfc_dbg(sa, "added outer_rule=%p", rule);
364 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
365 struct sfc_mae_outer_rule *rule)
367 struct sfc_mae *mae = &sa->mae;
369 SFC_ASSERT(sfc_adapter_is_locked(sa));
370 SFC_ASSERT(rule->refcnt != 0);
374 if (rule->refcnt != 0)
377 if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
378 rule->fw_rsrc.refcnt != 0) {
379 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
380 rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
383 efx_mae_match_spec_fini(sa->nic, rule->match_spec);
385 TAILQ_REMOVE(&mae->outer_rules, rule, entries);
388 sfc_dbg(sa, "deleted outer_rule=%p", rule);
392 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
393 struct sfc_mae_outer_rule *rule,
394 efx_mae_match_spec_t *match_spec_action)
396 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
399 SFC_ASSERT(sfc_adapter_is_locked(sa));
401 if (fw_rsrc->refcnt == 0) {
402 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
403 SFC_ASSERT(rule->match_spec != NULL);
405 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
409 sfc_err(sa, "failed to enable outer_rule=%p: %s",
415 if (match_spec_action == NULL)
416 goto skip_action_rule;
418 rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
421 if (fw_rsrc->refcnt == 0) {
422 (void)efx_mae_outer_rule_remove(sa->nic,
424 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
427 sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
433 if (fw_rsrc->refcnt == 0) {
434 sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
435 rule, fw_rsrc->rule_id.id);
444 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
445 struct sfc_mae_outer_rule *rule)
447 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
450 SFC_ASSERT(sfc_adapter_is_locked(sa));
452 if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
453 fw_rsrc->refcnt == 0) {
454 sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
455 rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
459 if (fw_rsrc->refcnt == 1) {
460 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
462 sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
463 rule, fw_rsrc->rule_id.id);
465 sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
466 rule, fw_rsrc->rule_id.id, strerror(rc));
468 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
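/*
 * Illustrative note on the two-level reference counting above (the
 * same scheme is used by the other MAE resources below): rule->refcnt
 * counts software users of the cached object, while
 * rule->fw_rsrc.refcnt counts enabled flows and so drives the
 * lifetime of the FW resource. A typical insertion path is, roughly:
 *
 *	rule = sfc_mae_outer_rule_attach(sa, match_spec, encap_type);
 *	if (rule == NULL)
 *		rc = sfc_mae_outer_rule_add(sa, match_spec,
 *					    encap_type, &rule);
 *	...
 *	rc = sfc_mae_outer_rule_enable(sa, rule, spec_action);
 *
 * Teardown mirrors this with _disable() followed by _del().
 */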
474 static struct sfc_mae_encap_header *
475 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
476 const struct sfc_mae_bounce_eh *bounce_eh)
478 struct sfc_mae_encap_header *encap_header;
479 struct sfc_mae *mae = &sa->mae;
481 SFC_ASSERT(sfc_adapter_is_locked(sa));
483 TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
484 if (encap_header->size == bounce_eh->size &&
485 memcmp(encap_header->buf, bounce_eh->buf,
486 bounce_eh->size) == 0) {
487 sfc_dbg(sa, "attaching to encap_header=%p",
489 ++(encap_header->refcnt);
498 sfc_mae_encap_header_add(struct sfc_adapter *sa,
499 const struct sfc_mae_bounce_eh *bounce_eh,
500 struct sfc_mae_encap_header **encap_headerp)
502 struct sfc_mae_encap_header *encap_header;
503 struct sfc_mae *mae = &sa->mae;
505 SFC_ASSERT(sfc_adapter_is_locked(sa));
507 encap_header = rte_zmalloc("sfc_mae_encap_header",
508 sizeof(*encap_header), 0);
509 if (encap_header == NULL)
512 encap_header->size = bounce_eh->size;
514 encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
515 encap_header->size, 0);
516 if (encap_header->buf == NULL) {
517 rte_free(encap_header);
521 rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
523 encap_header->refcnt = 1;
524 encap_header->type = bounce_eh->type;
525 encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
527 TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
529 *encap_headerp = encap_header;
531 sfc_dbg(sa, "added encap_header=%p", encap_header);
537 sfc_mae_encap_header_del(struct sfc_adapter *sa,
538 struct sfc_mae_encap_header *encap_header)
540 struct sfc_mae *mae = &sa->mae;
542 if (encap_header == NULL)
545 SFC_ASSERT(sfc_adapter_is_locked(sa));
546 SFC_ASSERT(encap_header->refcnt != 0);
548 --(encap_header->refcnt);
550 if (encap_header->refcnt != 0)
553 if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
554 encap_header->fw_rsrc.refcnt != 0) {
555 sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
556 encap_header, encap_header->fw_rsrc.eh_id.id,
557 encap_header->fw_rsrc.refcnt);
560 TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
561 rte_free(encap_header->buf);
562 rte_free(encap_header);
564 sfc_dbg(sa, "deleted encap_header=%p", encap_header);
568 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
569 struct sfc_mae_encap_header *encap_header,
570 efx_mae_actions_t *action_set_spec)
572 struct sfc_mae_fw_rsrc *fw_rsrc;
575 if (encap_header == NULL)
578 SFC_ASSERT(sfc_adapter_is_locked(sa));
580 fw_rsrc = &encap_header->fw_rsrc;
582 if (fw_rsrc->refcnt == 0) {
583 SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
584 SFC_ASSERT(encap_header->buf != NULL);
585 SFC_ASSERT(encap_header->size != 0);
587 rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
592 sfc_err(sa, "failed to enable encap_header=%p: %s",
593 encap_header, strerror(rc));
598 rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
601 if (fw_rsrc->refcnt == 0) {
602 (void)efx_mae_encap_header_free(sa->nic,
604 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
607 sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
612 if (fw_rsrc->refcnt == 0) {
613 sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
614 encap_header, fw_rsrc->eh_id.id);
623 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
624 struct sfc_mae_encap_header *encap_header)
626 struct sfc_mae_fw_rsrc *fw_rsrc;
629 if (encap_header == NULL)
632 SFC_ASSERT(sfc_adapter_is_locked(sa));
634 fw_rsrc = &encap_header->fw_rsrc;
636 if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
637 fw_rsrc->refcnt == 0) {
638 sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
639 encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
643 if (fw_rsrc->refcnt == 1) {
644 rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
646 sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
647 encap_header, fw_rsrc->eh_id.id);
649 sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
650 encap_header, fw_rsrc->eh_id.id, strerror(rc));
652 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
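/*
 * Illustrative sketch: callers deduplicate encap. headers by trying
 * to attach to a cached object before adding a new one, so flows with
 * byte-identical headers in the bounce buffer share a single entry
 * (and, once enabled, a single FW resource):
 *
 *	encap_header = sfc_mae_encap_header_attach(sa, bounce_eh);
 *	if (encap_header == NULL)
 *		rc = sfc_mae_encap_header_add(sa, bounce_eh,
 *					      &encap_header);
 */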
659 sfc_mae_counters_enable(struct sfc_adapter *sa,
660 struct sfc_mae_counter_id *counters,
661 unsigned int n_counters,
662 efx_mae_actions_t *action_set_spec)
666 sfc_log_init(sa, "entry");
668 if (n_counters == 0) {
669 sfc_log_init(sa, "no counters - skip");
673 SFC_ASSERT(sfc_adapter_is_locked(sa));
674 SFC_ASSERT(n_counters == 1);
676 rc = sfc_mae_counter_enable(sa, &counters[0]);
678 sfc_err(sa, "failed to enable MAE counter %u: %s",
679 counters[0].mae_id.id, rte_strerror(rc));
680 goto fail_counter_add;
683 rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
684 &counters[0].mae_id);
686 sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
687 counters[0].mae_id.id, rte_strerror(rc));
688 goto fail_fill_in_id;
694 (void)sfc_mae_counter_disable(sa, &counters[0]);
697 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
702 sfc_mae_counters_disable(struct sfc_adapter *sa,
703 struct sfc_mae_counter_id *counters,
704 unsigned int n_counters)
709 SFC_ASSERT(sfc_adapter_is_locked(sa));
710 SFC_ASSERT(n_counters == 1);
712 if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
713 sfc_err(sa, "failed to disable: already disabled");
717 return sfc_mae_counter_disable(sa, &counters[0]);
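/*
 * Illustrative note: the asserts above encode the current contract
 * that an action set carries at most one COUNT action, so callers
 * pass n_counters of either 0 or 1; sfc_mae_action_set_enable()
 * below invokes this as, roughly:
 *
 *	rc = sfc_mae_counters_enable(sa, action_set->counters,
 *				     action_set->n_counters, spec);
 */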
720 static struct sfc_mae_action_set *
721 sfc_mae_action_set_attach(struct sfc_adapter *sa,
722 const struct sfc_mae_encap_header *encap_header,
723 unsigned int n_count,
724 const efx_mae_actions_t *spec)
726 struct sfc_mae_action_set *action_set;
727 struct sfc_mae *mae = &sa->mae;
729 SFC_ASSERT(sfc_adapter_is_locked(sa));
731 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
733 * Shared counters are not supported, hence action sets with
734 * COUNT are not attachable.
736 if (action_set->encap_header == encap_header &&
738 efx_mae_action_set_specs_equal(action_set->spec, spec)) {
739 sfc_dbg(sa, "attaching to action_set=%p", action_set);
740 ++(action_set->refcnt);
749 sfc_mae_action_set_add(struct sfc_adapter *sa,
750 const struct rte_flow_action actions[],
751 efx_mae_actions_t *spec,
752 struct sfc_mae_encap_header *encap_header,
753 uint64_t *ft_group_hit_counter,
754 struct sfc_flow_tunnel *ft,
755 unsigned int n_counters,
756 struct sfc_mae_action_set **action_setp)
758 struct sfc_mae_action_set *action_set;
759 struct sfc_mae *mae = &sa->mae;
762 SFC_ASSERT(sfc_adapter_is_locked(sa));
764 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
765 if (action_set == NULL) {
766 sfc_err(sa, "failed to alloc action set");
770 if (n_counters > 0) {
771 const struct rte_flow_action *action;
773 action_set->counters = rte_malloc("sfc_mae_counter_ids",
774 sizeof(action_set->counters[0]) * n_counters, 0);
775 if (action_set->counters == NULL) {
776 rte_free(action_set);
777 sfc_err(sa, "failed to alloc counters");
781 for (i = 0; i < n_counters; ++i) {
782 action_set->counters[i].rte_id_valid = B_FALSE;
783 action_set->counters[i].mae_id.id =
784 EFX_MAE_RSRC_ID_INVALID;
786 action_set->counters[i].ft_group_hit_counter =
787 ft_group_hit_counter;
788 action_set->counters[i].ft = ft;
791 for (action = actions, i = 0;
792 action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
794 const struct rte_flow_action_count *conf;
796 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
801 action_set->counters[i].rte_id_valid = B_TRUE;
802 action_set->counters[i].rte_id = conf->id;
805 action_set->n_counters = n_counters;
808 action_set->refcnt = 1;
809 action_set->spec = spec;
810 action_set->encap_header = encap_header;
812 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
814 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
816 *action_setp = action_set;
818 sfc_dbg(sa, "added action_set=%p", action_set);
824 sfc_mae_action_set_del(struct sfc_adapter *sa,
825 struct sfc_mae_action_set *action_set)
827 struct sfc_mae *mae = &sa->mae;
829 SFC_ASSERT(sfc_adapter_is_locked(sa));
830 SFC_ASSERT(action_set->refcnt != 0);
832 --(action_set->refcnt);
834 if (action_set->refcnt != 0)
837 if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
838 action_set->fw_rsrc.refcnt != 0) {
839 sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
840 action_set, action_set->fw_rsrc.aset_id.id,
841 action_set->fw_rsrc.refcnt);
844 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
845 sfc_mae_encap_header_del(sa, action_set->encap_header);
846 if (action_set->n_counters > 0) {
847 SFC_ASSERT(action_set->n_counters == 1);
848 SFC_ASSERT(action_set->counters[0].mae_id.id ==
849 EFX_MAE_RSRC_ID_INVALID);
850 rte_free(action_set->counters);
852 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
853 rte_free(action_set);
855 sfc_dbg(sa, "deleted action_set=%p", action_set);
859 sfc_mae_action_set_enable(struct sfc_adapter *sa,
860 struct sfc_mae_action_set *action_set)
862 struct sfc_mae_encap_header *encap_header = action_set->encap_header;
863 struct sfc_mae_counter_id *counters = action_set->counters;
864 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
867 SFC_ASSERT(sfc_adapter_is_locked(sa));
869 if (fw_rsrc->refcnt == 0) {
870 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
871 SFC_ASSERT(action_set->spec != NULL);
873 rc = sfc_mae_encap_header_enable(sa, encap_header,
878 rc = sfc_mae_counters_enable(sa, counters,
879 action_set->n_counters,
882 sfc_err(sa, "failed to enable %u MAE counters: %s",
883 action_set->n_counters, rte_strerror(rc));
885 sfc_mae_encap_header_disable(sa, encap_header);
889 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
892 sfc_err(sa, "failed to enable action_set=%p: %s",
893 action_set, strerror(rc));
895 (void)sfc_mae_counters_disable(sa, counters,
896 action_set->n_counters);
897 sfc_mae_encap_header_disable(sa, encap_header);
901 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
902 action_set, fw_rsrc->aset_id.id);
911 sfc_mae_action_set_disable(struct sfc_adapter *sa,
912 struct sfc_mae_action_set *action_set)
914 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
917 SFC_ASSERT(sfc_adapter_is_locked(sa));
919 if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
920 fw_rsrc->refcnt == 0) {
921 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
922 action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
926 if (fw_rsrc->refcnt == 1) {
927 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
929 sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
930 action_set, fw_rsrc->aset_id.id);
932 sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
933 action_set, fw_rsrc->aset_id.id, strerror(rc));
935 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
937 rc = sfc_mae_counters_disable(sa, action_set->counters,
938 action_set->n_counters);
940 sfc_err(sa, "failed to disable %u MAE counters: %s",
941 action_set->n_counters, rte_strerror(rc));
944 sfc_mae_encap_header_disable(sa, action_set->encap_header);
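/*
 * Illustrative sketch of the rollback discipline in the two functions
 * above: dependencies are enabled bottom-up (encap. header, counters,
 * then the action set itself) and unwound in exact reverse order on
 * failure, keeping the fw_rsrc reference counts balanced:
 *
 *	rc = sfc_mae_encap_header_enable(sa, encap_header, spec);
 *	if (rc != 0)
 *		return rc;
 *
 *	rc = sfc_mae_counters_enable(sa, counters, n_counters, spec);
 *	if (rc != 0) {
 *		sfc_mae_encap_header_disable(sa, encap_header);
 *		return rc;
 *	}
 */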
951 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
952 struct rte_flow *flow)
954 struct sfc_flow_spec *spec;
955 struct sfc_flow_spec_mae *spec_mae;
965 spec_mae = &spec->mae;
967 if (spec_mae->ft != NULL) {
968 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
969 spec_mae->ft->jump_rule_is_set = B_FALSE;
971 SFC_ASSERT(spec_mae->ft->refcnt != 0);
972 --(spec_mae->ft->refcnt);
975 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
977 if (spec_mae->outer_rule != NULL)
978 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
980 if (spec_mae->action_set != NULL)
981 sfc_mae_action_set_del(sa, spec_mae->action_set);
983 if (spec_mae->match_spec != NULL)
984 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
988 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
990 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
991 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
992 const efx_mae_field_id_t field_ids[] = {
993 EFX_MAE_FIELD_VLAN0_PROTO_BE,
994 EFX_MAE_FIELD_VLAN1_PROTO_BE,
996 const struct sfc_mae_ethertype *et;
1001 * In accordance with RTE flow API convention, the innermost L2
1002 * item's "type" ("inner_type") is a L3 EtherType. If there is
1003 * no L3 item, it's 0x0000/0x0000.
1005 et = &pdata->ethertypes[pdata->nb_vlan_tags];
1006 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1007 fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
1009 (const uint8_t *)&et->value,
1011 (const uint8_t *)&et->mask);
1016 * sfc_mae_rule_parse_item_vlan() has already made sure
1017 * that pdata->nb_vlan_tags does not exceed this figure.
1019 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1021 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
1022 et = &pdata->ethertypes[i];
1024 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1025 fremap[field_ids[i]],
1027 (const uint8_t *)&et->value,
1029 (const uint8_t *)&et->mask);
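/*
 * Worked example for the mapping above: for a pattern
 * ETH / VLAN / VLAN / IPV4, pdata->nb_vlan_tags is 2, so
 * ETHER_TYPE_BE is set from ethertypes[2] (the IPv4 EtherType),
 * while VLAN0_PROTO_BE and VLAN1_PROTO_BE are set from
 * ethertypes[0] and ethertypes[1] (the two TPIDs).
 */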
1038 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
1039 struct rte_flow_error *error)
1041 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1042 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1043 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1044 const rte_be16_t supported_tpids[] = {
1045 /* VLAN standard TPID (always the first element) */
1046 RTE_BE16(RTE_ETHER_TYPE_VLAN),
1048 /* Double-tagging TPIDs */
1049 RTE_BE16(RTE_ETHER_TYPE_QINQ),
1050 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
1051 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
1052 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
1054 bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
1055 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
1056 unsigned int ethertype_idx;
1057 const uint8_t *valuep;
1058 const uint8_t *maskp;
1061 if (pdata->innermost_ethertype_restriction.mask != 0 &&
1062 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1064 * If a single item VLAN is followed by an L3 item, the value
1065 * of "type" in item ETH can't be a double-tagging TPID.
1067 nb_supported_tpids = 1;
1071 * sfc_mae_rule_parse_item_vlan() has already made sure
1072 * that pdata->nb_vlan_tags does not exceed this figure.
1074 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1076 for (ethertype_idx = 0;
1077 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
1078 rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
1079 rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
1080 unsigned int tpid_idx;
1083 * This loop can have only two iterations. On the second one,
1084 * drop the outer tag presence enforcement bit because inner
1085 * tag presence automatically implies presence of the outer tag.
1087 enforce_tag_presence[0] = B_FALSE;
1089 if (tpid_m == RTE_BE16(0)) {
1090 if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
1091 enforce_tag_presence[ethertype_idx] = B_TRUE;
1093 /* No match on this field, and no value check. */
1094 nb_supported_tpids = 1;
1098 /* Only an exact match is supported. */
1099 if (tpid_m != RTE_BE16(0xffff)) {
1100 sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
1101 rte_be_to_cpu_16(tpid_m));
1106 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
1107 tpid_idx < nb_supported_tpids; ++tpid_idx) {
1108 if (tpid_v == supported_tpids[tpid_idx])
1112 if (tpid_idx == nb_supported_tpids) {
1113 sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
1114 rte_be_to_cpu_16(tpid_v));
1119 nb_supported_tpids = 1;
1122 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
1123 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
1124 rte_be16_t enforced_et;
1126 enforced_et = pdata->innermost_ethertype_restriction.value;
1128 if (et->mask == 0) {
1129 et->mask = RTE_BE16(0xffff);
1130 et->value = enforced_et;
1131 } else if (et->mask != RTE_BE16(0xffff) ||
1132 et->value != enforced_et) {
1133 sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
1134 rte_be_to_cpu_16(enforced_et),
1135 rte_be_to_cpu_16(et->value),
1136 rte_be_to_cpu_16(et->mask));
1143 * Now that the number of VLAN tags is known, set fields
1144 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
1145 * one is either a valid L3 EtherType or 0x0000/0x0000,
1146 * and the last two are either valid TPIDs or 0x0000/0x0000.
1148 rc = sfc_mae_set_ethertypes(ctx);
1152 if (pdata->l3_next_proto_restriction_mask == 0xff) {
1153 if (pdata->l3_next_proto_mask == 0) {
1154 pdata->l3_next_proto_mask = 0xff;
1155 pdata->l3_next_proto_value =
1156 pdata->l3_next_proto_restriction_value;
1157 } else if (pdata->l3_next_proto_mask != 0xff ||
1158 pdata->l3_next_proto_value !=
1159 pdata->l3_next_proto_restriction_value) {
1160 sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
1161 pdata->l3_next_proto_restriction_value,
1162 pdata->l3_next_proto_value,
1163 pdata->l3_next_proto_mask);
1169 if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
1170 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1171 fremap[EFX_MAE_FIELD_HAS_OVLAN],
1172 enforce_tag_presence[0] ||
1173 pdata->has_ovlan_value);
1178 if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
1179 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1180 fremap[EFX_MAE_FIELD_HAS_IVLAN],
1181 enforce_tag_presence[1] ||
1182 pdata->has_ivlan_value);
1187 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
1188 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
1189 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1190 fremap[EFX_MAE_FIELD_IP_PROTO],
1191 sizeof(pdata->l3_next_proto_value),
1193 sizeof(pdata->l3_next_proto_mask),
1201 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1202 "Failed to process pattern data");
1206 sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
1207 struct sfc_flow_parse_ctx *ctx,
1208 struct rte_flow_error *error)
1210 const struct rte_flow_item_mark *spec = item->spec;
1211 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1214 return rte_flow_error_set(error, EINVAL,
1215 RTE_FLOW_ERROR_TYPE_ITEM, item,
1216 "NULL spec in item MARK");
1220 * This item is used in tunnel offload support only.
1221 * It must go before any network header items. This
1222 * way, sfc_mae_rule_preparse_item_mark() must have
1223 * already parsed it. Only one item MARK is allowed.
1225 if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
1226 spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
1227 return rte_flow_error_set(error, EINVAL,
1228 RTE_FLOW_ERROR_TYPE_ITEM,
1229 item, "invalid item MARK");
1236 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1237 struct sfc_flow_parse_ctx *ctx,
1238 struct rte_flow_error *error)
1240 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1241 const struct rte_flow_item_port_id supp_mask = {
1244 const void *def_mask = &rte_flow_item_port_id_mask;
1245 const struct rte_flow_item_port_id *spec = NULL;
1246 const struct rte_flow_item_port_id *mask = NULL;
1247 efx_mport_sel_t mport_sel;
1250 if (ctx_mae->match_mport_set) {
1251 return rte_flow_error_set(error, ENOTSUP,
1252 RTE_FLOW_ERROR_TYPE_ITEM, item,
1253 "Can't handle multiple traffic source items");
1256 rc = sfc_flow_parse_init(item,
1257 (const void **)&spec, (const void **)&mask,
1258 (const void *)&supp_mask, def_mask,
1259 sizeof(struct rte_flow_item_port_id), error);
1263 if (mask->id != supp_mask.id) {
1264 return rte_flow_error_set(error, EINVAL,
1265 RTE_FLOW_ERROR_TYPE_ITEM, item,
1266 "Bad mask in the PORT_ID pattern item");
1269 /* If "spec" is not set, could be any port ID */
1273 if (spec->id > UINT16_MAX) {
1274 return rte_flow_error_set(error, EOVERFLOW,
1275 RTE_FLOW_ERROR_TYPE_ITEM, item,
1276 "The port ID is too large");
1279 rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
1280 spec->id, &mport_sel);
1282 return rte_flow_error_set(error, rc,
1283 RTE_FLOW_ERROR_TYPE_ITEM, item,
1284 "Can't find RTE ethdev by the port ID");
1287 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1290 return rte_flow_error_set(error, rc,
1291 RTE_FLOW_ERROR_TYPE_ITEM, item,
1292 "Failed to set MPORT for the port ID");
1295 ctx_mae->match_mport_set = B_TRUE;
1301 sfc_mae_rule_parse_item_port_representor(const struct rte_flow_item *item,
1302 struct sfc_flow_parse_ctx *ctx,
1303 struct rte_flow_error *error)
1305 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1306 const struct rte_flow_item_ethdev supp_mask = {
1309 const void *def_mask = &rte_flow_item_ethdev_mask;
1310 const struct rte_flow_item_ethdev *spec = NULL;
1311 const struct rte_flow_item_ethdev *mask = NULL;
1312 efx_mport_sel_t mport_sel;
1315 if (ctx_mae->match_mport_set) {
1316 return rte_flow_error_set(error, ENOTSUP,
1317 RTE_FLOW_ERROR_TYPE_ITEM, item,
1318 "Can't handle multiple traffic source items");
1321 rc = sfc_flow_parse_init(item,
1322 (const void **)&spec, (const void **)&mask,
1323 (const void *)&supp_mask, def_mask,
1324 sizeof(struct rte_flow_item_ethdev), error);
1328 if (mask->port_id != supp_mask.port_id) {
1329 return rte_flow_error_set(error, EINVAL,
1330 RTE_FLOW_ERROR_TYPE_ITEM, item,
1331 "Bad mask in the PORT_REPRESENTOR pattern item");
1334 /* If "spec" is not set, could be any port ID */
1338 rc = sfc_mae_switch_port_by_ethdev(
1339 ctx_mae->sa->mae.switch_domain_id,
1340 spec->port_id, &mport_sel);
1342 return rte_flow_error_set(error, rc,
1343 RTE_FLOW_ERROR_TYPE_ITEM, item,
1344 "Can't find RTE ethdev by the port ID");
1347 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1350 return rte_flow_error_set(error, rc,
1351 RTE_FLOW_ERROR_TYPE_ITEM, item,
1352 "Failed to set MPORT for the port ID");
1355 ctx_mae->match_mport_set = B_TRUE;
1361 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1362 struct sfc_flow_parse_ctx *ctx,
1363 struct rte_flow_error *error)
1365 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1366 const struct rte_flow_item_phy_port supp_mask = {
1367 .index = 0xffffffff,
1369 const void *def_mask = &rte_flow_item_phy_port_mask;
1370 const struct rte_flow_item_phy_port *spec = NULL;
1371 const struct rte_flow_item_phy_port *mask = NULL;
1372 efx_mport_sel_t mport_v;
1375 if (ctx_mae->match_mport_set) {
1376 return rte_flow_error_set(error, ENOTSUP,
1377 RTE_FLOW_ERROR_TYPE_ITEM, item,
1378 "Can't handle multiple traffic source items");
1381 rc = sfc_flow_parse_init(item,
1382 (const void **)&spec, (const void **)&mask,
1383 (const void *)&supp_mask, def_mask,
1384 sizeof(struct rte_flow_item_phy_port), error);
1388 if (mask->index != supp_mask.index) {
1389 return rte_flow_error_set(error, EINVAL,
1390 RTE_FLOW_ERROR_TYPE_ITEM, item,
1391 "Bad mask in the PHY_PORT pattern item");
1394 /* If "spec" is not set, could be any physical port */
1398 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1400 return rte_flow_error_set(error, rc,
1401 RTE_FLOW_ERROR_TYPE_ITEM, item,
1402 "Failed to convert the PHY_PORT index");
1405 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1407 return rte_flow_error_set(error, rc,
1408 RTE_FLOW_ERROR_TYPE_ITEM, item,
1409 "Failed to set MPORT for the PHY_PORT");
1412 ctx_mae->match_mport_set = B_TRUE;
1418 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1419 struct sfc_flow_parse_ctx *ctx,
1420 struct rte_flow_error *error)
1422 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1423 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1424 efx_mport_sel_t mport_v;
1427 if (ctx_mae->match_mport_set) {
1428 return rte_flow_error_set(error, ENOTSUP,
1429 RTE_FLOW_ERROR_TYPE_ITEM, item,
1430 "Can't handle multiple traffic source items");
1433 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1436 return rte_flow_error_set(error, rc,
1437 RTE_FLOW_ERROR_TYPE_ITEM, item,
1438 "Failed to convert the PF ID");
1441 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1443 return rte_flow_error_set(error, rc,
1444 RTE_FLOW_ERROR_TYPE_ITEM, item,
1445 "Failed to set MPORT for the PF");
1448 ctx_mae->match_mport_set = B_TRUE;
1454 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1455 struct sfc_flow_parse_ctx *ctx,
1456 struct rte_flow_error *error)
1458 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1459 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1460 const struct rte_flow_item_vf supp_mask = {
1463 const void *def_mask = &rte_flow_item_vf_mask;
1464 const struct rte_flow_item_vf *spec = NULL;
1465 const struct rte_flow_item_vf *mask = NULL;
1466 efx_mport_sel_t mport_v;
1469 if (ctx_mae->match_mport_set) {
1470 return rte_flow_error_set(error, ENOTSUP,
1471 RTE_FLOW_ERROR_TYPE_ITEM, item,
1472 "Can't handle multiple traffic source items");
1475 rc = sfc_flow_parse_init(item,
1476 (const void **)&spec, (const void **)&mask,
1477 (const void *)&supp_mask, def_mask,
1478 sizeof(struct rte_flow_item_vf), error);
1482 if (mask->id != supp_mask.id) {
1483 return rte_flow_error_set(error, EINVAL,
1484 RTE_FLOW_ERROR_TYPE_ITEM, item,
1485 "Bad mask in the VF pattern item");
1489 * If "spec" is not set, the item requests any VF related to the
1490 * PF of the current DPDK port (but not the PF itself).
1491 * Reject this match criterion as unsupported.
1494 return rte_flow_error_set(error, EINVAL,
1495 RTE_FLOW_ERROR_TYPE_ITEM, item,
1496 "Bad spec in the VF pattern item");
1499 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1501 return rte_flow_error_set(error, rc,
1502 RTE_FLOW_ERROR_TYPE_ITEM, item,
1503 "Failed to convert the PF + VF IDs");
1506 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1508 return rte_flow_error_set(error, rc,
1509 RTE_FLOW_ERROR_TYPE_ITEM, item,
1510 "Failed to set MPORT for the PF + VF");
1513 ctx_mae->match_mport_set = B_TRUE;
1519 * Having this field ID in a field locator means that this
1520 * locator cannot be used to actually set the field at the
1521 * time when the corresponding item gets encountered. Such
1522 * fields get stashed in the parsing context instead. This
1523 * is required to resolve dependencies between the stashed
1524 * fields. See sfc_mae_rule_process_pattern_data().
1526 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
1528 struct sfc_mae_field_locator {
1529 efx_mae_field_id_t field_id;
1531 /* Field offset in the corresponding rte_flow_item_ struct */
1536 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1537 unsigned int nb_field_locators, void *mask_ptr,
1542 memset(mask_ptr, 0, mask_size);
1544 for (i = 0; i < nb_field_locators; ++i) {
1545 const struct sfc_mae_field_locator *fl = &field_locators[i];
1547 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1548 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1553 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1554 unsigned int nb_field_locators, const uint8_t *spec,
1555 const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1556 struct rte_flow_error *error)
1558 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1562 for (i = 0; i < nb_field_locators; ++i) {
1563 const struct sfc_mae_field_locator *fl = &field_locators[i];
1565 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1568 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1569 fremap[fl->field_id],
1570 fl->size, spec + fl->ofst,
1571 fl->size, mask + fl->ofst);
1577 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1578 NULL, "Failed to process item fields");
1584 static const struct sfc_mae_field_locator flocs_eth[] = {
1587 * This locator is used only for building the supported fields mask.
1588 * The field is handled by sfc_mae_rule_process_pattern_data().
1590 SFC_MAE_FIELD_HANDLING_DEFERRED,
1591 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1592 offsetof(struct rte_flow_item_eth, type),
1595 EFX_MAE_FIELD_ETH_DADDR_BE,
1596 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1597 offsetof(struct rte_flow_item_eth, dst),
1600 EFX_MAE_FIELD_ETH_SADDR_BE,
1601 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1602 offsetof(struct rte_flow_item_eth, src),
1607 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1608 struct sfc_flow_parse_ctx *ctx,
1609 struct rte_flow_error *error)
1611 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1612 struct rte_flow_item_eth override_mask;
1613 struct rte_flow_item_eth supp_mask;
1614 const uint8_t *spec = NULL;
1615 const uint8_t *mask = NULL;
1618 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1619 &supp_mask, sizeof(supp_mask));
1620 supp_mask.has_vlan = 1;
1622 rc = sfc_flow_parse_init(item,
1623 (const void **)&spec, (const void **)&mask,
1624 (const void *)&supp_mask,
1625 &rte_flow_item_eth_mask,
1626 sizeof(struct rte_flow_item_eth), error);
1630 if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
1632 * The HW/FW does not yet support matching on MAC addresses in
1633 * outer rules (this will change). Matching on VLAN presence
1634 * isn't supported either. Ignore these match criteria.
1636 memcpy(&override_mask, mask, sizeof(override_mask));
1637 memset(&override_mask.hdr.dst_addr, 0,
1638 sizeof(override_mask.hdr.dst_addr));
1639 memset(&override_mask.hdr.src_addr, 0,
1640 sizeof(override_mask.hdr.src_addr));
1641 override_mask.has_vlan = 0;
1643 mask = (const uint8_t *)&override_mask;
1647 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1648 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1649 const struct rte_flow_item_eth *item_spec;
1650 const struct rte_flow_item_eth *item_mask;
1652 item_spec = (const struct rte_flow_item_eth *)spec;
1653 item_mask = (const struct rte_flow_item_eth *)mask;
1656 * Remember various match criteria in the parsing context.
1657 * sfc_mae_rule_process_pattern_data() will consider them all
1658 * together when the rest of the items have been parsed.
1660 ethertypes[0].value = item_spec->type;
1661 ethertypes[0].mask = item_mask->type;
1662 if (item_mask->has_vlan) {
1663 pdata->has_ovlan_mask = B_TRUE;
1664 if (item_spec->has_vlan)
1665 pdata->has_ovlan_value = B_TRUE;
1669 * The specification is empty. The overall pattern
1670 * validity will be enforced at the end of parsing.
1671 * See sfc_mae_rule_process_pattern_data().
1676 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1680 static const struct sfc_mae_field_locator flocs_vlan[] = {
1683 EFX_MAE_FIELD_VLAN0_TCI_BE,
1684 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1685 offsetof(struct rte_flow_item_vlan, tci),
1689 * This locator is used only for building the supported fields mask.
1690 * The field is handled by sfc_mae_rule_process_pattern_data().
1692 SFC_MAE_FIELD_HANDLING_DEFERRED,
1693 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1694 offsetof(struct rte_flow_item_vlan, inner_type),
1699 EFX_MAE_FIELD_VLAN1_TCI_BE,
1700 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1701 offsetof(struct rte_flow_item_vlan, tci),
1705 * This locator is used only for building the supported fields mask.
1706 * The field is handled by sfc_mae_rule_process_pattern_data().
1708 SFC_MAE_FIELD_HANDLING_DEFERRED,
1709 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1710 offsetof(struct rte_flow_item_vlan, inner_type),
1715 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1716 struct sfc_flow_parse_ctx *ctx,
1717 struct rte_flow_error *error)
1719 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1720 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1721 boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1722 &pdata->has_ovlan_mask,
1723 &pdata->has_ivlan_mask,
1725 boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1726 &pdata->has_ovlan_value,
1727 &pdata->has_ivlan_value,
1729 boolean_t *cur_tag_presence_bit_mp;
1730 boolean_t *cur_tag_presence_bit_vp;
1731 const struct sfc_mae_field_locator *flocs;
1732 struct rte_flow_item_vlan supp_mask;
1733 const uint8_t *spec = NULL;
1734 const uint8_t *mask = NULL;
1735 unsigned int nb_flocs;
1738 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1740 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1741 return rte_flow_error_set(error, ENOTSUP,
1742 RTE_FLOW_ERROR_TYPE_ITEM, item,
1743 "Can't match that many VLAN tags");
1746 cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1747 cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1749 if (*cur_tag_presence_bit_mp == B_TRUE &&
1750 *cur_tag_presence_bit_vp == B_FALSE) {
1751 return rte_flow_error_set(error, EINVAL,
1752 RTE_FLOW_ERROR_TYPE_ITEM, item,
1753 "The previous item enforces no (more) VLAN, "
1754 "so the current item (VLAN) must not exist");
1757 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1758 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
1760 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1761 &supp_mask, sizeof(supp_mask));
1763 * This only means that the field is supported by the driver and libefx.
1764 * Support at the NIC level will be checked when all items have been parsed.
1766 supp_mask.has_more_vlan = 1;
1768 rc = sfc_flow_parse_init(item,
1769 (const void **)&spec, (const void **)&mask,
1770 (const void *)&supp_mask,
1771 &rte_flow_item_vlan_mask,
1772 sizeof(struct rte_flow_item_vlan), error);
1777 struct sfc_mae_ethertype *et = pdata->ethertypes;
1778 const struct rte_flow_item_vlan *item_spec;
1779 const struct rte_flow_item_vlan *item_mask;
1781 item_spec = (const struct rte_flow_item_vlan *)spec;
1782 item_mask = (const struct rte_flow_item_vlan *)mask;
1785 * Remember various match criteria in the parsing context.
1786 * sfc_mae_rule_process_pattern_data() will consider them all
1787 * together when the rest of the items have been parsed.
1789 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1790 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1791 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1792 if (item_mask->has_more_vlan) {
1793 if (pdata->nb_vlan_tags ==
1794 SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1795 return rte_flow_error_set(error, ENOTSUP,
1796 RTE_FLOW_ERROR_TYPE_ITEM, item,
1797 "Can't use 'has_more_vlan' in "
1798 "the second item VLAN");
1800 pdata->has_ivlan_mask = B_TRUE;
1801 if (item_spec->has_more_vlan)
1802 pdata->has_ivlan_value = B_TRUE;
1805 /* Convert TCI to MAE representation right now. */
1806 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1812 ++(pdata->nb_vlan_tags);
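/*
 * Illustrative example of the deferred tag presence bits: given item
 * ETH with has_vlan enabled in both spec and mask, followed by a
 * single item VLAN whose mask enables has_more_vlan with a spec value
 * of 0, sfc_mae_rule_process_pattern_data() will end up matching
 * HAS_OVLAN=1 and HAS_IVLAN=0.
 */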
1817 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1819 EFX_MAE_FIELD_SRC_IP4_BE,
1820 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1821 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1824 EFX_MAE_FIELD_DST_IP4_BE,
1825 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1826 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1830 * This locator is used only for building the supported fields mask.
1831 * The field is handled by sfc_mae_rule_process_pattern_data().
1833 SFC_MAE_FIELD_HANDLING_DEFERRED,
1834 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1835 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1838 EFX_MAE_FIELD_IP_TOS,
1839 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1840 hdr.type_of_service),
1841 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1844 EFX_MAE_FIELD_IP_TTL,
1845 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1846 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1851 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1852 struct sfc_flow_parse_ctx *ctx,
1853 struct rte_flow_error *error)
1855 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1856 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1857 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1858 struct rte_flow_item_ipv4 supp_mask;
1859 const uint8_t *spec = NULL;
1860 const uint8_t *mask = NULL;
1863 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1864 &supp_mask, sizeof(supp_mask));
1866 rc = sfc_flow_parse_init(item,
1867 (const void **)&spec, (const void **)&mask,
1868 (const void *)&supp_mask,
1869 &rte_flow_item_ipv4_mask,
1870 sizeof(struct rte_flow_item_ipv4), error);
1874 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1875 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1878 const struct rte_flow_item_ipv4 *item_spec;
1879 const struct rte_flow_item_ipv4 *item_mask;
1881 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1882 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1884 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1885 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1890 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1894 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1896 EFX_MAE_FIELD_SRC_IP6_BE,
1897 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1898 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1901 EFX_MAE_FIELD_DST_IP6_BE,
1902 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1903 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1907 * This locator is used only for building the supported fields mask.
1908 * The field is handled by sfc_mae_rule_process_pattern_data().
1910 SFC_MAE_FIELD_HANDLING_DEFERRED,
1911 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1912 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1915 EFX_MAE_FIELD_IP_TTL,
1916 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1917 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1922 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1923 struct sfc_flow_parse_ctx *ctx,
1924 struct rte_flow_error *error)
1926 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1927 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1928 const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1929 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1930 struct rte_flow_item_ipv6 supp_mask;
1931 const uint8_t *spec = NULL;
1932 const uint8_t *mask = NULL;
1933 rte_be32_t vtc_flow_be;
1939 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1940 &supp_mask, sizeof(supp_mask));
1942 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1943 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1945 rc = sfc_flow_parse_init(item,
1946 (const void **)&spec, (const void **)&mask,
1947 (const void *)&supp_mask,
1948 &rte_flow_item_ipv6_mask,
1949 sizeof(struct rte_flow_item_ipv6), error);
1953 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1954 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1957 const struct rte_flow_item_ipv6 *item_spec;
1958 const struct rte_flow_item_ipv6 *item_mask;
1960 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1961 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1963 pdata->l3_next_proto_value = item_spec->hdr.proto;
1964 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1969 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1974 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1975 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1976 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1978 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1979 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1980 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1982 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1983 fremap[EFX_MAE_FIELD_IP_TOS],
1984 sizeof(tc_value), &tc_value,
1985 sizeof(tc_mask), &tc_mask);
1987 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1988 NULL, "Failed to process item fields");
1994 static const struct sfc_mae_field_locator flocs_tcp[] = {
1996 EFX_MAE_FIELD_L4_SPORT_BE,
1997 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1998 offsetof(struct rte_flow_item_tcp, hdr.src_port),
2001 EFX_MAE_FIELD_L4_DPORT_BE,
2002 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
2003 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
2006 EFX_MAE_FIELD_TCP_FLAGS_BE,
2008 * The values have been picked intentionally since the
2009 * target MAE field is oversize (16 bit). This mapping
2010 * relies on the fact that the MAE field is big-endian.
2012 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
2013 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
2014 offsetof(struct rte_flow_item_tcp, hdr.data_off),
2019 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
2020 struct sfc_flow_parse_ctx *ctx,
2021 struct rte_flow_error *error)
2023 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2024 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2025 struct rte_flow_item_tcp supp_mask;
2026 const uint8_t *spec = NULL;
2027 const uint8_t *mask = NULL;
2031 * When encountered among outermost items, item TCP is invalid.
2032 * Check which match specification is being constructed now.
2034 if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
2035 return rte_flow_error_set(error, EINVAL,
2036 RTE_FLOW_ERROR_TYPE_ITEM, item,
2037 "TCP in outer frame is invalid");
2040 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
2041 &supp_mask, sizeof(supp_mask));
2043 rc = sfc_flow_parse_init(item,
2044 (const void **)&spec, (const void **)&mask,
2045 (const void *)&supp_mask,
2046 &rte_flow_item_tcp_mask,
2047 sizeof(struct rte_flow_item_tcp), error);
2051 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
2052 pdata->l3_next_proto_restriction_mask = 0xff;
2057 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
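/*
 * Illustrative note on the flocs_tcp mapping above: hdr.data_off and
 * hdr.tcp_flags are adjacent single bytes in struct rte_tcp_hdr, so
 * passing them as one two-byte span lines up with the big-endian
 * 16-bit EFX_MAE_FIELD_TCP_FLAGS_BE field, leaving the flags in its
 * low-order byte.
 */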
2061 static const struct sfc_mae_field_locator flocs_udp[] = {
2063 EFX_MAE_FIELD_L4_SPORT_BE,
2064 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
2065 offsetof(struct rte_flow_item_udp, hdr.src_port),
2068 EFX_MAE_FIELD_L4_DPORT_BE,
2069 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
2070 offsetof(struct rte_flow_item_udp, hdr.dst_port),
2075 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
2076 struct sfc_flow_parse_ctx *ctx,
2077 struct rte_flow_error *error)
2079 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2080 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2081 struct rte_flow_item_udp supp_mask;
2082 const uint8_t *spec = NULL;
2083 const uint8_t *mask = NULL;
2086 sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
2087 &supp_mask, sizeof(supp_mask));
2089 rc = sfc_flow_parse_init(item,
2090 (const void **)&spec, (const void **)&mask,
2091 (const void *)&supp_mask,
2092 &rte_flow_item_udp_mask,
2093 sizeof(struct rte_flow_item_udp), error);
2097 pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
2098 pdata->l3_next_proto_restriction_mask = 0xff;
2103 return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
2107 static const struct sfc_mae_field_locator flocs_tunnel[] = {
2110 * The size and offset values are relevant
2111 * for Geneve and NVGRE, too.
2113 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
2114 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
2119 * An auxiliary registry which allows using non-encap. field IDs
2120 * directly when building a match specification of type ACTION.
2122 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
2124 static const efx_mae_field_id_t field_ids_no_remap[] = {
2125 #define FIELD_ID_NO_REMAP(_field) \
2126 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
2128 FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
2129 FIELD_ID_NO_REMAP(ETH_SADDR_BE),
2130 FIELD_ID_NO_REMAP(ETH_DADDR_BE),
2131 FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
2132 FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
2133 FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
2134 FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
2135 FIELD_ID_NO_REMAP(SRC_IP4_BE),
2136 FIELD_ID_NO_REMAP(DST_IP4_BE),
2137 FIELD_ID_NO_REMAP(IP_PROTO),
2138 FIELD_ID_NO_REMAP(IP_TOS),
2139 FIELD_ID_NO_REMAP(IP_TTL),
2140 FIELD_ID_NO_REMAP(SRC_IP6_BE),
2141 FIELD_ID_NO_REMAP(DST_IP6_BE),
2142 FIELD_ID_NO_REMAP(L4_SPORT_BE),
2143 FIELD_ID_NO_REMAP(L4_DPORT_BE),
2144 FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2145 FIELD_ID_NO_REMAP(HAS_OVLAN),
2146 FIELD_ID_NO_REMAP(HAS_IVLAN),
2148 #undef FIELD_ID_NO_REMAP
2152 * An auxiliary registry which allows using "ENC" field IDs
2153 * when building a match specification of type OUTER.
2155 * See sfc_mae_rule_encap_parse_init().
2157 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2158 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2159 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2161 FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2162 FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2163 FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2164 FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2165 FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2166 FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2167 FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2168 FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2169 FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2170 FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2171 FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2172 FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2173 FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2174 FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2175 FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2176 FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2177 FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2178 FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2180 #undef FIELD_ID_REMAP_TO_ENCAP
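/*
 * Illustrative sketch: thanks to these registries, the same item
 * parsers can build both specification types; the parse context
 * simply points field_ids_remap at the appropriate table, e.g.
 *
 *	ctx->field_ids_remap = field_ids_no_remap;         (ACTION)
 *	ctx->field_ids_remap = field_ids_remap_to_encap;   (OUTER)
 *
 * after which every field lookup goes through
 * fremap[EFX_MAE_FIELD_...].
 */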
2184 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2185 struct sfc_flow_parse_ctx *ctx,
2186 struct rte_flow_error *error)
2188 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2189 uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2190 uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2191 const struct rte_flow_item_vxlan *vxp;
2192 uint8_t supp_mask[sizeof(uint64_t)];
2193 const uint8_t *spec = NULL;
2194 const uint8_t *mask = NULL;
2197 if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
2199 * As a workaround, pattern processing has started from
2200 * this (tunnel) item. No pattern data to process yet.
2204 * We're about to start processing inner frame items.
2205 * Process pattern data that has been deferred so far
2206 * and reset pattern data storage.
2208 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2213 memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2215 sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2216 &supp_mask, sizeof(supp_mask));
2219 * This tunnel item was preliminarily detected by
2220 * sfc_mae_rule_encap_parse_init(). Default mask
2221 * was also picked by that helper. Use it here.
2223 rc = sfc_flow_parse_init(item,
2224 (const void **)&spec, (const void **)&mask,
2225 (const void *)&supp_mask,
2226 ctx_mae->tunnel_def_mask,
2227 ctx_mae->tunnel_def_mask_size, error);
2232 * This item and later ones comprise a
2233 * match specification of type ACTION.
2235 ctx_mae->match_spec = ctx_mae->match_spec_action;
2237 /* This item and later ones use non-encap. EFX MAE field IDs. */
2238 ctx_mae->field_ids_remap = field_ids_no_remap;
2244 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
2245 * Copy 24-bit VNI, which is BE, at offset 1 in it.
2246 * The extra byte is 0 both in the mask and in the value.
2248 vxp = (const struct rte_flow_item_vxlan *)spec;
2249 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2251 vxp = (const struct rte_flow_item_vxlan *)mask;
2252 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
2254 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2255 EFX_MAE_FIELD_ENC_VNET_ID_BE,
2256 sizeof(vnet_id_v), vnet_id_v,
2257 sizeof(vnet_id_m), vnet_id_m);
2259 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2260 item, "Failed to set VXLAN VNI");
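/*
 * Editor's note: byte layout of the 32-bit ENC_VNET_ID_BE field as built
 * above from a 24-bit big-endian VNI (illustrative value, full VNI mask):
 *
 *     vxp->vni        : 0x12 0x34 0x56
 *     vnet_id_v[0..3] : 0x00 0x12 0x34 0x56
 *     vnet_id_m[0..3] : 0x00 0xff 0xff 0xff
 */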
2266 static const struct sfc_flow_item sfc_flow_items[] = {
2268 .type = RTE_FLOW_ITEM_TYPE_MARK,
2270 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2271 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2272 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2273 .parse = sfc_mae_rule_parse_item_mark,
2276 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2279 * In terms of RTE flow, this item is a META one,
2280 * and its position in the pattern is don't care.
2282 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2283 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2284 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2285 .parse = sfc_mae_rule_parse_item_port_id,
2288 .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
2289 .name = "PORT_REPRESENTOR",
2291 * In terms of RTE flow, this item is a META one,
2292 * and its position in the pattern is don't care.
2294 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2295 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2296 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2297 .parse = sfc_mae_rule_parse_item_port_representor,
2300 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2303 * In terms of RTE flow, this item is a META one,
2304 * and its position in the pattern is don't care.
2306 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2307 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2308 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2309 .parse = sfc_mae_rule_parse_item_phy_port,
2312 .type = RTE_FLOW_ITEM_TYPE_PF,
2315 * In terms of RTE flow, this item is a META one,
2316 * and its position in the pattern is don't care.
2318 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2319 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2320 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2321 .parse = sfc_mae_rule_parse_item_pf,
2324 .type = RTE_FLOW_ITEM_TYPE_VF,
2327 * In terms of RTE flow, this item is a META one,
2328 * and its position in the pattern is don't care.
2330 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2331 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2332 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2333 .parse = sfc_mae_rule_parse_item_vf,
2336 .type = RTE_FLOW_ITEM_TYPE_ETH,
2338 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2339 .layer = SFC_FLOW_ITEM_L2,
2340 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2341 .parse = sfc_mae_rule_parse_item_eth,
2344 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2346 .prev_layer = SFC_FLOW_ITEM_L2,
2347 .layer = SFC_FLOW_ITEM_L2,
2348 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2349 .parse = sfc_mae_rule_parse_item_vlan,
2352 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2354 .prev_layer = SFC_FLOW_ITEM_L2,
2355 .layer = SFC_FLOW_ITEM_L3,
2356 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2357 .parse = sfc_mae_rule_parse_item_ipv4,
2360 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2362 .prev_layer = SFC_FLOW_ITEM_L2,
2363 .layer = SFC_FLOW_ITEM_L3,
2364 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2365 .parse = sfc_mae_rule_parse_item_ipv6,
2368 .type = RTE_FLOW_ITEM_TYPE_TCP,
2370 .prev_layer = SFC_FLOW_ITEM_L3,
2371 .layer = SFC_FLOW_ITEM_L4,
2372 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2373 .parse = sfc_mae_rule_parse_item_tcp,
2376 .type = RTE_FLOW_ITEM_TYPE_UDP,
2378 .prev_layer = SFC_FLOW_ITEM_L3,
2379 .layer = SFC_FLOW_ITEM_L4,
2380 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2381 .parse = sfc_mae_rule_parse_item_udp,
2384 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2386 .prev_layer = SFC_FLOW_ITEM_L4,
2387 .layer = SFC_FLOW_ITEM_START_LAYER,
2388 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2389 .parse = sfc_mae_rule_parse_item_tunnel,
2392 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2394 .prev_layer = SFC_FLOW_ITEM_L4,
2395 .layer = SFC_FLOW_ITEM_START_LAYER,
2396 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2397 .parse = sfc_mae_rule_parse_item_tunnel,
2400 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2402 .prev_layer = SFC_FLOW_ITEM_L3,
2403 .layer = SFC_FLOW_ITEM_START_LAYER,
2404 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2405 .parse = sfc_mae_rule_parse_item_tunnel,
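/*
 * Editor's note: a sketch (assumed semantics, not the exact code) of how
 * sfc_flow_parse_pattern() is expected to use the prev_layer/layer pairs
 * above to enforce item ordering:
 *
 *     if (item_def->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
 *         item_def->prev_layer != current_layer)
 *             return -EINVAL; // e.g. VLAN not preceded by ETH
 *
 *     if (item_def->layer != SFC_FLOW_ITEM_ANY_LAYER)
 *             current_layer = item_def->layer;
 *
 * Note that the tunnel items reset the layer to SFC_FLOW_ITEM_START_LAYER,
 * so the inner frame is then parsed like a fresh packet.
 */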
2410 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2411 struct sfc_mae_parse_ctx *ctx,
2412 struct sfc_mae_outer_rule **rulep,
2413 struct rte_flow_error *error)
2415 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2418 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2423 SFC_ASSERT(ctx->match_spec_outer != NULL);
2425 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2426 return rte_flow_error_set(error, ENOTSUP,
2427 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2428 "Inconsistent pattern (outer)");
2431 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2433 if (*rulep != NULL) {
2434 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2436 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2437 ctx->encap_type, rulep);
2439 return rte_flow_error_set(error, rc,
2440 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2441 "Failed to process the pattern");
2445 /* The spec has now been tracked by the outer rule entry. */
2446 ctx->match_spec_outer = NULL;
2449 switch (ctx->ft_rule_type) {
2450 case SFC_FT_RULE_NONE:
2452 case SFC_FT_RULE_JUMP:
2453 /* No action rule */
2455 case SFC_FT_RULE_GROUP:
2457 * Match on recirculation ID rather than
2458 * on the outer rule allocation handle.
2460 rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
2461 SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
2463 return rte_flow_error_set(error, rc,
2464 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2465 "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
2469 SFC_ASSERT(B_FALSE);
2473 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2474 * inner parse (when some outer rule is hit) and action rule lookup.
2475 * If the currently processed flow does not come with an outer rule,
2476 * its action rule must be available only for packets which miss in
2477 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2478 * in the action rule specification; this ensures correct behaviour.
2480 * If, on the other hand, this flow does have an outer rule, its ID
2481 * may be unknown at the moment (not yet allocated), but OR_ID mask
2482 * has to be set to 0xffffffff anyway for correct class comparisons.
2483 * When the outer rule has been allocated, this match field will be
2484 * overridden by sfc_mae_outer_rule_enable() to use the right value.
2486 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2490 sfc_mae_outer_rule_del(sa, *rulep);
2494 return rte_flow_error_set(error, rc,
2495 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2496 "Failed to process the pattern");
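/*
 * Editor's note: a condensed sketch (not separate driver code) of the
 * OR_ID convention described above for a flow without an outer rule:
 *
 *     efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
 *
 *     // Accept only packets which MISS in the outer rule table.
 *     rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
 *                                               &invalid_rule_id);
 */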
2503 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2504 struct sfc_mae_parse_ctx *ctx)
2506 struct sfc_flow_tunnel *ft;
2510 sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2514 ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2516 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2520 if (ft->refcnt == 0) {
2521 sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2526 user_mark = SFC_FT_GET_USER_MARK(spec->id);
2527 if (user_mark != 0) {
2528 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2532 sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2534 ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2541 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2542 struct sfc_mae_parse_ctx *ctx,
2543 struct rte_flow_error *error)
2545 const struct rte_flow_item *pattern = ctx->pattern;
2546 struct sfc_mae *mae = &sa->mae;
2547 uint8_t recirc_id = 0;
2550 if (pattern == NULL) {
2551 rte_flow_error_set(error, EINVAL,
2552 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2558 switch (pattern->type) {
2559 case RTE_FLOW_ITEM_TYPE_MARK:
2560 rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
2563 return rte_flow_error_set(error, rc,
2564 RTE_FLOW_ERROR_TYPE_ITEM,
2565 pattern, "tunnel offload: GROUP: invalid item MARK");
2569 case RTE_FLOW_ITEM_TYPE_VXLAN:
2570 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2571 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2572 ctx->tunnel_def_mask_size =
2573 sizeof(rte_flow_item_vxlan_mask);
2575 case RTE_FLOW_ITEM_TYPE_GENEVE:
2576 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2577 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2578 ctx->tunnel_def_mask_size =
2579 sizeof(rte_flow_item_geneve_mask);
2581 case RTE_FLOW_ITEM_TYPE_NVGRE:
2582 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2583 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2584 ctx->tunnel_def_mask_size =
2585 sizeof(rte_flow_item_nvgre_mask);
2587 case RTE_FLOW_ITEM_TYPE_END:
2597 switch (ctx->ft_rule_type) {
2598 case SFC_FT_RULE_NONE:
2599 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2602 case SFC_FT_RULE_JUMP:
2603 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
2604 return rte_flow_error_set(error, ENOTSUP,
2605 RTE_FLOW_ERROR_TYPE_ITEM,
2606 pattern, "tunnel offload: JUMP: invalid item");
2608 ctx->encap_type = ctx->ft->encap_type;
2610 case SFC_FT_RULE_GROUP:
2611 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2612 return rte_flow_error_set(error, EINVAL,
2613 RTE_FLOW_ERROR_TYPE_ITEM,
2614 NULL, "tunnel offload: GROUP: missing tunnel item");
2615 } else if (ctx->encap_type != ctx->ft->encap_type) {
2616 return rte_flow_error_set(error, EINVAL,
2617 RTE_FLOW_ERROR_TYPE_ITEM,
2618 pattern, "tunnel offload: GROUP: tunnel type mismatch");
2622 * The HW/FW hasn't got support for the use of "ENC" fields in
2623 * action rules (except the VNET_ID one) yet. As a workaround,
2624 * start parsing the pattern from the tunnel item.
2626 ctx->pattern = pattern;
2629 SFC_ASSERT(B_FALSE);
2633 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2634 return rte_flow_error_set(error, ENOTSUP,
2635 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2636 "OR: unsupported tunnel type");
2639 switch (ctx->ft_rule_type) {
2640 case SFC_FT_RULE_JUMP:
2641 recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
2643 case SFC_FT_RULE_NONE:
2644 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2645 return rte_flow_error_set(error, ENOTSUP,
2646 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2647 NULL, "OR: unsupported priority level");
2650 rc = efx_mae_match_spec_init(sa->nic,
2651 EFX_MAE_RULE_OUTER, ctx->priority,
2652 &ctx->match_spec_outer);
2654 return rte_flow_error_set(error, rc,
2655 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2656 "OR: failed to initialise the match specification");
2660 * Outermost items comprise a match
2661 * specification of type OUTER.
2663 ctx->match_spec = ctx->match_spec_outer;
2665 /* Outermost items use "ENC" EFX MAE field IDs. */
2666 ctx->field_ids_remap = field_ids_remap_to_encap;
2668 rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
2671 return rte_flow_error_set(error, rc,
2672 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2673 "OR: failed to initialise RECIRC_ID");
2676 case SFC_FT_RULE_GROUP:
2677 /* Outermost items -> "ENC" match fields in the action rule. */
2678 ctx->field_ids_remap = field_ids_remap_to_encap;
2679 ctx->match_spec = ctx->match_spec_action;
2681 /* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
2682 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2685 SFC_ASSERT(B_FALSE);
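/*
 * Editor's note: recap of what the function above selects for the
 * outermost pattern items, per tunnel offload rule type:
 *
 *     ft_rule_type  match specification            field_ids_remap
 *     NONE          match_spec_outer (new OR)      field_ids_remap_to_encap
 *     JUMP          match_spec_outer (new OR with  field_ids_remap_to_encap
 *                   RECIRC_ID = tunnel mark)
 *     GROUP         match_spec_action (no own OR)  field_ids_remap_to_encap
 */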
2693 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2694 struct sfc_mae_parse_ctx *ctx)
2696 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2699 if (ctx->match_spec_outer != NULL)
2700 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2704 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2705 const struct rte_flow_item pattern[],
2706 struct sfc_flow_spec_mae *spec,
2707 struct rte_flow_error *error)
2709 struct sfc_mae_parse_ctx ctx_mae;
2710 unsigned int priority_shift = 0;
2711 struct sfc_flow_parse_ctx ctx;
2714 memset(&ctx_mae, 0, sizeof(ctx_mae));
2715 ctx_mae.ft_rule_type = spec->ft_rule_type;
2716 ctx_mae.priority = spec->priority;
2717 ctx_mae.ft = spec->ft;
2720 switch (ctx_mae.ft_rule_type) {
2721 case SFC_FT_RULE_JUMP:
2723 * By design, this flow should be represented solely by the
2724 * outer rule. But the HW/FW hasn't got support for setting
2725 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
2726 * does it support outer rule counters. As a workaround, an
2727 * action rule of lower priority is used to do the job.
2732 case SFC_FT_RULE_GROUP:
2733 if (ctx_mae.priority != 0) {
2735 * Because of the above workaround, deny the
2736 * use of priorities to JUMP and GROUP rules.
2738 rc = rte_flow_error_set(error, ENOTSUP,
2739 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2740 "tunnel offload: priorities are not supported");
2741 goto fail_priority_check;
2745 case SFC_FT_RULE_NONE:
2746 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2747 spec->priority + priority_shift,
2748 &ctx_mae.match_spec_action);
2750 rc = rte_flow_error_set(error, rc,
2751 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2752 "AR: failed to initialise the match specification");
2753 goto fail_init_match_spec_action;
2757 SFC_ASSERT(B_FALSE);
2762 * As a preliminary setting, assume that there is no encapsulation
2763 * in the pattern. That is, pattern items are about to comprise a
2764 * match specification of type ACTION and use non-encap. field IDs.
2766 * sfc_mae_rule_encap_parse_init() below may override this.
2768 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2769 ctx_mae.match_spec = ctx_mae.match_spec_action;
2770 ctx_mae.field_ids_remap = field_ids_no_remap;
2771 ctx_mae.pattern = pattern;
2773 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2776 rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
2778 goto fail_encap_parse_init;
2781 * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
2782 * GROUP rule. Remember its properties for later use.
2784 spec->ft_rule_type = ctx_mae.ft_rule_type;
2785 spec->ft = ctx_mae.ft;
2787 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2788 ctx_mae.pattern, &ctx, error);
2790 goto fail_parse_pattern;
2792 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2794 goto fail_process_pattern_data;
2796 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2798 goto fail_process_outer;
2800 if (ctx_mae.match_spec_action != NULL &&
2801 !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2802 rc = rte_flow_error_set(error, ENOTSUP,
2803 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2804 "Inconsistent pattern");
2805 goto fail_validate_match_spec_action;
2808 spec->match_spec = ctx_mae.match_spec_action;
2812 fail_validate_match_spec_action:
2814 fail_process_pattern_data:
2816 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2818 fail_encap_parse_init:
2819 if (ctx_mae.match_spec_action != NULL)
2820 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2822 fail_init_match_spec_action:
2823 fail_priority_check:
2828 * An action supported by MAE may correspond to a bundle of RTE flow actions,
2829 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
2830 * That is, related RTE flow actions need to be tracked as parts of a whole
2831 * so that they can be combined into a single action and submitted to MAE
2832 * representation of a given rule's action set.
2834 * Each RTE flow action provided by an application gets classified as
2835 * one belonging to some bundle type. If an action is not supposed to
2836 * belong to any bundle, or if this action is END, it is described as
2837 * one belonging to a dummy bundle of type EMPTY.
2839 * A currently tracked bundle will be submitted if a repeating
2840 * action or an action of different bundle type follows.
2843 enum sfc_mae_actions_bundle_type {
2844 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2845 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2848 struct sfc_mae_actions_bundle {
2849 enum sfc_mae_actions_bundle_type type;
2851 /* Indicates actions already tracked by the current bundle */
2852 uint64_t actions_mask;
2854 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2855 rte_be16_t vlan_push_tpid;
2856 rte_be16_t vlan_push_tci;
2860 * Combine configuration of RTE flow actions tracked by the bundle into a
2861 * single action and submit the result to MAE action set specification.
2862 * Do nothing in the case of dummy action bundle.
2865 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2866 efx_mae_actions_t *spec)
2870 switch (bundle->type) {
2871 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2873 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2874 rc = efx_mae_action_set_populate_vlan_push(
2875 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2878 SFC_ASSERT(B_FALSE);
2886 * Given the type of the next RTE flow action in the line, decide
2887 * whether a new bundle is about to start, and, if this is the case,
2888 * submit and reset the current bundle.
2891 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2892 struct sfc_mae_actions_bundle *bundle,
2893 efx_mae_actions_t *spec,
2894 struct rte_flow_error *error)
2896 enum sfc_mae_actions_bundle_type bundle_type_new;
2899 switch (action->type) {
2900 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2901 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2902 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2903 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2907 * Self-sufficient actions, including END, are handled in this
2908 * case. No checks for unsupported actions are needed here
2909 * because parsing doesn't occur at this point.
2911 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2915 if (bundle_type_new != bundle->type ||
2916 (bundle->actions_mask & (1ULL << action->type)) != 0) {
2917 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2921 memset(bundle, 0, sizeof(*bundle));
2924 bundle->type = bundle_type_new;
2929 return rte_flow_error_set(error, rc,
2930 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2931 "Failed to request the (group of) action(s)");
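/*
 * Editor's note: worked trace of the sync logic above for the action
 * sequence OF_PUSH_VLAN, OF_SET_VLAN_VID, OF_SET_VLAN_PCP, PORT_ID, END:
 *
 *     OF_PUSH_VLAN    -> type EMPTY->VLAN_PUSH: submit EMPTY (no-op)
 *     OF_SET_VLAN_VID -> same type, bit not in actions_mask: accumulate
 *     OF_SET_VLAN_PCP -> same type, bit not in actions_mask: accumulate
 *     PORT_ID         -> type VLAN_PUSH->EMPTY: submit VLAN_PUSH as one
 *                        MAE VLAN_PUSH action
 *     END             -> final sync in sfc_mae_rule_parse_actions()
 */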
2935 sfc_mae_rule_parse_action_of_push_vlan(
2936 const struct rte_flow_action_of_push_vlan *conf,
2937 struct sfc_mae_actions_bundle *bundle)
2939 bundle->vlan_push_tpid = conf->ethertype;
2943 sfc_mae_rule_parse_action_of_set_vlan_vid(
2944 const struct rte_flow_action_of_set_vlan_vid *conf,
2945 struct sfc_mae_actions_bundle *bundle)
2947 bundle->vlan_push_tci |= (conf->vlan_vid &
2948 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2952 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2953 const struct rte_flow_action_of_set_vlan_pcp *conf,
2954 struct sfc_mae_actions_bundle *bundle)
2956 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2957 RTE_LEN2MASK(3, uint8_t)) << 13;
2959 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
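/*
 * Editor's note: worked example of the TCI assembly above (illustrative
 * values). For vlan_vid 0x123 and vlan_pcp 5:
 *
 *     VID & 0x0fff  = 0x0123
 *     PCP << 13     = 0xa000
 *     vlan_push_tci = 0xa123 (stored big-endian)
 */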
2962 struct sfc_mae_parsed_item {
2963 const struct rte_flow_item *item;
2964 size_t proto_header_ofst;
2965 size_t proto_header_size;
2969 * For each 16-bit word of the given header, override
2970 * bits enforced by the corresponding 16-bit mask.
2973 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2974 const struct sfc_mae_parsed_item *parsed_items,
2975 unsigned int nb_parsed_items)
2977 unsigned int item_idx;
2979 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2980 const struct sfc_mae_parsed_item *parsed_item;
2981 const struct rte_flow_item *item;
2982 size_t proto_header_size;
2985 parsed_item = &parsed_items[item_idx];
2986 proto_header_size = parsed_item->proto_header_size;
2987 item = parsed_item->item;
2989 for (ofst = 0; ofst < proto_header_size;
2990 ofst += sizeof(rte_be16_t)) {
2991 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2992 const rte_be16_t *w_maskp;
2993 const rte_be16_t *w_specp;
2995 w_maskp = RTE_PTR_ADD(item->mask, ofst);
2996 w_specp = RTE_PTR_ADD(item->spec, ofst);
2998 *wp &= ~(*w_maskp);
2999 *wp |= (*w_specp & *w_maskp);
3002 header_buf += proto_header_size;
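/*
 * Editor's note: worked example of the 16-bit merge above (illustrative
 * values). For a header word built as 0x0800, spec word 0x1234 and mask
 * 0xff00:
 *
 *     *wp = (0x0800 & ~0xff00) | (0x1234 & 0xff00)
 *         = 0x0000 | 0x1200
 *         = 0x1200
 */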
3006 #define SFC_IPV4_TTL_DEF 0x40
3007 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
3008 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
3009 #define SFC_VXLAN_FLAGS_DEF 0x08000000
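/*
 * Editor's note on the defaults above: SFC_IPV4_TTL_DEF is a TTL of 64,
 * SFC_IPV6_VTC_FLOW_DEF puts IP version 6 in the top nibble of vtc_flow,
 * and SFC_VXLAN_FLAGS_DEF sets the VXLAN "I" (valid VNI) flag bit.
 */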
3012 sfc_mae_rule_parse_action_vxlan_encap(
3013 struct sfc_mae *mae,
3014 const struct rte_flow_action_vxlan_encap *conf,
3015 efx_mae_actions_t *spec,
3016 struct rte_flow_error *error)
3018 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
3019 struct rte_flow_item *pattern = conf->definition;
3020 uint8_t *buf = bounce_eh->buf;
3022 /* This array will keep track of non-VOID pattern items. */
3023 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
3025 1 /* IPv4 or IPv6 */ +
3028 unsigned int nb_parsed_items = 0;
3030 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
3031 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
3032 sizeof(struct rte_ipv6_hdr))];
3033 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
3034 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
3035 struct rte_vxlan_hdr *vxlan = NULL;
3036 struct rte_udp_hdr *udp = NULL;
3037 unsigned int nb_vlan_tags = 0;
3038 size_t next_proto_ofst = 0;
3039 size_t ethertype_ofst = 0;
3043 if (pattern == NULL) {
3044 return rte_flow_error_set(error, EINVAL,
3045 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3046 "The encap. header definition is NULL");
3049 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
3050 bounce_eh->size = 0;
3053 * Process pattern items and remember non-VOID ones.
3054 * Defer applying masks until after the complete header
3055 * has been built from the pattern items.
3057 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
3059 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
3060 struct sfc_mae_parsed_item *parsed_item;
3061 const uint64_t exp_items_extra_vlan[] = {
3062 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
3064 size_t proto_header_size;
3065 rte_be16_t *ethertypep;
3066 uint8_t *next_protop;
3069 if (pattern->spec == NULL) {
3070 return rte_flow_error_set(error, EINVAL,
3071 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3072 "NULL item spec in the encap. header");
3075 if (pattern->mask == NULL) {
3076 return rte_flow_error_set(error, EINVAL,
3077 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3078 "NULL item mask in the encap. header");
3081 if (pattern->last != NULL) {
3082 /* This is not a match pattern, so disallow range. */
3083 return rte_flow_error_set(error, EINVAL,
3084 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3085 "Range item in the encap. header");
3088 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
3089 /* Handle VOID separately, for clarity. */
3093 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
3094 return rte_flow_error_set(error, ENOTSUP,
3095 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3096 "Unexpected item in the encap. header");
3099 parsed_item = &parsed_items[nb_parsed_items];
3100 buf_cur = buf + bounce_eh->size;
3102 switch (pattern->type) {
3103 case RTE_FLOW_ITEM_TYPE_ETH:
3104 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
3106 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
3109 proto_header_size = sizeof(struct rte_ether_hdr);
3111 ethertype_ofst = eth_ethertype_ofst;
3113 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
3114 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3115 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3117 case RTE_FLOW_ITEM_TYPE_VLAN:
3118 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
3120 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
3123 proto_header_size = sizeof(struct rte_vlan_hdr);
3125 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
3126 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
3128 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3129 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3133 offsetof(struct rte_vlan_hdr, eth_proto);
3135 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3136 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3137 exp_items |= exp_items_extra_vlan[nb_vlan_tags];
3141 case RTE_FLOW_ITEM_TYPE_IPV4:
3142 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
3144 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
3147 proto_header_size = sizeof(struct rte_ipv4_hdr);
3149 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3150 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3154 offsetof(struct rte_ipv4_hdr, next_proto_id);
3156 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
3158 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3160 case RTE_FLOW_ITEM_TYPE_IPV6:
3161 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
3163 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
3166 proto_header_size = sizeof(struct rte_ipv6_hdr);
3168 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3169 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3171 next_proto_ofst = bounce_eh->size +
3172 offsetof(struct rte_ipv6_hdr, proto);
3174 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
3176 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3178 case RTE_FLOW_ITEM_TYPE_UDP:
3179 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
3181 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
3184 proto_header_size = sizeof(struct rte_udp_hdr);
3186 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
3187 *next_protop = IPPROTO_UDP;
3189 udp = (struct rte_udp_hdr *)buf_cur;
3191 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
3193 case RTE_FLOW_ITEM_TYPE_VXLAN:
3194 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
3196 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
3199 proto_header_size = sizeof(struct rte_vxlan_hdr);
3201 vxlan = (struct rte_vxlan_hdr *)buf_cur;
3203 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
3204 udp->dgram_len = RTE_BE16(sizeof(*udp) +
3206 udp->dgram_cksum = 0;
3211 return rte_flow_error_set(error, ENOTSUP,
3212 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3213 "Unknown item in the encap. header");
3216 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
3217 return rte_flow_error_set(error, E2BIG,
3218 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3219 "The encap. header is too big");
3222 if ((proto_header_size & 1) != 0) {
3223 return rte_flow_error_set(error, EINVAL,
3224 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3225 "Odd layer size in the encap. header");
3228 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
3229 bounce_eh->size += proto_header_size;
3231 parsed_item->item = pattern;
3232 parsed_item->proto_header_size = proto_header_size;
3236 if (exp_items != 0) {
3237 /* Parsing item VXLAN would have reset exp_items to 0. */
3238 return rte_flow_error_set(error, ENOTSUP,
3239 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3240 "No item VXLAN in the encap. header");
3243 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
3244 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
3245 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
3246 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
3248 /* The HW cannot compute this checksum. */
3249 ipv4->hdr_checksum = 0;
3250 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
3252 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
3253 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
3254 ipv6->payload_len = udp->dgram_len;
3256 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
3258 /* Take care of the masks. */
3259 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
3261 rc = efx_mae_action_set_populate_encap(spec);
3263 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3264 NULL, "failed to request action ENCAP");
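/*
 * Editor's note: shape of an encap. header definition accepted by the
 * parser above (illustrative; the spec/mask objects are hypothetical,
 * every item needs both spec and mask, and 'last' must be absent):
 *
 *     struct rte_flow_item defn[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_v,   .mask = &eth_m },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_v,  .mask = &ipv4_m },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_v,   .mask = &udp_m },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_v, .mask = &vxlan_m },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_vxlan_encap conf = { .definition = defn };
 *
 * Up to two VLAN items may follow ETH, and IPV6 may replace IPV4.
 */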
3271 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3272 const struct rte_flow_action_mark *conf,
3273 const struct sfc_flow_spec_mae *spec_mae,
3274 efx_mae_actions_t *spec)
3278 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3279 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3280 } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3281 sfc_err(sa, "the mark value is too large");
3285 rc = efx_mae_action_set_populate_mark(spec, conf->id);
3287 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3293 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3294 const struct rte_flow_action_count *conf
3296 efx_mae_actions_t *spec)
3300 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3302 "counter queue is not configured for COUNT action");
3304 goto fail_counter_queue_uninit;
3307 if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
3309 goto fail_no_service_core;
3312 rc = efx_mae_action_set_populate_count(spec);
3315 "failed to populate counters in MAE action set: %s",
3317 goto fail_populate_count;
3322 fail_populate_count:
3323 fail_no_service_core:
3324 fail_counter_queue_uninit:
3330 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3331 const struct rte_flow_action_phy_port *conf,
3332 efx_mae_actions_t *spec)
3334 efx_mport_sel_t mport;
3338 if (conf->original != 0)
3339 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3341 phy_port = conf->index;
3343 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3345 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3346 phy_port, strerror(rc));
3350 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3352 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3353 mport.sel, strerror(rc));
3360 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3361 const struct rte_flow_action_vf *vf_conf,
3362 efx_mae_actions_t *spec)
3364 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3365 efx_mport_sel_t mport;
3369 if (vf_conf == NULL)
3370 vf = EFX_PCI_VF_INVALID;
3371 else if (vf_conf->original != 0)
3376 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3378 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3379 encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3384 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3386 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3387 mport.sel, strerror(rc));
3394 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3395 const struct rte_flow_action_port_id *conf,
3396 efx_mae_actions_t *spec)
3398 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3399 struct sfc_mae *mae = &sa->mae;
3400 efx_mport_sel_t mport;
3404 if (conf->id > UINT16_MAX)
3407 port_id = (conf->original != 0) ? sas->port_id : conf->id;
3409 rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
3412 sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
3413 port_id, strerror(rc));
3417 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3419 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3420 mport.sel, strerror(rc));
3426 static const char * const action_names[] = {
3427 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3428 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3429 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3430 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3431 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3432 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3433 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3434 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3435 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3436 [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3437 [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3438 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3439 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3440 [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
3444 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3445 const struct rte_flow_action *action,
3446 const struct sfc_flow_spec_mae *spec_mae,
3447 struct sfc_mae_actions_bundle *bundle,
3448 efx_mae_actions_t *spec,
3449 struct rte_flow_error *error)
3451 const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3452 const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3453 bool custom_error = B_FALSE;
3456 switch (action->type) {
3457 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3458 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3459 bundle->actions_mask);
3460 if (outer_rule == NULL ||
3461 outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3464 rc = efx_mae_action_set_populate_decap(spec);
3466 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3467 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3468 bundle->actions_mask);
3469 rc = efx_mae_action_set_populate_vlan_pop(spec);
3471 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3472 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3473 bundle->actions_mask);
3474 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3476 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3477 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3478 bundle->actions_mask);
3479 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3481 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3482 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3483 bundle->actions_mask);
3484 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3486 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3487 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3488 bundle->actions_mask);
3489 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3492 custom_error = B_TRUE;
3494 case RTE_FLOW_ACTION_TYPE_COUNT:
3495 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3496 bundle->actions_mask);
3497 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3499 case RTE_FLOW_ACTION_TYPE_FLAG:
3500 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3501 bundle->actions_mask);
3502 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3503 rc = efx_mae_action_set_populate_flag(spec);
3505 rc = rte_flow_error_set(error, ENOTSUP,
3506 RTE_FLOW_ERROR_TYPE_ACTION,
3508 "flag delivery has not been negotiated");
3509 custom_error = B_TRUE;
3512 case RTE_FLOW_ACTION_TYPE_MARK:
3513 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3514 bundle->actions_mask);
3515 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
3516 spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3517 rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
3520 rc = rte_flow_error_set(error, ENOTSUP,
3521 RTE_FLOW_ERROR_TYPE_ACTION,
3523 "mark delivery has not been negotiated");
3524 custom_error = B_TRUE;
3527 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3528 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3529 bundle->actions_mask);
3530 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3532 case RTE_FLOW_ACTION_TYPE_PF:
3533 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3534 bundle->actions_mask);
3535 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3537 case RTE_FLOW_ACTION_TYPE_VF:
3538 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3539 bundle->actions_mask);
3540 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3542 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3543 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3544 bundle->actions_mask);
3545 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3547 case RTE_FLOW_ACTION_TYPE_DROP:
3548 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3549 bundle->actions_mask);
3550 rc = efx_mae_action_set_populate_drop(spec);
3552 case RTE_FLOW_ACTION_TYPE_JUMP:
3553 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3554 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3559 return rte_flow_error_set(error, ENOTSUP,
3560 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3561 "Unsupported action");
3565 bundle->actions_mask |= (1ULL << action->type);
3566 } else if (!custom_error) {
3567 if (action->type < RTE_DIM(action_names)) {
3568 const char *action_name = action_names[action->type];
3570 if (action_name != NULL) {
3571 sfc_err(sa, "action %s was rejected: %s",
3572 action_name, strerror(rc));
3575 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3576 NULL, "Failed to request the action");
3583 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
3585 bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
3589 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3590 const struct sfc_mae_bounce_eh *bounce_eh,
3591 struct sfc_mae_encap_header **encap_headerp)
3593 if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3594 *encap_headerp = NULL;
3598 *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3599 if (*encap_headerp != NULL)
3602 return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
3606 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3607 const struct rte_flow_action actions[],
3608 struct sfc_flow_spec_mae *spec_mae,
3609 struct rte_flow_error *error)
3611 struct sfc_mae_encap_header *encap_header = NULL;
3612 struct sfc_mae_actions_bundle bundle = {0};
3613 struct sfc_flow_tunnel *counter_ft = NULL;
3614 uint64_t *ft_group_hit_counter = NULL;
3615 const struct rte_flow_action *action;
3616 struct sfc_mae *mae = &sa->mae;
3617 unsigned int n_count = 0;
3618 efx_mae_actions_t *spec;
3623 if (actions == NULL) {
3624 return rte_flow_error_set(error, EINVAL,
3625 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
3629 rc = efx_mae_action_set_spec_init(sa->nic, &spec);
3631 goto fail_action_set_spec_init;
3633 for (action = actions;
3634 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3635 if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
3639 if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
3640 /* JUMP rules don't decapsulate packets. GROUP rules do. */
3641 rc = efx_mae_action_set_populate_decap(spec);
3643 goto fail_enforce_ft_decap;
3645 if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
3647 * The user opted not to use action COUNT in this rule,
3648 * but the counter should be enabled implicitly because
3649 * packets hitting this rule contribute to the tunnel's
3650 * total number of hits. See sfc_mae_counter_get().
3652 rc = efx_mae_action_set_populate_count(spec);
3654 goto fail_enforce_ft_count;
3660 /* Cleanup after previous encap. header bounce buffer usage. */
3661 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3663 for (action = actions;
3664 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3665 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3667 goto fail_rule_parse_action;
3669 rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
3670 &bundle, spec, error);
3672 goto fail_rule_parse_action;
3675 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3677 goto fail_rule_parse_action;
3679 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
3681 goto fail_process_encap_header;
3685 sfc_err(sa, "too many count actions requested: %u", n_count);
3689 switch (spec_mae->ft_rule_type) {
3690 case SFC_FT_RULE_NONE:
3692 case SFC_FT_RULE_JUMP:
3693 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3694 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3696 goto fail_workaround_jump_delivery;
3698 counter_ft = spec_mae->ft;
3700 case SFC_FT_RULE_GROUP:
3702 * Packets that go to the rule's AR have FT mark set (from the
3703 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
3704 * packets. The user may have provided their own action
3705 * MARK above, so don't check the return value here.
3707 (void)efx_mae_action_set_populate_mark(spec, 0);
3709 ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
3712 SFC_ASSERT(B_FALSE);
3715 spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
3717 if (spec_mae->action_set != NULL) {
3718 sfc_mae_encap_header_del(sa, encap_header);
3719 efx_mae_action_set_spec_fini(sa->nic, spec);
3723 rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
3724 ft_group_hit_counter, counter_ft, n_count,
3725 &spec_mae->action_set);
3727 goto fail_action_set_add;
3731 fail_action_set_add:
3732 fail_workaround_jump_delivery:
3734 sfc_mae_encap_header_del(sa, encap_header);
3736 fail_process_encap_header:
3737 fail_rule_parse_action:
3738 efx_mae_action_set_spec_fini(sa->nic, spec);
3740 fail_enforce_ft_count:
3741 fail_enforce_ft_decap:
3742 fail_action_set_spec_init:
3743 if (rc > 0 && rte_errno == 0) {
3744 rc = rte_flow_error_set(error, rc,
3745 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3746 NULL, "Failed to process the action");
3752 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3753 const efx_mae_match_spec_t *left,
3754 const efx_mae_match_spec_t *right)
3756 bool have_same_class;
3759 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
3762 return (rc == 0) ? have_same_class : false;
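/*
 * Editor's note (assumed semantics): two match specifications belong to
 * the same class when they match on the same set of fields with identical
 * masks and differ only in the match values, which is why comparing the
 * candidate rule against the active rules in the verifiers below is
 * sufficient.
 */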
3766 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3767 struct sfc_mae_outer_rule *rule)
3769 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3770 struct sfc_mae_outer_rule *entry;
3771 struct sfc_mae *mae = &sa->mae;
3773 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3774 /* An active rule is reused. Its class is wittingly valid. */
3778 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3779 sfc_mae_outer_rules, entries) {
3780 const efx_mae_match_spec_t *left = entry->match_spec;
3781 const efx_mae_match_spec_t *right = rule->match_spec;
3786 if (sfc_mae_rules_class_cmp(sa, left, right))
3790 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3791 "support for outer frame pattern items is not guaranteed; "
3792 "other than that, the items are valid from SW standpoint");
3797 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3798 struct sfc_flow_spec_mae *spec)
3800 const struct rte_flow *entry;
3802 if (spec->match_spec == NULL)
3805 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3806 const struct sfc_flow_spec *entry_spec = &entry->spec;
3807 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3808 const efx_mae_match_spec_t *left = es_mae->match_spec;
3809 const efx_mae_match_spec_t *right = spec->match_spec;
3811 switch (entry_spec->type) {
3812 case SFC_FLOW_SPEC_FILTER:
3813 /* Ignore VNIC-level flows */
3815 case SFC_FLOW_SPEC_MAE:
3816 if (sfc_mae_rules_class_cmp(sa, left, right))
3824 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3825 "support for inner frame pattern items is not guaranteed; "
3826 "other than that, the items are valid from SW standpoint");
3831 * Confirm that a given flow can be accepted by the FW.
3834 * Software adapter context
3836 * Flow to be verified
3838 * Zero on success and non-zero in the case of error.
3839 * A special value of EAGAIN indicates that the adapter is
3840 * not in started state. This state is compulsory because
3841 * it only makes sense to compare the rule class of the flow
3842 * being validated with classes of the active rules.
3843 * Such classes are wittingly supported by the FW.
3846 sfc_mae_flow_verify(struct sfc_adapter *sa,
3847 struct rte_flow *flow)
3849 struct sfc_flow_spec *spec = &flow->spec;
3850 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3851 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3854 SFC_ASSERT(sfc_adapter_is_locked(sa));
3856 if (sa->state != SFC_ETHDEV_STARTED)
3859 if (outer_rule != NULL) {
3860 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
3865 return sfc_mae_action_rule_class_verify(sa, spec_mae);
3869 sfc_mae_flow_insert(struct sfc_adapter *sa,
3870 struct rte_flow *flow)
3872 struct sfc_flow_spec *spec = &flow->spec;
3873 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3874 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3875 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3876 struct sfc_mae_fw_rsrc *fw_rsrc;
3879 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
3881 if (outer_rule != NULL) {
3882 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
3883 spec_mae->match_spec);
3885 goto fail_outer_rule_enable;
3888 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3889 spec_mae->ft->reset_jump_hit_counter =
3890 spec_mae->ft->group_hit_counter;
3893 if (action_set == NULL) {
3894 sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
3898 rc = sfc_mae_action_set_enable(sa, action_set);
3900 goto fail_action_set_enable;
3902 if (action_set->n_counters > 0) {
3903 rc = sfc_mae_counter_start(sa);
3905 sfc_err(sa, "failed to start MAE counters support: %s",
3907 goto fail_mae_counter_start;
3911 fw_rsrc = &action_set->fw_rsrc;
3913 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3914 NULL, &fw_rsrc->aset_id,
3915 &spec_mae->rule_id);
3917 goto fail_action_rule_insert;
3919 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3920 flow, spec_mae->rule_id.id);
3924 fail_action_rule_insert:
3925 fail_mae_counter_start:
3926 sfc_mae_action_set_disable(sa, action_set);
3928 fail_action_set_enable:
3929 if (outer_rule != NULL)
3930 sfc_mae_outer_rule_disable(sa, outer_rule);
3932 fail_outer_rule_enable:
3937 sfc_mae_flow_remove(struct sfc_adapter *sa,
3938 struct rte_flow *flow)
3940 struct sfc_flow_spec *spec = &flow->spec;
3941 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3942 struct sfc_mae_action_set *action_set = spec_mae->action_set;
3943 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3946 if (action_set == NULL) {
3947 sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
3948 goto skip_action_rule;
3951 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3953 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3955 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3956 flow, spec_mae->rule_id.id, strerror(rc));
3958 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3959 flow, spec_mae->rule_id.id);
3960 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3962 sfc_mae_action_set_disable(sa, action_set);
3965 if (outer_rule != NULL)
3966 sfc_mae_outer_rule_disable(sa, outer_rule);
3972 sfc_mae_query_counter(struct sfc_adapter *sa,
3973 struct sfc_flow_spec_mae *spec,
3974 const struct rte_flow_action *action,
3975 struct rte_flow_query_count *data,
3976 struct rte_flow_error *error)
3978 struct sfc_mae_action_set *action_set = spec->action_set;
3979 const struct rte_flow_action_count *conf = action->conf;
3983 if (action_set == NULL || action_set->n_counters == 0) {
3984 return rte_flow_error_set(error, EINVAL,
3985 RTE_FLOW_ERROR_TYPE_ACTION, action,
3986 "Queried flow rule does not have count actions");
3989 for (i = 0; i < action_set->n_counters; i++) {
3991 * Get the first available counter of the flow rule if
3992 * counter ID is not specified, provided that this
3993 * counter is not an automatic (implicit) one.
3995 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
3998 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
3999 &action_set->counters[i], data);
4001 return rte_flow_error_set(error, EINVAL,
4002 RTE_FLOW_ERROR_TYPE_ACTION, action,
4003 "Queried flow rule counter action is invalid");
4009 return rte_flow_error_set(error, ENOENT,
4010 RTE_FLOW_ERROR_TYPE_ACTION, action,
4011 "no such COUNT action in the flow rule or no such count ID");
4015 sfc_mae_flow_query(struct rte_eth_dev *dev,
4016 struct rte_flow *flow,
4017 const struct rte_flow_action *action,
4019 struct rte_flow_error *error)
4021 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
4022 struct sfc_flow_spec *spec = &flow->spec;
4023 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4025 switch (action->type) {
4026 case RTE_FLOW_ACTION_TYPE_COUNT:
4027 return sfc_mae_query_counter(sa, spec_mae, action,
4030 return rte_flow_error_set(error, ENOTSUP,
4031 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4032 "Query for action of this type is not supported");
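/*
 * Editor's note: illustrative application-side query served by the
 * handler above (port_id, flow and error are hypothetical variables):
 *
 *     struct rte_flow_query_count data = { .reset = 0 };
 *     const struct rte_flow_action count = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *             .conf = NULL, // first available explicit counter
 *     };
 *
 *     rc = rte_flow_query(port_id, flow, &count, &data, &error);
 */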
4037 sfc_mae_switchdev_init(struct sfc_adapter *sa)
4039 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
4040 struct sfc_mae *mae = &sa->mae;
4042 efx_mport_sel_t phy;
4045 sfc_log_init(sa, "entry");
4047 if (!sa->switchdev) {
4048 sfc_log_init(sa, "switchdev is not enabled - skip");
4052 if (mae->status != SFC_MAE_STATUS_ADMIN) {
4054 sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
4058 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
4061 sfc_err(sa, "failed to get PF mport");
4065 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
4067 sfc_err(sa, "failed to get PHY mport");
4071 rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
4072 SFC_MAE_RULE_PRIO_LOWEST,
4073 &mae->switchdev_rule_pf_to_ext);
4075 sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
4079 rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
4080 SFC_MAE_RULE_PRIO_LOWEST,
4081 &mae->switchdev_rule_ext_to_pf);
4083 sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
4087 sfc_log_init(sa, "done");
4092 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4098 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
4103 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
4105 struct sfc_mae *mae = &sa->mae;
4110 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4111 sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);