net/sfc: move adapter state enum to separate header
drivers/net/sfc/sfc_mae.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"

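/*
 * Derive the m-port selector for the PCIe function this driver instance
 * is bound to. This "entity" m-port represents the adapter itself in
 * MAE switch terms.
 */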
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
                              uint32_t nb_counters_max)
{
        return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}

static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
        sfc_mae_counters_fini(&registry->counters);
}

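/*
 * Linearly scan the fixed-size table of internal rules for a free slot.
 * A slot is considered free while its match spec pointer is NULL.
 */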
static int
sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
                                      struct sfc_mae_rule **rule)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
        unsigned int entry;
        int rc;

        for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
                if (internal_rules->rules[entry].spec == NULL)
                        break;
        }

        if (entry == SFC_MAE_NB_RULES_MAX) {
                rc = ENOSPC;
                sfc_err(sa, "failed: too many rules (%u rules used)", entry);
                goto fail_too_many_rules;
        }

        *rule = &internal_rules->rules[entry];

        return 0;

fail_too_many_rules:
        return rc;
}

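/*
 * Insert an internal MAE action rule which matches traffic coming from
 * 'mport_match' and delivers it to 'mport_deliver'. A negative 'prio'
 * selects the numerically highest priority supported by the action
 * rule table.
 */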
int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
                                     const efx_mport_sel_t *mport_match,
                                     const efx_mport_sel_t *mport_deliver,
                                     int prio, struct sfc_mae_rule **rulep)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_rule *rule;
        int rc;

        sfc_log_init(sa, "entry");

        if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
                rc = EINVAL;
                sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
                        mae->nb_action_rule_prios_max);
                goto fail_invalid_prio;
        }
        if (prio < 0)
                prio = mae->nb_action_rule_prios_max - 1;

        rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
        if (rc != 0)
                goto fail_find_empty_slot;

        sfc_log_init(sa, "init MAE match spec");
        rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                     (uint32_t)prio, &rule->spec);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE match spec");
                goto fail_match_init;
        }

        rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
        if (rc != 0) {
                sfc_err(sa, "failed to set MAE match mport selector");
                goto fail_mport_set;
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE action set");
                goto fail_action_init;
        }

        rc = efx_mae_action_set_populate_deliver(rule->actions,
                                                 mport_deliver);
        if (rc != 0) {
                sfc_err(sa, "failed to populate deliver action");
                goto fail_populate_deliver;
        }

        rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
                                      &rule->action_set);
        if (rc != 0) {
                sfc_err(sa, "failed to allocate action set");
                goto fail_action_set_alloc;
        }

        rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
                                        &rule->action_set,
                                        &rule->rule_id);
        if (rc != 0) {
                sfc_err(sa, "failed to insert action rule");
                goto fail_rule_insert;
        }

        *rulep = rule;

        sfc_log_init(sa, "done");

        return 0;

fail_rule_insert:
        efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
        efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
        return rc;
}

void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
        if (rule == NULL || rule->spec == NULL)
                return;

        efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
        efx_mae_action_set_free(sa->nic, &rule->action_set);
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);
        efx_mae_match_spec_fini(sa->nic, rule->spec);

        rule->spec = NULL;
}

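/*
 * Bring up MAE support for the adapter: initialise the MAE in firmware,
 * query its limits, set up the counter registry, register the adapter
 * in the RTE switch infrastructure and allocate a bounce buffer for
 * encapsulation headers.
 */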
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "init MAE counter registry");
        rc = sfc_mae_counter_registry_init(&mae->counter_registry,
                                           limits.eml_max_n_counters);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE counter registry for %u entries: %s",
                        limits.eml_max_n_counters, rte_strerror(rc));
                goto fail_counter_registry_init;
        }

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /*
         * As of now, the driver does not support representors, so
         * RTE ethdev MPORT simply matches that of the entity.
         */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        sfc_log_init(sa, "allocate encap. header bounce buffer");
        bounce_eh->buf_size = limits.eml_encap_header_size_limit;
        bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                    bounce_eh->buf_size, 0);
        if (bounce_eh->buf == NULL) {
                rc = ENOMEM;
                goto fail_mae_alloc_bounce_eh;
        }

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        mae->encap_types_supported = limits.eml_encap_types_supported;
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
        sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        rte_free(mae->bounce_eh.buf);
        sfc_mae_counter_registry_fini(&mae->counter_registry);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

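/*
 * Outer rules, encap. headers and action sets follow a common pattern:
 * _attach() looks up an existing equivalent entry and bumps its
 * driver-level reference count, _add() creates a new entry with the
 * count set to 1, and _del() drops a reference, freeing the entry once
 * the count reaches zero. A typical consumer (sketch; error handling
 * omitted) does:
 *
 *	rule = sfc_mae_outer_rule_attach(sa, match_spec, encap_type);
 *	if (rule == NULL)
 *		(void)sfc_mae_outer_rule_add(sa, match_spec, encap_type,
 *					     &rule);
 */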
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        sfc_dbg(sa, "attaching to outer_rule=%p", rule);
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        sfc_dbg(sa, "added outer_rule=%p", rule);

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
            rule->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
                        rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
        }

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);

        sfc_dbg(sa, "deleted outer_rule=%p", rule);
}

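/*
 * _enable() lazily allocates the FW resource on the first use and then
 * maintains a separate FW-level reference count; _disable() frees the
 * FW resource when its last user goes away. This keeps FW resources
 * allocated only while at least one inserted flow actually needs them.
 */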
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable outer_rule=%p: %s",
                                rule, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
                        rule, fw_rsrc->rule_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
                        rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
                                rule, fw_rsrc->rule_id.id);
                } else {
                        sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
                                rule, fw_rsrc->rule_id.id, strerror(rc));
                }
                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

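/*
 * Encapsulation headers are deduplicated by a byte-wise comparison of
 * the prepared header data against the bounce buffer contents.
 */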
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        sfc_dbg(sa, "attaching to encap_header=%p",
                                encap_header);
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        sfc_dbg(sa, "added encap_header=%p", encap_header);

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
            encap_header->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
                        encap_header, encap_header->fw_rsrc.eh_id.id,
                        encap_header->fw_rsrc.refcnt);
        }

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);

        sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}

static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable encap_header=%p: %s",
                                encap_header, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
                        encap_header, fw_rsrc->eh_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
                        encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
                                encap_header, fw_rsrc->eh_id.id);
                } else {
                        sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
                                encap_header, fw_rsrc->eh_id.id, strerror(rc));
                }
                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

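/*
 * Only a single counter per action set is supported at the moment,
 * hence the n_counters == 1 assertion below. The counter is enabled
 * in the counter registry first and then referenced from the action
 * set specification.
 */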
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
                        struct sfc_mae_counter_id *counters,
                        unsigned int n_counters,
                        efx_mae_actions_t *action_set_spec)
{
        int rc;

        sfc_log_init(sa, "entry");

        if (n_counters == 0) {
                sfc_log_init(sa, "no counters - skip");
                return 0;
        }

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        rc = sfc_mae_counter_enable(sa, &counters[0]);
        if (rc != 0) {
                sfc_err(sa, "failed to enable MAE counter %u: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_counter_add;
        }

        rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
                                                   &counters[0].mae_id);
        if (rc != 0) {
                sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_fill_in_id;
        }

        return 0;

fail_fill_in_id:
        (void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
        return rc;
}

static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
                         struct sfc_mae_counter_id *counters,
                         unsigned int n_counters)
{
        if (n_counters == 0)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
                sfc_err(sa, "failed to disable: already disabled");
                return EALREADY;
        }

        return sfc_mae_counter_disable(sa, &counters[0]);
}

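/*
 * Action sets are deduplicated, too, but only when they carry no COUNT
 * action: each COUNT needs its own FW counter, so such action sets are
 * never shared.
 */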
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          unsigned int n_count,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                /*
                 * Shared counters are not supported, hence action sets with
                 * COUNT are not attachable.
                 */
                if (action_set->encap_header == encap_header &&
                    n_count == 0 &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        sfc_dbg(sa, "attaching to action_set=%p", action_set);
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       unsigned int n_counters,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;
        unsigned int i;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL) {
                sfc_err(sa, "failed to alloc action set");
                return ENOMEM;
        }

        if (n_counters > 0) {
                const struct rte_flow_action *action;

                action_set->counters = rte_malloc("sfc_mae_counter_ids",
                        sizeof(action_set->counters[0]) * n_counters, 0);
                if (action_set->counters == NULL) {
                        rte_free(action_set);
                        sfc_err(sa, "failed to alloc counters");
                        return ENOMEM;
                }

                for (action = actions, i = 0;
                     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
                     ++action) {
                        const struct rte_flow_action_count *conf;

                        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                                continue;

                        conf = action->conf;

                        action_set->counters[i].mae_id.id =
                                EFX_MAE_RSRC_ID_INVALID;
                        action_set->counters[i].rte_id = conf->id;
                        i++;
                }
                action_set->n_counters = n_counters;
        }

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        sfc_dbg(sa, "added action_set=%p", action_set);

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
            action_set->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
                        action_set, action_set->fw_rsrc.aset_id.id,
                        action_set->fw_rsrc.refcnt);
        }

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        if (action_set->n_counters > 0) {
                SFC_ASSERT(action_set->n_counters == 1);
                SFC_ASSERT(action_set->counters[0].mae_id.id ==
                           EFX_MAE_RSRC_ID_INVALID);
                rte_free(action_set->counters);
        }
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);

        sfc_dbg(sa, "deleted action_set=%p", action_set);
}

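/*
 * Enabling an action set enables its dependencies first (the encap.
 * header and the counters) and only then allocates the action set in
 * FW; the error paths unwind in the reverse order.
 */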
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_counter_id *counters = action_set->counters;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = sfc_mae_counters_enable(sa, counters,
                                             action_set->n_counters,
                                             action_set->spec);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));

                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable action_set=%p: %s",
                                action_set, strerror(rc));

                        (void)sfc_mae_counters_disable(sa, counters,
                                                       action_set->n_counters);
                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
                        action_set, fw_rsrc->aset_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
                        action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
                                action_set, fw_rsrc->aset_id.id);
                } else {
                        sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
                                action_set, fw_rsrc->aset_id.id, strerror(rc));
                }
                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                rc = sfc_mae_counters_disable(sa, action_set->counters,
                                              action_set->n_counters);
                if (rc != 0) {
                        sfc_err(sa, "failed to disable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));
                }

                sfc_mae_encap_header_disable(sa, action_set->encap_header);
        }

        --(fw_rsrc->refcnt);
}

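/*
 * Drop the driver-level references held by a flow. The flow must have
 * been disabled beforehand, which is what the rule ID assertion below
 * checks.
 */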
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

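/*
 * EtherType and TPID handling is deferred until the whole pattern has
 * been parsed: the innermost "type" value is set as the L3 EtherType,
 * while the outer ones become VLAN0/VLAN1 TPIDs, depending on the
 * number of VLAN tags collected in the pattern data.
 */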
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with the RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is an L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single VLAN item is followed by an L3 item, the
                 * value of "type" in the ETH item can't be a
                 * double-tagging TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
                rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
                unsigned int tpid_idx;

                /*
                 * This loop can have at most two iterations. On the second
                 * one, drop the outer tag presence enforcement bit because
                 * matching on the inner tag presence implies that the outer
                 * tag is present anyway.
                 */
                enforce_tag_presence[0] = B_FALSE;

                if (tpid_m == RTE_BE16(0)) {
                        if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
                                enforce_tag_presence[ethertype_idx] = B_TRUE;

                        /* No match on this field, and no value check. */
                        nb_supported_tpids = 1;
                        continue;
                }

                /* Only an exact match is supported. */
                if (tpid_m != RTE_BE16(0xffff)) {
                        sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
                                rte_be_to_cpu_16(tpid_m));
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (tpid_v == supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
                                rte_be_to_cpu_16(tpid_v));
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
                rte_be16_t enforced_et;

                enforced_et = pdata->innermost_ethertype_restriction.value;

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value = enforced_et;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value != enforced_et) {
                        sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
                                rte_be_to_cpu_16(enforced_et),
                                rte_be_to_cpu_16(et->value),
                                rte_be_to_cpu_16(et->mask));
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now that the number of VLAN tags is known, set the fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is a valid L3 EtherType (or 0x0000/0x0000), and the
         * last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                                pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
                                pdata->l3_next_proto_restriction_value,
                                pdata->l3_next_proto_value,
                                pdata->l3_next_proto_mask);
                        rc = EINVAL;
                        goto fail;
                }
        }

        if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_OVLAN],
                                                enforce_tag_presence[0] ||
                                                pdata->has_ovlan_value);
                if (rc != 0)
                        goto fail;
        }

        if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_IVLAN],
                                                enforce_tag_presence[1] ||
                                                pdata->has_ivlan_value);
                if (rc != 0)
                        goto fail;
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

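/*
 * The PORT_ID, PHY_PORT, PF and VF items below are alternative ways to
 * specify the traffic source. Each of them is translated to an m-port
 * selector match, and only one such item is allowed per pattern, which
 * is what the match_mport_set flag enforces.
 */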
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

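/*
 * Build the supported mask for an item: all bytes covered by some field
 * locator become matchable (0xff), everything else must be zero.
 */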
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

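/*
 * Generic item parser: copy each located field from the item spec/mask
 * into the MAE match specification, translating field IDs through the
 * remap table. Deferred fields are skipped here and handled later by
 * sfc_mae_rule_process_pattern_data().
 */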
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
                   struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[fl->field_id],
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

1460 static const struct sfc_mae_field_locator flocs_eth[] = {
1461         {
1462                 /*
1463                  * This locator is used only for building supported fields mask.
1464                  * The field is handled by sfc_mae_rule_process_pattern_data().
1465                  */
1466                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1467                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1468                 offsetof(struct rte_flow_item_eth, type),
1469         },
1470         {
1471                 EFX_MAE_FIELD_ETH_DADDR_BE,
1472                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1473                 offsetof(struct rte_flow_item_eth, dst),
1474         },
1475         {
1476                 EFX_MAE_FIELD_ETH_SADDR_BE,
1477                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1478                 offsetof(struct rte_flow_item_eth, src),
1479         },
1480 };
1481
1482 static int
1483 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1484                             struct sfc_flow_parse_ctx *ctx,
1485                             struct rte_flow_error *error)
1486 {
1487         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1488         struct rte_flow_item_eth supp_mask;
1489         const uint8_t *spec = NULL;
1490         const uint8_t *mask = NULL;
1491         int rc;
1492
1493         sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1494                                      &supp_mask, sizeof(supp_mask));
1495         supp_mask.has_vlan = 1;
1496
1497         rc = sfc_flow_parse_init(item,
1498                                  (const void **)&spec, (const void **)&mask,
1499                                  (const void *)&supp_mask,
1500                                  &rte_flow_item_eth_mask,
1501                                  sizeof(struct rte_flow_item_eth), error);
1502         if (rc != 0)
1503                 return rc;
1504
1505         if (spec != NULL) {
1506                 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1507                 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1508                 const struct rte_flow_item_eth *item_spec;
1509                 const struct rte_flow_item_eth *item_mask;
1510
1511                 item_spec = (const struct rte_flow_item_eth *)spec;
1512                 item_mask = (const struct rte_flow_item_eth *)mask;
1513
1514                 /*
1515                  * Remember various match criteria in the parsing context.
1516                  * sfc_mae_rule_process_pattern_data() will consider them
1517                  * altogether when the rest of the items have been parsed.
1518                  */
1519                 ethertypes[0].value = item_spec->type;
1520                 ethertypes[0].mask = item_mask->type;
1521                 if (item_mask->has_vlan) {
1522                         pdata->has_ovlan_mask = B_TRUE;
1523                         if (item_spec->has_vlan)
1524                                 pdata->has_ovlan_value = B_TRUE;
1525                 }
1526         } else {
1527                 /*
1528                  * The specification is empty. The overall pattern
1529                  * validity will be enforced at the end of parsing.
1530                  * See sfc_mae_rule_process_pattern_data().
1531                  */
1532                 return 0;
1533         }
1534
1535         return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1536                                   ctx_mae, error);
1537 }
1538
1539 static const struct sfc_mae_field_locator flocs_vlan[] = {
1540         /* Outermost tag */
1541         {
1542                 EFX_MAE_FIELD_VLAN0_TCI_BE,
1543                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1544                 offsetof(struct rte_flow_item_vlan, tci),
1545         },
1546         {
1547                 /*
1548                  * This locator is used only for building supported fields mask.
1549                  * The field is handled by sfc_mae_rule_process_pattern_data().
1550                  */
1551                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1552                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1553                 offsetof(struct rte_flow_item_vlan, inner_type),
1554         },
1555
1556         /* Innermost tag */
1557         {
1558                 EFX_MAE_FIELD_VLAN1_TCI_BE,
1559                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1560                 offsetof(struct rte_flow_item_vlan, tci),
1561         },
1562         {
1563                 /*
1564                  * This locator is used only for building supported fields mask.
1565                  * The field is handled by sfc_mae_rule_process_pattern_data().
1566                  */
1567                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1568                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1569                 offsetof(struct rte_flow_item_vlan, inner_type),
1570         },
1571 };
1572
1573 static int
1574 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1575                              struct sfc_flow_parse_ctx *ctx,
1576                              struct rte_flow_error *error)
1577 {
1578         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1579         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1580         boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1581                 &pdata->has_ovlan_mask,
1582                 &pdata->has_ivlan_mask,
1583         };
1584         boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1585                 &pdata->has_ovlan_value,
1586                 &pdata->has_ivlan_value,
1587         };
1588         boolean_t *cur_tag_presence_bit_mp;
1589         boolean_t *cur_tag_presence_bit_vp;
1590         const struct sfc_mae_field_locator *flocs;
1591         struct rte_flow_item_vlan supp_mask;
1592         const uint8_t *spec = NULL;
1593         const uint8_t *mask = NULL;
1594         unsigned int nb_flocs;
1595         int rc;
1596
1597         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1598
1599         if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1600                 return rte_flow_error_set(error, ENOTSUP,
1601                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1602                                 "Can't match that many VLAN tags");
1603         }
1604
1605         cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1606         cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1607
1608         if (*cur_tag_presence_bit_mp == B_TRUE &&
1609             *cur_tag_presence_bit_vp == B_FALSE) {
1610                 return rte_flow_error_set(error, EINVAL,
1611                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1612                                 "The previous item enforces no (more) VLAN, "
1613                                 "so the current item (VLAN) must not exist");
1614         }
1615
1616         nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1617         flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
1618
1619         sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1620                                      &supp_mask, sizeof(supp_mask));
1621         /*
1622          * This only means that the field is supported by the driver and libefx.
1623          * Support on NIC level will be checked when all items have been parsed.
1624          */
1625         supp_mask.has_more_vlan = 1;
1626
1627         rc = sfc_flow_parse_init(item,
1628                                  (const void **)&spec, (const void **)&mask,
1629                                  (const void *)&supp_mask,
1630                                  &rte_flow_item_vlan_mask,
1631                                  sizeof(struct rte_flow_item_vlan), error);
1632         if (rc != 0)
1633                 return rc;
1634
1635         if (spec != NULL) {
1636                 struct sfc_mae_ethertype *et = pdata->ethertypes;
1637                 const struct rte_flow_item_vlan *item_spec;
1638                 const struct rte_flow_item_vlan *item_mask;
1639
1640                 item_spec = (const struct rte_flow_item_vlan *)spec;
1641                 item_mask = (const struct rte_flow_item_vlan *)mask;
1642
1643                 /*
1644                  * Remember various match criteria in the parsing context.
1645                  * sfc_mae_rule_process_pattern_data() will consider them
1646                  * altogether when the rest of the items have been parsed.
1647                  */
1648                 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1649                 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1650                 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1651                 if (item_mask->has_more_vlan) {
1652                         if (pdata->nb_vlan_tags ==
1653                             SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1654                                 return rte_flow_error_set(error, ENOTSUP,
1655                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1656                                         "Can't use 'has_more_vlan' in "
1657                                         "the second VLAN item");
1658                         }
1659                         pdata->has_ivlan_mask = B_TRUE;
1660                         if (item_spec->has_more_vlan)
1661                                 pdata->has_ivlan_value = B_TRUE;
1662                 }
1663
1664                 /* Convert TCI to MAE representation right now. */
1665                 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1666                                         ctx_mae, error);
1667                 if (rc != 0)
1668                         return rc;
1669         }
1670
1671         ++(pdata->nb_vlan_tags);
1672
1673         return 0;
1674 }
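
/*
 * Illustrative walk-through: for pattern ETH / VLAN / VLAN, the first
 * VLAN item fills in EFX_MAE_FIELD_VLAN0_TCI_BE and stashes its
 * "inner_type" in ethertypes[1], whilst the second one fills in
 * EFX_MAE_FIELD_VLAN1_TCI_BE and stashes into ethertypes[2].
 */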
1675
1676 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1677         {
1678                 EFX_MAE_FIELD_SRC_IP4_BE,
1679                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1680                 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1681         },
1682         {
1683                 EFX_MAE_FIELD_DST_IP4_BE,
1684                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1685                 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1686         },
1687         {
1688                 /*
1689                  * This locator is used only for building supported fields mask.
1690                  * The field is handled by sfc_mae_rule_process_pattern_data().
1691                  */
1692                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1693                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1694                 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1695         },
1696         {
1697                 EFX_MAE_FIELD_IP_TOS,
1698                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1699                                  hdr.type_of_service),
1700                 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1701         },
1702         {
1703                 EFX_MAE_FIELD_IP_TTL,
1704                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1705                 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1706         },
1707 };
1708
1709 static int
1710 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1711                              struct sfc_flow_parse_ctx *ctx,
1712                              struct rte_flow_error *error)
1713 {
1714         rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1715         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1716         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1717         struct rte_flow_item_ipv4 supp_mask;
1718         const uint8_t *spec = NULL;
1719         const uint8_t *mask = NULL;
1720         int rc;
1721
1722         sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1723                                      &supp_mask, sizeof(supp_mask));
1724
1725         rc = sfc_flow_parse_init(item,
1726                                  (const void **)&spec, (const void **)&mask,
1727                                  (const void *)&supp_mask,
1728                                  &rte_flow_item_ipv4_mask,
1729                                  sizeof(struct rte_flow_item_ipv4), error);
1730         if (rc != 0)
1731                 return rc;
1732
1733         pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1734         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1735
1736         if (spec != NULL) {
1737                 const struct rte_flow_item_ipv4 *item_spec;
1738                 const struct rte_flow_item_ipv4 *item_mask;
1739
1740                 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1741                 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1742
1743                 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1744                 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1745         } else {
1746                 return 0;
1747         }
1748
1749         return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1750                                   ctx_mae, error);
1751 }
1752
1753 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1754         {
1755                 EFX_MAE_FIELD_SRC_IP6_BE,
1756                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1757                 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1758         },
1759         {
1760                 EFX_MAE_FIELD_DST_IP6_BE,
1761                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1762                 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1763         },
1764         {
1765                 /*
1766                  * This locator is used only for building supported fields mask.
1767                  * The field is handled by sfc_mae_rule_process_pattern_data().
1768                  */
1769                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1770                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1771                 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1772         },
1773         {
1774                 EFX_MAE_FIELD_IP_TTL,
1775                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1776                 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1777         },
1778 };
1779
1780 static int
1781 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1782                              struct sfc_flow_parse_ctx *ctx,
1783                              struct rte_flow_error *error)
1784 {
1785         rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1786         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1787         const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1788         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1789         struct rte_flow_item_ipv6 supp_mask;
1790         const uint8_t *spec = NULL;
1791         const uint8_t *mask = NULL;
1792         rte_be32_t vtc_flow_be;
1793         uint32_t vtc_flow;
1794         uint8_t tc_value;
1795         uint8_t tc_mask;
1796         int rc;
1797
1798         sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1799                                      &supp_mask, sizeof(supp_mask));
1800
1801         vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1802         memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1803
1804         rc = sfc_flow_parse_init(item,
1805                                  (const void **)&spec, (const void **)&mask,
1806                                  (const void *)&supp_mask,
1807                                  &rte_flow_item_ipv6_mask,
1808                                  sizeof(struct rte_flow_item_ipv6), error);
1809         if (rc != 0)
1810                 return rc;
1811
1812         pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1813         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1814
1815         if (spec != NULL) {
1816                 const struct rte_flow_item_ipv6 *item_spec;
1817                 const struct rte_flow_item_ipv6 *item_mask;
1818
1819                 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1820                 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1821
1822                 pdata->l3_next_proto_value = item_spec->hdr.proto;
1823                 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1824         } else {
1825                 return 0;
1826         }
1827
1828         rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1829                                 ctx_mae, error);
1830         if (rc != 0)
1831                 return rc;
1832
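        /*
         * IPv6 "vtc_flow" packs version (4 bits), traffic class
         * (8 bits) and flow label (20 bits). E.g. (illustrative):
         * vtc_flow 0x6a000000 & RTE_IPV6_HDR_TC_MASK (0x0ff00000),
         * shifted right by RTE_IPV6_HDR_TC_SHIFT (20), yields TC 0xa0.
         */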
1833         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1834         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1835         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1836
1837         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1838         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1839         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1840
1841         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1842                                           fremap[EFX_MAE_FIELD_IP_TOS],
1843                                           sizeof(tc_value), &tc_value,
1844                                           sizeof(tc_mask), &tc_mask);
1845         if (rc != 0) {
1846                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1847                                 NULL, "Failed to process item fields");
1848         }
1849
1850         return 0;
1851 }
1852
1853 static const struct sfc_mae_field_locator flocs_tcp[] = {
1854         {
1855                 EFX_MAE_FIELD_L4_SPORT_BE,
1856                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1857                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1858         },
1859         {
1860                 EFX_MAE_FIELD_L4_DPORT_BE,
1861                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1862                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1863         },
1864         {
1865                 EFX_MAE_FIELD_TCP_FLAGS_BE,
1866                 /*
1867                  * The values have been picked intentionally since the
1868                  * target MAE field is oversize (16 bit). This mapping
1869                  * relies on the fact that the MAE field is big-endian.
1870                  */
1871                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1872                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1873                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1874         },
1875 };
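
/*
 * Illustrative note on the last locator above: "data_off" and
 * "tcp_flags" are adjacent bytes in the TCP header, so copying them
 * together yields one big-endian 16-bit value that lines up with the
 * 16-bit MAE field; the value also covers the ninth TCP flag (NS),
 * which sits in the low-order bit of the "data_off" byte.
 */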
1876
1877 static int
1878 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1879                             struct sfc_flow_parse_ctx *ctx,
1880                             struct rte_flow_error *error)
1881 {
1882         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1883         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1884         struct rte_flow_item_tcp supp_mask;
1885         const uint8_t *spec = NULL;
1886         const uint8_t *mask = NULL;
1887         int rc;
1888
1889         /*
1890          * When encountered among outermost items, item TCP is invalid.
1891          * Check which match specification is being constructed now.
1892          */
1893         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1894                 return rte_flow_error_set(error, EINVAL,
1895                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1896                                           "TCP in outer frame is invalid");
1897         }
1898
1899         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1900                                      &supp_mask, sizeof(supp_mask));
1901
1902         rc = sfc_flow_parse_init(item,
1903                                  (const void **)&spec, (const void **)&mask,
1904                                  (const void *)&supp_mask,
1905                                  &rte_flow_item_tcp_mask,
1906                                  sizeof(struct rte_flow_item_tcp), error);
1907         if (rc != 0)
1908                 return rc;
1909
1910         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1911         pdata->l3_next_proto_restriction_mask = 0xff;
1912
1913         if (spec == NULL)
1914                 return 0;
1915
1916         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1917                                   ctx_mae, error);
1918 }
1919
1920 static const struct sfc_mae_field_locator flocs_udp[] = {
1921         {
1922                 EFX_MAE_FIELD_L4_SPORT_BE,
1923                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1924                 offsetof(struct rte_flow_item_udp, hdr.src_port),
1925         },
1926         {
1927                 EFX_MAE_FIELD_L4_DPORT_BE,
1928                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1929                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1930         },
1931 };
1932
1933 static int
1934 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1935                             struct sfc_flow_parse_ctx *ctx,
1936                             struct rte_flow_error *error)
1937 {
1938         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1939         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1940         struct rte_flow_item_udp supp_mask;
1941         const uint8_t *spec = NULL;
1942         const uint8_t *mask = NULL;
1943         int rc;
1944
1945         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1946                                      &supp_mask, sizeof(supp_mask));
1947
1948         rc = sfc_flow_parse_init(item,
1949                                  (const void **)&spec, (const void **)&mask,
1950                                  (const void *)&supp_mask,
1951                                  &rte_flow_item_udp_mask,
1952                                  sizeof(struct rte_flow_item_udp), error);
1953         if (rc != 0)
1954                 return rc;
1955
1956         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1957         pdata->l3_next_proto_restriction_mask = 0xff;
1958
1959         if (spec == NULL)
1960                 return 0;
1961
1962         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1963                                   ctx_mae, error);
1964 }
1965
1966 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1967         {
1968                 /*
1969                  * The size and offset values are relevant
1970                  * for Geneve and NVGRE, too.
1971                  */
1972                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1973                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1974         },
1975 };
1976
1977 /*
1978  * An auxiliary registry which allows using non-encap. field IDs
1979  * directly when building a match specification of type ACTION.
1980  *
1981  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1982  */
1983 static const efx_mae_field_id_t field_ids_no_remap[] = {
1984 #define FIELD_ID_NO_REMAP(_field) \
1985         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1986
1987         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1988         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1989         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1990         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1991         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1992         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1993         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1994         FIELD_ID_NO_REMAP(SRC_IP4_BE),
1995         FIELD_ID_NO_REMAP(DST_IP4_BE),
1996         FIELD_ID_NO_REMAP(IP_PROTO),
1997         FIELD_ID_NO_REMAP(IP_TOS),
1998         FIELD_ID_NO_REMAP(IP_TTL),
1999         FIELD_ID_NO_REMAP(SRC_IP6_BE),
2000         FIELD_ID_NO_REMAP(DST_IP6_BE),
2001         FIELD_ID_NO_REMAP(L4_SPORT_BE),
2002         FIELD_ID_NO_REMAP(L4_DPORT_BE),
2003         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2004         FIELD_ID_NO_REMAP(HAS_OVLAN),
2005         FIELD_ID_NO_REMAP(HAS_IVLAN),
2006
2007 #undef FIELD_ID_NO_REMAP
2008 };
2009
2010 /*
2011  * An auxiliary registry which allows using "ENC" field IDs
2012  * when building a match specification of type OUTER.
2013  *
2014  * See sfc_mae_rule_encap_parse_init().
2015  */
2016 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2017 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2018         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2019
2020         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2021         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2022         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2023         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2024         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2025         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2026         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2027         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2028         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2029         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2030         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2031         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2032         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2033         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2034         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2035         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2036         FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2037         FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2038
2039 #undef FIELD_ID_REMAP_TO_ENCAP
2040 };
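
/*
 * Illustrative use of the two registries above: with field_ids_no_remap
 * in effect, fremap[EFX_MAE_FIELD_IP_TOS] is EFX_MAE_FIELD_IP_TOS
 * itself, whereas with field_ids_remap_to_encap it translates to
 * EFX_MAE_FIELD_ENC_IP_TOS. This way, item parsers need not know
 * whether they fill in a specification of type OUTER or ACTION.
 */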
2041
2042 static int
2043 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2044                                struct sfc_flow_parse_ctx *ctx,
2045                                struct rte_flow_error *error)
2046 {
2047         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2048         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2049         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2050         const struct rte_flow_item_vxlan *vxp;
2051         uint8_t supp_mask[sizeof(uint64_t)];
2052         const uint8_t *spec = NULL;
2053         const uint8_t *mask = NULL;
2054         int rc;
2055
2056         /*
2057          * We're about to start processing inner frame items.
2058          * Process pattern data that has been deferred so far
2059          * and reset pattern data storage.
2060          */
2061         rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2062         if (rc != 0)
2063                 return rc;
2064
2065         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2066
2067         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2068                                      &supp_mask, sizeof(supp_mask));
2069
2070         /*
2071          * This tunnel item was preliminarily detected by
2072          * sfc_mae_rule_encap_parse_init(). Default mask
2073          * was also picked by that helper. Use it here.
2074          */
2075         rc = sfc_flow_parse_init(item,
2076                                  (const void **)&spec, (const void **)&mask,
2077                                  (const void *)&supp_mask,
2078                                  ctx_mae->tunnel_def_mask,
2079                                  ctx_mae->tunnel_def_mask_size, error);
2080         if (rc != 0)
2081                 return rc;
2082
2083         /*
2084          * This item and later ones comprise a
2085          * match specification of type ACTION.
2086          */
2087         ctx_mae->match_spec = ctx_mae->match_spec_action;
2088
2089         /* This item and later ones use non-encap. EFX MAE field IDs. */
2090         ctx_mae->field_ids_remap = field_ids_no_remap;
2091
2092         if (spec == NULL)
2093                 return 0;
2094
2095         /*
2096          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
2097          * Copy 24-bit VNI, which is BE, at offset 1 in it.
2098          * The extra byte is 0 both in the mask and in the value.
2099          */
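        /* E.g. (illustrative): VNI 0xabcdef gives { 0x00, 0xab, 0xcd, 0xef }. */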
2100         vxp = (const struct rte_flow_item_vxlan *)spec;
2101         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2102
2103         vxp = (const struct rte_flow_item_vxlan *)mask;
2104         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
2105
2106         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2107                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
2108                                           sizeof(vnet_id_v), vnet_id_v,
2109                                           sizeof(vnet_id_m), vnet_id_m);
2110         if (rc != 0) {
2111                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2112                                         item, "Failed to set VXLAN VNI");
2113         }
2114
2115         return rc;
2116 }
2117
2118 static const struct sfc_flow_item sfc_flow_items[] = {
2119         {
2120                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2121                 .name = "PORT_ID",
2122                 /*
2123                  * In terms of RTE flow, this item is a META one,
2124                  * and its position in the pattern does not matter.
2125                  */
2126                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2127                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2128                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2129                 .parse = sfc_mae_rule_parse_item_port_id,
2130         },
2131         {
2132                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2133                 .name = "PHY_PORT",
2134                 /*
2135                  * In terms of RTE flow, this item is a META one,
2136                  * and its position in the pattern does not matter.
2137                  */
2138                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2139                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2140                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2141                 .parse = sfc_mae_rule_parse_item_phy_port,
2142         },
2143         {
2144                 .type = RTE_FLOW_ITEM_TYPE_PF,
2145                 .name = "PF",
2146                 /*
2147                  * In terms of RTE flow, this item is a META one,
2148                  * and its position in the pattern does not matter.
2149                  */
2150                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2151                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2152                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2153                 .parse = sfc_mae_rule_parse_item_pf,
2154         },
2155         {
2156                 .type = RTE_FLOW_ITEM_TYPE_VF,
2157                 .name = "VF",
2158                 /*
2159                  * In terms of RTE flow, this item is a META one,
2160                  * and its position in the pattern does not matter.
2161                  */
2162                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2163                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2164                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2165                 .parse = sfc_mae_rule_parse_item_vf,
2166         },
2167         {
2168                 .type = RTE_FLOW_ITEM_TYPE_ETH,
2169                 .name = "ETH",
2170                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2171                 .layer = SFC_FLOW_ITEM_L2,
2172                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2173                 .parse = sfc_mae_rule_parse_item_eth,
2174         },
2175         {
2176                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2177                 .name = "VLAN",
2178                 .prev_layer = SFC_FLOW_ITEM_L2,
2179                 .layer = SFC_FLOW_ITEM_L2,
2180                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2181                 .parse = sfc_mae_rule_parse_item_vlan,
2182         },
2183         {
2184                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2185                 .name = "IPV4",
2186                 .prev_layer = SFC_FLOW_ITEM_L2,
2187                 .layer = SFC_FLOW_ITEM_L3,
2188                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2189                 .parse = sfc_mae_rule_parse_item_ipv4,
2190         },
2191         {
2192                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2193                 .name = "IPV6",
2194                 .prev_layer = SFC_FLOW_ITEM_L2,
2195                 .layer = SFC_FLOW_ITEM_L3,
2196                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2197                 .parse = sfc_mae_rule_parse_item_ipv6,
2198         },
2199         {
2200                 .type = RTE_FLOW_ITEM_TYPE_TCP,
2201                 .name = "TCP",
2202                 .prev_layer = SFC_FLOW_ITEM_L3,
2203                 .layer = SFC_FLOW_ITEM_L4,
2204                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2205                 .parse = sfc_mae_rule_parse_item_tcp,
2206         },
2207         {
2208                 .type = RTE_FLOW_ITEM_TYPE_UDP,
2209                 .name = "UDP",
2210                 .prev_layer = SFC_FLOW_ITEM_L3,
2211                 .layer = SFC_FLOW_ITEM_L4,
2212                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2213                 .parse = sfc_mae_rule_parse_item_udp,
2214         },
2215         {
2216                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2217                 .name = "VXLAN",
2218                 .prev_layer = SFC_FLOW_ITEM_L4,
2219                 .layer = SFC_FLOW_ITEM_START_LAYER,
2220                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2221                 .parse = sfc_mae_rule_parse_item_tunnel,
2222         },
2223         {
2224                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2225                 .name = "GENEVE",
2226                 .prev_layer = SFC_FLOW_ITEM_L4,
2227                 .layer = SFC_FLOW_ITEM_START_LAYER,
2228                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2229                 .parse = sfc_mae_rule_parse_item_tunnel,
2230         },
2231         {
2232                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2233                 .name = "NVGRE",
2234                 .prev_layer = SFC_FLOW_ITEM_L3,
2235                 .layer = SFC_FLOW_ITEM_START_LAYER,
2236                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2237                 .parse = sfc_mae_rule_parse_item_tunnel,
2238         },
2239 };
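
/*
 * Illustrative pattern accepted by the table above (assuming VXLAN
 * encap. support): ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP / END.
 * The items preceding VXLAN fill in the match specification of type
 * OUTER, the VXLAN item switches parsing to the specification of type
 * ACTION (its VNI is matched there via EFX_MAE_FIELD_ENC_VNET_ID_BE),
 * and the inner items which follow fill in the ACTION specification.
 */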
2240
2241 static int
2242 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2243                            struct sfc_mae_parse_ctx *ctx,
2244                            struct sfc_mae_outer_rule **rulep,
2245                            struct rte_flow_error *error)
2246 {
2247         efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2248         int rc;
2249
2250         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2251                 *rulep = NULL;
2252                 goto no_or_id;
2253         }
2254
2255         SFC_ASSERT(ctx->match_spec_outer != NULL);
2256
2257         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2258                 return rte_flow_error_set(error, ENOTSUP,
2259                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2260                                           "Inconsistent pattern (outer)");
2261         }
2262
2263         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2264                                            ctx->encap_type);
2265         if (*rulep != NULL) {
2266                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2267         } else {
2268                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2269                                             ctx->encap_type, rulep);
2270                 if (rc != 0) {
2271                         return rte_flow_error_set(error, rc,
2272                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2273                                         "Failed to process the pattern");
2274                 }
2275         }
2276
2277         /* The spec has now been tracked by the outer rule entry. */
2278         ctx->match_spec_outer = NULL;
2279
2280 no_or_id:
2281         /*
2282          * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2283          * inner parse (when some outer rule is hit) and action rule lookup.
2284          * If the currently processed flow does not come with an outer rule,
2285          * its action rule must be available only for packets which miss in
2286          * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2287          * in the action rule specification; this ensures correct behaviour.
2288          *
2289          * If, on the other hand, this flow does have an outer rule, its ID
2290          * may be unknown at the moment (not yet allocated), but OR_ID mask
2291          * has to be set to 0xffffffff anyway for correct class comparisons.
2292          * When the outer rule has been allocated, this match field will be
2293          * overridden by sfc_mae_outer_rule_enable() to use the right value.
2294          */
2295         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2296                                                   &invalid_rule_id);
2297         if (rc != 0) {
2298                 if (*rulep != NULL)
2299                         sfc_mae_outer_rule_del(sa, *rulep);
2300
2301                 *rulep = NULL;
2302
2303                 return rte_flow_error_set(error, rc,
2304                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2305                                           "Failed to process the pattern");
2306         }
2307
2308         return 0;
2309 }
2310
2311 static int
2312 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2313                               const struct rte_flow_item pattern[],
2314                               struct sfc_mae_parse_ctx *ctx,
2315                               struct rte_flow_error *error)
2316 {
2317         struct sfc_mae *mae = &sa->mae;
2318         int rc;
2319
2320         if (pattern == NULL) {
2321                 rte_flow_error_set(error, EINVAL,
2322                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2323                                    "NULL pattern");
2324                 return -rte_errno;
2325         }
2326
2327         for (;;) {
2328                 switch (pattern->type) {
2329                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2330                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2331                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2332                         ctx->tunnel_def_mask_size =
2333                                 sizeof(rte_flow_item_vxlan_mask);
2334                         break;
2335                 case RTE_FLOW_ITEM_TYPE_GENEVE:
2336                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2337                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2338                         ctx->tunnel_def_mask_size =
2339                                 sizeof(rte_flow_item_geneve_mask);
2340                         break;
2341                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2342                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2343                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2344                         ctx->tunnel_def_mask_size =
2345                                 sizeof(rte_flow_item_nvgre_mask);
2346                         break;
2347                 case RTE_FLOW_ITEM_TYPE_END:
2348                         break;
2349                 default:
2350                         ++pattern;
2351                         continue;
2352                 }
2353
2354                 break;
2355         }
2356
2357         if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2358                 return 0;
2359
2360         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2361                 return rte_flow_error_set(error, ENOTSUP,
2362                                           RTE_FLOW_ERROR_TYPE_ITEM,
2363                                           pattern, "Unsupported tunnel item");
2364         }
2365
2366         if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2367                 return rte_flow_error_set(error, ENOTSUP,
2368                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2369                                           NULL, "Unsupported priority level");
2370         }
2371
2372         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
2373                                      &ctx->match_spec_outer);
2374         if (rc != 0) {
2375                 return rte_flow_error_set(error, rc,
2376                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
2377                         "Failed to initialise outer rule match specification");
2378         }
2379
2380         /* Outermost items comprise a match specification of type OUTER. */
2381         ctx->match_spec = ctx->match_spec_outer;
2382
2383         /* Outermost items use "ENC" EFX MAE field IDs. */
2384         ctx->field_ids_remap = field_ids_remap_to_encap;
2385
2386         return 0;
2387 }
2388
2389 static void
2390 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2391                               struct sfc_mae_parse_ctx *ctx)
2392 {
2393         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2394                 return;
2395
2396         if (ctx->match_spec_outer != NULL)
2397                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2398 }
2399
2400 int
2401 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2402                            const struct rte_flow_item pattern[],
2403                            struct sfc_flow_spec_mae *spec,
2404                            struct rte_flow_error *error)
2405 {
2406         struct sfc_mae_parse_ctx ctx_mae;
2407         struct sfc_flow_parse_ctx ctx;
2408         int rc;
2409
2410         memset(&ctx_mae, 0, sizeof(ctx_mae));
2411         ctx_mae.priority = spec->priority;
2412         ctx_mae.sa = sa;
2413
2414         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2415                                      spec->priority,
2416                                      &ctx_mae.match_spec_action);
2417         if (rc != 0) {
2418                 rc = rte_flow_error_set(error, rc,
2419                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2420                         "Failed to initialise action rule match specification");
2421                 goto fail_init_match_spec_action;
2422         }
2423
2424         /*
2425          * As a preliminary setting, assume that there is no encapsulation
2426          * in the pattern. That is, pattern items are about to comprise a
2427          * match specification of type ACTION and use non-encap. field IDs.
2428          *
2429          * sfc_mae_rule_encap_parse_init() below may override this.
2430          */
2431         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2432         ctx_mae.match_spec = ctx_mae.match_spec_action;
2433         ctx_mae.field_ids_remap = field_ids_no_remap;
2434
2435         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2436         ctx.mae = &ctx_mae;
2437
2438         rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2439         if (rc != 0)
2440                 goto fail_encap_parse_init;
2441
2442         rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2443                                     pattern, &ctx, error);
2444         if (rc != 0)
2445                 goto fail_parse_pattern;
2446
2447         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2448         if (rc != 0)
2449                 goto fail_process_pattern_data;
2450
2451         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2452         if (rc != 0)
2453                 goto fail_process_outer;
2454
2455         if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2456                 rc = rte_flow_error_set(error, ENOTSUP,
2457                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2458                                         "Inconsistent pattern");
2459                 goto fail_validate_match_spec_action;
2460         }
2461
2462         spec->match_spec = ctx_mae.match_spec_action;
2463
2464         return 0;
2465
2466 fail_validate_match_spec_action:
2467 fail_process_outer:
2468 fail_process_pattern_data:
2469 fail_parse_pattern:
2470         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2471
2472 fail_encap_parse_init:
2473         efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2474
2475 fail_init_match_spec_action:
2476         return rc;
2477 }
2478
2479 /*
2480  * An action supported by MAE may correspond to a bundle of RTE flow actions,
2481  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2482  * That is, related RTE flow actions need to be tracked as parts of a whole
2483  * so that they can be combined into a single action and submitted to the
2484  * MAE representation of a given rule's action set.
2485  *
2486  * Each RTE flow action provided by an application gets classified as
2487  * one belonging to some bundle type. If an action is not supposed to
2488  * belong to any bundle, or if this action is END, it is described as
2489  * one belonging to a dummy bundle of type EMPTY.
2490  *
2491  * A currently tracked bundle will be submitted if a repeating
2492  * action or an action of different bundle type follows.
2493  */
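
/*
 * Worked example (illustrative): for the action list
 * OF_PUSH_VLAN(ethertype=0x8100), OF_SET_VLAN_VID(100),
 * OF_SET_VLAN_PCP(5), END, the first three actions accumulate in one
 * VLAN_PUSH bundle; END is of bundle type EMPTY, so the bundle gets
 * submitted as a single MAE VLAN_PUSH action with TPID 0x8100 and
 * TCI 0xa064 ((5 << 13) | 100).
 */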
2494
2495 enum sfc_mae_actions_bundle_type {
2496         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2497         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2498 };
2499
2500 struct sfc_mae_actions_bundle {
2501         enum sfc_mae_actions_bundle_type        type;
2502
2503         /* Indicates actions already tracked by the current bundle */
2504         uint64_t                                actions_mask;
2505
2506         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2507         rte_be16_t                              vlan_push_tpid;
2508         rte_be16_t                              vlan_push_tci;
2509 };
2510
2511 /*
2512  * Combine configuration of RTE flow actions tracked by the bundle into a
2513  * single action and submit the result to MAE action set specification.
2514  * Do nothing in the case of dummy action bundle.
2515  */
2516 static int
2517 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2518                               efx_mae_actions_t *spec)
2519 {
2520         int rc = 0;
2521
2522         switch (bundle->type) {
2523         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2524                 break;
2525         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2526                 rc = efx_mae_action_set_populate_vlan_push(
2527                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2528                 break;
2529         default:
2530                 SFC_ASSERT(B_FALSE);
2531                 break;
2532         }
2533
2534         return rc;
2535 }
2536
2537 /*
2538  * Given the type of the next RTE flow action in the line, decide
2539  * whether a new bundle is about to start, and, if this is the case,
2540  * submit and reset the current bundle.
2541  */
2542 static int
2543 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2544                             struct sfc_mae_actions_bundle *bundle,
2545                             efx_mae_actions_t *spec,
2546                             struct rte_flow_error *error)
2547 {
2548         enum sfc_mae_actions_bundle_type bundle_type_new;
2549         int rc;
2550
2551         switch (action->type) {
2552         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2553         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2554         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2555                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2556                 break;
2557         default:
2558                 /*
2559                  * Self-sufficient actions, including END, are handled in this
2560                  * case. No checks for unsupported actions are needed here
2561                  * because parsing doesn't occur at this point.
2562                  */
2563                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2564                 break;
2565         }
2566
2567         if (bundle_type_new != bundle->type ||
2568             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2569                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2570                 if (rc != 0)
2571                         goto fail_submit;
2572
2573                 memset(bundle, 0, sizeof(*bundle));
2574         }
2575
2576         bundle->type = bundle_type_new;
2577
2578         return 0;
2579
2580 fail_submit:
2581         return rte_flow_error_set(error, rc,
2582                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2583                         "Failed to request the (group of) action(s)");
2584 }
2585
2586 static void
2587 sfc_mae_rule_parse_action_of_push_vlan(
2588                             const struct rte_flow_action_of_push_vlan *conf,
2589                             struct sfc_mae_actions_bundle *bundle)
2590 {
2591         bundle->vlan_push_tpid = conf->ethertype;
2592 }
2593
2594 static void
2595 sfc_mae_rule_parse_action_of_set_vlan_vid(
2596                             const struct rte_flow_action_of_set_vlan_vid *conf,
2597                             struct sfc_mae_actions_bundle *bundle)
2598 {
2599         bundle->vlan_push_tci |= (conf->vlan_vid &
2600                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2601 }
2602
2603 static void
2604 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2605                             const struct rte_flow_action_of_set_vlan_pcp *conf,
2606                             struct sfc_mae_actions_bundle *bundle)
2607 {
2608         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2609                                            RTE_LEN2MASK(3, uint8_t)) << 13;
2610
2611         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2612 }
2613
2614 struct sfc_mae_parsed_item {
2615         const struct rte_flow_item      *item;
2616         size_t                          proto_header_ofst;
2617         size_t                          proto_header_size;
2618 };
2619
2620 /*
2621  * For each 16-bit word of the given header, override
2622  * bits enforced by the corresponding 16-bit mask.
2623  */
2624 static void
2625 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2626                                 const struct sfc_mae_parsed_item *parsed_items,
2627                                 unsigned int nb_parsed_items)
2628 {
2629         unsigned int item_idx;
2630
2631         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2632                 const struct sfc_mae_parsed_item *parsed_item;
2633                 const struct rte_flow_item *item;
2634                 size_t proto_header_size;
2635                 size_t ofst;
2636
2637                 parsed_item = &parsed_items[item_idx];
2638                 proto_header_size = parsed_item->proto_header_size;
2639                 item = parsed_item->item;
2640
2641                 for (ofst = 0; ofst < proto_header_size;
2642                      ofst += sizeof(rte_be16_t)) {
2643                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2644                         const rte_be16_t *w_maskp;
2645                         const rte_be16_t *w_specp;
2646
2647                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
2648                         w_specp = RTE_PTR_ADD(item->spec, ofst);
2649
2650                         *wp &= ~(*w_maskp);
2651                         *wp |= (*w_specp & *w_maskp);
2652                 }
2653
2654                 header_buf += proto_header_size;
2655         }
2656 }
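
/*
 * Worked example (illustrative): for a 16-bit header word 0x0800, an
 * item mask word 0xff00 and an item spec word 0x4500, the loop above
 * computes (0x0800 & ~0xff00) | (0x4500 & 0xff00) == 0x4500.
 */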
2657
2658 #define SFC_IPV4_TTL_DEF        0x40
2659 #define SFC_IPV6_VTC_FLOW_DEF   0x60000000
2660 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2661 #define SFC_VXLAN_FLAGS_DEF     0x08000000
2662
2663 static int
2664 sfc_mae_rule_parse_action_vxlan_encap(
2665                             struct sfc_mae *mae,
2666                             const struct rte_flow_action_vxlan_encap *conf,
2667                             efx_mae_actions_t *spec,
2668                             struct rte_flow_error *error)
2669 {
2670         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2671         struct rte_flow_item *pattern = conf->definition;
2672         uint8_t *buf = bounce_eh->buf;
2673
2674         /* This array will keep track of non-VOID pattern items. */
2675         struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2676                                                 2 /* VLAN tags */ +
2677                                                 1 /* IPv4 or IPv6 */ +
2678                                                 1 /* UDP */ +
2679                                                 1 /* VXLAN */];
2680         unsigned int nb_parsed_items = 0;
2681
2682         size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2683         uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2684                                   sizeof(struct rte_ipv6_hdr))];
2685         struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2686         struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2687         struct rte_vxlan_hdr *vxlan = NULL;
2688         struct rte_udp_hdr *udp = NULL;
2689         unsigned int nb_vlan_tags = 0;
2690         size_t next_proto_ofst = 0;
2691         size_t ethertype_ofst = 0;
2692         uint64_t exp_items;
2693         int rc;
2694
2695         if (pattern == NULL) {
2696                 return rte_flow_error_set(error, EINVAL,
2697                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2698                                 "The encap. header definition is NULL");
2699         }
2700
2701         bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2702         bounce_eh->size = 0;
2703
2704         /*
2705          * Process pattern items and remember non-VOID ones.
2706          * Defer applying masks until after the complete header
2707          * has been built from the pattern items.
2708          */
2709         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2710
2711         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2712                 struct sfc_mae_parsed_item *parsed_item;
2713                 const uint64_t exp_items_extra_vlan[] = {
2714                         RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2715                 };
2716                 size_t proto_header_size;
2717                 rte_be16_t *ethertypep;
2718                 uint8_t *next_protop;
2719                 uint8_t *buf_cur;
2720
2721                 if (pattern->spec == NULL) {
2722                         return rte_flow_error_set(error, EINVAL,
2723                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2724                                         "NULL item spec in the encap. header");
2725                 }
2726
2727                 if (pattern->mask == NULL) {
2728                         return rte_flow_error_set(error, EINVAL,
2729                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2730                                         "NULL item mask in the encap. header");
2731                 }
2732
2733                 if (pattern->last != NULL) {
2734                         /* This is not a match pattern, so disallow range. */
2735                         return rte_flow_error_set(error, EINVAL,
2736                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2737                                         "Range item in the encap. header");
2738                 }
2739
2740                 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2741                         /* Handle VOID separately, for clarity. */
2742                         continue;
2743                 }
2744
2745                 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2746                         return rte_flow_error_set(error, ENOTSUP,
2747                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2748                                         "Unexpected item in the encap. header");
2749                 }
2750
2751                 parsed_item = &parsed_items[nb_parsed_items];
2752                 buf_cur = buf + bounce_eh->size;
2753
2754                 switch (pattern->type) {
2755                 case RTE_FLOW_ITEM_TYPE_ETH:
2756                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2757                                                exp_items);
2758                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2759                                                   hdr) != 0);
2760
2761                         proto_header_size = sizeof(struct rte_ether_hdr);
2762
2763                         ethertype_ofst = eth_ethertype_ofst;
2764
2765                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2766                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2767                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2768                         break;
2769                 case RTE_FLOW_ITEM_TYPE_VLAN:
2770                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2771                                                exp_items);
2772                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2773                                                   hdr) != 0);
2774
2775                         proto_header_size = sizeof(struct rte_vlan_hdr);
2776
2777                         ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2778                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2779
2780                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2781                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2782
2783                         ethertype_ofst =
2784                             bounce_eh->size +
2785                             offsetof(struct rte_vlan_hdr, eth_proto);
2786
2787                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2788                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2789                         exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2790
2791                         ++nb_vlan_tags;
2792                         break;
2793                 case RTE_FLOW_ITEM_TYPE_IPV4:
2794                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2795                                                exp_items);
2796                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2797                                                   hdr) != 0);
2798
2799                         proto_header_size = sizeof(struct rte_ipv4_hdr);
2800
2801                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2802                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2803
2804                         next_proto_ofst =
2805                             bounce_eh->size +
2806                             offsetof(struct rte_ipv4_hdr, next_proto_id);
2807
2808                         ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2809
2810                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2811                         break;
2812                 case RTE_FLOW_ITEM_TYPE_IPV6:
2813                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2814                                                exp_items);
2815                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2816                                                   hdr) != 0);
2817
2818                         proto_header_size = sizeof(struct rte_ipv6_hdr);
2819
2820                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2821                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2822
2823                         next_proto_ofst = bounce_eh->size +
2824                                           offsetof(struct rte_ipv6_hdr, proto);
2825
2826                         ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2827
2828                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2829                         break;
2830                 case RTE_FLOW_ITEM_TYPE_UDP:
2831                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2832                                                exp_items);
2833                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2834                                                   hdr) != 0);
2835
2836                         proto_header_size = sizeof(struct rte_udp_hdr);
2837
2838                         next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2839                         *next_protop = IPPROTO_UDP;
2840
2841                         udp = (struct rte_udp_hdr *)buf_cur;
2842
2843                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2844                         break;
2845                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2846                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2847                                                exp_items);
2848                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2849                                                   hdr) != 0);
2850
2851                         proto_header_size = sizeof(struct rte_vxlan_hdr);
2852
2853                         vxlan = (struct rte_vxlan_hdr *)buf_cur;
2854
2855                         udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2856                         udp->dgram_len = RTE_BE16(sizeof(*udp) +
2857                                                   sizeof(*vxlan));
2858                         udp->dgram_cksum = 0;
2859
2860                         exp_items = 0;
2861                         break;
2862                 default:
2863                         return rte_flow_error_set(error, ENOTSUP,
2864                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2865                                         "Unknown item in the encap. header");
2866                 }
2867
2868                 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2869                         return rte_flow_error_set(error, E2BIG,
2870                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2871                                         "The encap. header is too big");
2872                 }
2873
2874                 if ((proto_header_size & 1) != 0) {
2875                         return rte_flow_error_set(error, EINVAL,
2876                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2877                                         "Odd layer size in the encap. header");
2878                 }
2879
2880                 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2881                 bounce_eh->size += proto_header_size;
2882
2883                 parsed_item->item = pattern;
2884                 parsed_item->proto_header_size = proto_header_size;
2885                 ++nb_parsed_items;
2886         }
2887
2888         if (exp_items != 0) {
2889                 /* Parsing item VXLAN would have reset exp_items to 0. */
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2892                                         "No item VXLAN in the encap. header");
2893         }
2894
2895         /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2896         ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2897         ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2898         ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2899                                       sizeof(*vxlan));
2900         /* The HW cannot compute this checksum. */
2901         ipv4->hdr_checksum = 0;
2902         ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2903
2904         ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2905         ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2906         ipv6->payload_len = udp->dgram_len;
2907
2908         vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2909
2910         /* Take care of the masks. */
2911         sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2912
2913         rc = efx_mae_action_set_populate_encap(spec);
2914         if (rc != 0) {
2915                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2916                                 NULL, "failed to request action ENCAP");
2917         }
2918
2919         return rc;
2920 }
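/*
 * Illustrative application-side sketch (not part of the driver) of an
 * encap. header definition accepted by the parser above: Ethernet,
 * optionally up to two VLAN tags, then IPv4 or IPv6, UDP and VXLAN.
 * Field values are arbitrary examples. With the default (full) item
 * masks shown here, every masked byte is taken from the item specs,
 * so the specs carry the EtherType and UDP ports explicitly; bytes
 * left unmasked keep the defaults written by the parser.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(50000),
 *		.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT),
 *	};
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x12, 0x34 },
 *	};
 *	struct rte_flow_item definition[] = {
 *		{ RTE_FLOW_ITEM_TYPE_ETH, &eth_spec, NULL,
 *		  &rte_flow_item_eth_mask },
 *		{ RTE_FLOW_ITEM_TYPE_IPV4, &ipv4_spec, NULL,
 *		  &rte_flow_item_ipv4_mask },
 *		{ RTE_FLOW_ITEM_TYPE_UDP, &udp_spec, NULL,
 *		  &rte_flow_item_udp_mask },
 *		{ RTE_FLOW_ITEM_TYPE_VXLAN, &vxlan_spec, NULL,
 *		  &rte_flow_item_vxlan_mask },
 *		{ RTE_FLOW_ITEM_TYPE_END, NULL, NULL, NULL },
 *	};
 *	struct rte_flow_action_vxlan_encap encap_conf = {
 *		.definition = definition,
 *	};
 */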
2921
2922 static int
2923 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
2924                                const struct rte_flow_action_mark *conf,
2925                                efx_mae_actions_t *spec)
2926 {
2927         int rc;
2928
2929         rc = efx_mae_action_set_populate_mark(spec, conf->id);
2930         if (rc != 0)
2931                 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
2932
2933         return rc;
2934 }
2935
2936 static int
2937 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
2938                                 const struct rte_flow_action_count *conf,
2939                                 efx_mae_actions_t *spec)
2940 {
2941         int rc;
2942
2943         if (conf->shared) {
2944                 rc = ENOTSUP;
2945                 goto fail_counter_shared;
2946         }
2947
2948         if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
2949                 sfc_err(sa,
2950                         "counter queue is not configured for COUNT action");
2951                 rc = EINVAL;
2952                 goto fail_counter_queue_uninit;
2953         }
2954
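        /*
         * Counter values are delivered via a dedicated RxQ polled by
         * a background service, so a service lcore must be available
         * (assigned to the EAL at startup, e.g. through its service
         * coremask option).
         */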
2955         if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
2956                 rc = EINVAL;
2957                 goto fail_no_service_core;
2958         }
2959
2960         rc = efx_mae_action_set_populate_count(spec);
2961         if (rc != 0) {
2962                 sfc_err(sa,
2963                         "failed to populate counters in MAE action set: %s",
2964                         rte_strerror(rc));
2965                 goto fail_populate_count;
2966         }
2967
2968         return 0;
2969
2970 fail_populate_count:
2971 fail_no_service_core:
2972 fail_counter_queue_uninit:
2973 fail_counter_shared:
2974
2975         return rc;
2976 }
2977
2978 static int
2979 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2980                                    const struct rte_flow_action_phy_port *conf,
2981                                    efx_mae_actions_t *spec)
2982 {
2983         efx_mport_sel_t mport;
2984         uint32_t phy_port;
2985         int rc;
2986
2987         if (conf->original != 0)
2988                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2989         else
2990                 phy_port = conf->index;
2991
2992         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2993         if (rc != 0) {
2994                 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
2995                         phy_port, strerror(rc));
2996                 return rc;
2997         }
2998
2999         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3000         if (rc != 0) {
3001                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3002                         mport.sel, strerror(rc));
3003         }
3004
3005         return rc;
3006 }
3007
3008 static int
3009 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3010                                 const struct rte_flow_action_vf *vf_conf,
3011                                 efx_mae_actions_t *spec)
3012 {
3013         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3014         efx_mport_sel_t mport;
3015         uint32_t vf;
3016         int rc;
3017
3018         if (vf_conf == NULL)
3019                 vf = EFX_PCI_VF_INVALID;
3020         else if (vf_conf->original != 0)
3021                 vf = encp->enc_vf;
3022         else
3023                 vf = vf_conf->id;
3024
3025         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3026         if (rc != 0) {
3027                 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3028                         encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3029                         strerror(rc));
3030                 return rc;
3031         }
3032
3033         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3034         if (rc != 0) {
3035                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3036                         mport.sel, strerror(rc));
3037         }
3038
3039         return rc;
3040 }
3041
3042 static int
3043 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3044                                   const struct rte_flow_action_port_id *conf,
3045                                   efx_mae_actions_t *spec)
3046 {
3047         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3048         struct sfc_mae *mae = &sa->mae;
3049         efx_mport_sel_t mport;
3050         uint16_t port_id;
3051         int rc;
3052
3053         if (conf->id > UINT16_MAX)
3054                 return EOVERFLOW;
3055
3056         port_id = (conf->original != 0) ? sas->port_id : conf->id;
3057
3058         rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
3059                                            port_id, &mport);
3060         if (rc != 0) {
3061                 sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
3062                         port_id, strerror(rc));
3063                 return rc;
3064         }
3065
3066         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3067         if (rc != 0) {
3068                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3069                         mport.sel, strerror(rc));
3070         }
3071
3072         return rc;
3073 }
3074
3075 static const char * const action_names[] = {
3076         [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3077         [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3078         [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3079         [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3080         [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3081         [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3082         [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3083         [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3084         [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3085         [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3086         [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3087         [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3088         [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3089 };
3090
3091 static int
3092 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3093                           const struct rte_flow_action *action,
3094                           const struct sfc_mae_outer_rule *outer_rule,
3095                           struct sfc_mae_actions_bundle *bundle,
3096                           efx_mae_actions_t *spec,
3097                           struct rte_flow_error *error)
3098 {
3099         bool custom_error = B_FALSE;
3100         int rc = 0;
3101
3102         switch (action->type) {
3103         case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3104                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3105                                        bundle->actions_mask);
3106                 if (outer_rule == NULL ||
3107                     outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3108                         rc = EINVAL;
3109                 else
3110                         rc = efx_mae_action_set_populate_decap(spec);
3111                 break;
3112         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3113                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3114                                        bundle->actions_mask);
3115                 rc = efx_mae_action_set_populate_vlan_pop(spec);
3116                 break;
3117         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3118                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3119                                        bundle->actions_mask);
3120                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3121                 break;
3122         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3123                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3124                                        bundle->actions_mask);
3125                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3126                 break;
3127         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3128                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3129                                        bundle->actions_mask);
3130                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3131                 break;
3132         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3133                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3134                                        bundle->actions_mask);
3135                 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3136                                                            action->conf,
3137                                                            spec, error);
3138                 custom_error = B_TRUE;
3139                 break;
3140         case RTE_FLOW_ACTION_TYPE_COUNT:
3141                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3142                                        bundle->actions_mask);
3143                 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3144                 break;
3145         case RTE_FLOW_ACTION_TYPE_FLAG:
3146                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3147                                        bundle->actions_mask);
3148                 rc = efx_mae_action_set_populate_flag(spec);
3149                 break;
3150         case RTE_FLOW_ACTION_TYPE_MARK:
3151                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3152                                        bundle->actions_mask);
3153                 rc = sfc_mae_rule_parse_action_mark(sa, action->conf, spec);
3154                 break;
3155         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3156                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3157                                        bundle->actions_mask);
3158                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3159                 break;
3160         case RTE_FLOW_ACTION_TYPE_PF:
3161                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3162                                        bundle->actions_mask);
3163                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3164                 break;
3165         case RTE_FLOW_ACTION_TYPE_VF:
3166                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3167                                        bundle->actions_mask);
3168                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3169                 break;
3170         case RTE_FLOW_ACTION_TYPE_PORT_ID:
3171                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3172                                        bundle->actions_mask);
3173                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3174                 break;
3175         case RTE_FLOW_ACTION_TYPE_DROP:
3176                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3177                                        bundle->actions_mask);
3178                 rc = efx_mae_action_set_populate_drop(spec);
3179                 break;
3180         default:
3181                 return rte_flow_error_set(error, ENOTSUP,
3182                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3183                                 "Unsupported action");
3184         }
3185
3186         if (rc == 0) {
3187                 bundle->actions_mask |= (1ULL << action->type);
3188         } else if (!custom_error) {
3189                 if (action->type < RTE_DIM(action_names)) {
3190                         const char *action_name = action_names[action->type];
3191
3192                         if (action_name != NULL) {
3193                                 sfc_err(sa, "action %s was rejected: %s",
3194                                         action_name, strerror(rc));
3195                         }
3196                 }
3197                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3198                                 NULL, "Failed to request the action");
3199         }
3200
3201         return rc;
3202 }
3203
3204 static void
3205 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
3206 {
3207         bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
3208 }
3209
3210 static int
3211 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3212                              const struct sfc_mae_bounce_eh *bounce_eh,
3213                              struct sfc_mae_encap_header **encap_headerp)
3214 {
3215         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3216                 *encap_headerp = NULL;
3217                 return 0;
3218         }
3219
3220         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3221         if (*encap_headerp != NULL)
3222                 return 0;
3223
3224         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
3225 }
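/*
 * The attach-then-add sequence above is the driver's usual resource
 * reuse idiom: an encap. header identical to an already registered
 * one is attached (its reference count is bumped) instead of adding
 * a duplicate SW entry; compare the action set handling in
 * sfc_mae_rule_parse_actions() below.
 */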
3226
3227 int
3228 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3229                            const struct rte_flow_action actions[],
3230                            struct sfc_flow_spec_mae *spec_mae,
3231                            struct rte_flow_error *error)
3232 {
3233         struct sfc_mae_encap_header *encap_header = NULL;
3234         struct sfc_mae_actions_bundle bundle = {0};
3235         const struct rte_flow_action *action;
3236         struct sfc_mae *mae = &sa->mae;
3237         efx_mae_actions_t *spec;
3238         unsigned int n_count;
3239         int rc;
3240
3241         rte_errno = 0;
3242
3243         if (actions == NULL) {
3244                 return rte_flow_error_set(error, EINVAL,
3245                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
3246                                 "NULL actions");
3247         }
3248
3249         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
3250         if (rc != 0)
3251                 goto fail_action_set_spec_init;
3252
3253         /* Cleanup after previous encap. header bounce buffer usage. */
3254         sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3255
3256         for (action = actions;
3257              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3258                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3259                 if (rc != 0)
3260                         goto fail_rule_parse_action;
3261
3262                 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
3263                                                &bundle, spec, error);
3264                 if (rc != 0)
3265                         goto fail_rule_parse_action;
3266         }
3267
3268         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3269         if (rc != 0)
3270                 goto fail_rule_parse_action;
3271
3272         rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
3273         if (rc != 0)
3274                 goto fail_process_encap_header;
3275
3276         n_count = efx_mae_action_set_get_nb_count(spec);
3277         if (n_count > 1) {
3278                 rc = ENOTSUP;
3279                 sfc_err(sa, "too many count actions requested: %u", n_count);
3280                 goto fail_nb_count;
3281         }
3282
3283         spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
3284                                                          n_count, spec);
3285         if (spec_mae->action_set != NULL) {
3286                 sfc_mae_encap_header_del(sa, encap_header);
3287                 efx_mae_action_set_spec_fini(sa->nic, spec);
3288                 return 0;
3289         }
3290
3291         rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
3292                                     &spec_mae->action_set);
3293         if (rc != 0)
3294                 goto fail_action_set_add;
3295
3296         return 0;
3297
3298 fail_action_set_add:
3299 fail_nb_count:
3300         sfc_mae_encap_header_del(sa, encap_header);
3301
3302 fail_process_encap_header:
3303 fail_rule_parse_action:
3304         efx_mae_action_set_spec_fini(sa->nic, spec);
3305
3306 fail_action_set_spec_init:
3307         if (rc > 0 && rte_errno == 0) {
3308                 rc = rte_flow_error_set(error, rc,
3309                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3310                         NULL, "Failed to process the action");
3311         }
3312         return rc;
3313 }
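/*
 * Illustrative application-side sketch (not part of the driver) of an
 * action list handled by sfc_mae_rule_parse_actions(): count matching
 * packets, mark them and deliver them to another ethdev port. The IDs
 * are arbitrary examples.
 *
 *	struct rte_flow_action_count count_conf = { .id = 0 };
 *	struct rte_flow_action_mark mark_conf = { .id = 42 };
 *	struct rte_flow_action_port_id port_conf = { .id = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ RTE_FLOW_ACTION_TYPE_COUNT, &count_conf },
 *		{ RTE_FLOW_ACTION_TYPE_MARK, &mark_conf },
 *		{ RTE_FLOW_ACTION_TYPE_PORT_ID, &port_conf },
 *		{ RTE_FLOW_ACTION_TYPE_END, NULL },
 *	};
 */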
3314
3315 static bool
3316 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3317                         const efx_mae_match_spec_t *left,
3318                         const efx_mae_match_spec_t *right)
3319 {
3320         bool have_same_class;
3321         int rc;
3322
3323         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
3324                                            &have_same_class);
3325
3326         return (rc == 0) ? have_same_class : false;
3327 }
3328
3329 static int
3330 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3331                                 struct sfc_mae_outer_rule *rule)
3332 {
3333         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3334         struct sfc_mae_outer_rule *entry;
3335         struct sfc_mae *mae = &sa->mae;
3336
3337         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3338                 /* An active rule is reused. Its class is known to be valid. */
3339                 return 0;
3340         }
3341
3342         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3343                               sfc_mae_outer_rules, entries) {
3344                 const efx_mae_match_spec_t *left = entry->match_spec;
3345                 const efx_mae_match_spec_t *right = rule->match_spec;
3346
3347                 if (entry == rule)
3348                         continue;
3349
3350                 if (sfc_mae_rules_class_cmp(sa, left, right))
3351                         return 0;
3352         }
3353
3354         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3355                  "support for outer frame pattern items is not guaranteed; "
3356                  "other than that, the items are valid from SW standpoint");
3357         return 0;
3358 }
3359
3360 static int
3361 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3362                                  struct sfc_flow_spec_mae *spec)
3363 {
3364         const struct rte_flow *entry;
3365
3366         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3367                 const struct sfc_flow_spec *entry_spec = &entry->spec;
3368                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3369                 const efx_mae_match_spec_t *left = es_mae->match_spec;
3370                 const efx_mae_match_spec_t *right = spec->match_spec;
3371
3372                 switch (entry_spec->type) {
3373                 case SFC_FLOW_SPEC_FILTER:
3374                         /* Ignore VNIC-level flows */
3375                         break;
3376                 case SFC_FLOW_SPEC_MAE:
3377                         if (sfc_mae_rules_class_cmp(sa, left, right))
3378                                 return 0;
3379                         break;
3380                 default:
3381                         SFC_ASSERT(false);
3382                 }
3383         }
3384
3385         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3386                  "support for inner frame pattern items is not guaranteed; "
3387                  "other than that, the items are valid from SW standpoint");
3388         return 0;
3389 }
3390
3391 /**
3392  * Confirm that a given flow can be accepted by the FW.
3393  *
3394  * @param sa
3395  *   Software adapter context
3396  * @param flow
3397  *   Flow to be verified
3398  * @return
3399  *   Zero on success and non-zero in the case of error.
3400  *   A special value of EAGAIN indicates that the adapter is
3401  *   not in the started state. This state is required because
3402  *   the rule class of the flow being validated can only be
3403  *   compared with the classes of currently active rules.
3404  *   Such classes are known to be supported by the FW.
3405  */
3406 int
3407 sfc_mae_flow_verify(struct sfc_adapter *sa,
3408                     struct rte_flow *flow)
3409 {
3410         struct sfc_flow_spec *spec = &flow->spec;
3411         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3412         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3413         int rc;
3414
3415         SFC_ASSERT(sfc_adapter_is_locked(sa));
3416
3417         if (sa->state != SFC_ETHDEV_STARTED)
3418                 return EAGAIN;
3419
3420         if (outer_rule != NULL) {
3421                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
3422                 if (rc != 0)
3423                         return rc;
3424         }
3425
3426         return sfc_mae_action_rule_class_verify(sa, spec_mae);
3427 }
3428
3429 int
3430 sfc_mae_flow_insert(struct sfc_adapter *sa,
3431                     struct rte_flow *flow)
3432 {
3433         struct sfc_flow_spec *spec = &flow->spec;
3434         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3435         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3436         struct sfc_mae_action_set *action_set = spec_mae->action_set;
3437         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
3438         int rc;
3439
3440         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
3441         SFC_ASSERT(action_set != NULL);
3442
3443         if (outer_rule != NULL) {
3444                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
3445                                                spec_mae->match_spec);
3446                 if (rc != 0)
3447                         goto fail_outer_rule_enable;
3448         }
3449
3450         rc = sfc_mae_action_set_enable(sa, action_set);
3451         if (rc != 0)
3452                 goto fail_action_set_enable;
3453
3454         if (action_set->n_counters > 0) {
3455                 rc = sfc_mae_counter_start(sa);
3456                 if (rc != 0) {
3457                         sfc_err(sa, "failed to start MAE counters support: %s",
3458                                 rte_strerror(rc));
3459                         goto fail_mae_counter_start;
3460                 }
3461         }
3462
3463         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3464                                         NULL, &fw_rsrc->aset_id,
3465                                         &spec_mae->rule_id);
3466         if (rc != 0)
3467                 goto fail_action_rule_insert;
3468
3469         sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3470                 flow, spec_mae->rule_id.id);
3471
3472         return 0;
3473
3474 fail_action_rule_insert:
3475 fail_mae_counter_start:
3476         sfc_mae_action_set_disable(sa, action_set);
3477
3478 fail_action_set_enable:
3479         if (outer_rule != NULL)
3480                 sfc_mae_outer_rule_disable(sa, outer_rule);
3481
3482 fail_outer_rule_enable:
3483         return rc;
3484 }
3485
3486 int
3487 sfc_mae_flow_remove(struct sfc_adapter *sa,
3488                     struct rte_flow *flow)
3489 {
3490         struct sfc_flow_spec *spec = &flow->spec;
3491         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3492         struct sfc_mae_action_set *action_set = spec_mae->action_set;
3493         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3494         int rc;
3495
3496         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3497         SFC_ASSERT(action_set != NULL);
3498
3499         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3500         if (rc != 0) {
3501                 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3502                         flow, spec_mae->rule_id.id, strerror(rc));
3503         }
3504         sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3505                 flow, spec_mae->rule_id.id);
3506         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3507
3508         sfc_mae_action_set_disable(sa, action_set);
3509
3510         if (outer_rule != NULL)
3511                 sfc_mae_outer_rule_disable(sa, outer_rule);
3512
3513         return 0;
3514 }
3515
3516 static int
3517 sfc_mae_query_counter(struct sfc_adapter *sa,
3518                       struct sfc_flow_spec_mae *spec,
3519                       const struct rte_flow_action *action,
3520                       struct rte_flow_query_count *data,
3521                       struct rte_flow_error *error)
3522 {
3523         struct sfc_mae_action_set *action_set = spec->action_set;
3524         const struct rte_flow_action_count *conf = action->conf;
3525         unsigned int i;
3526         int rc;
3527
3528         if (action_set->n_counters == 0) {
3529                 return rte_flow_error_set(error, EINVAL,
3530                         RTE_FLOW_ERROR_TYPE_ACTION, action,
3531                         "Queried flow rule does not have count actions");
3532         }
3533
3534         for (i = 0; i < action_set->n_counters; i++) {
3535                 /*
3536                  * Get the first available counter of the flow rule if
3537                  * counter ID is not specified.
3538                  */
3539                 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
3540                         continue;
3541
3542                 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
3543                                          &action_set->counters[i], data);
3544                 if (rc != 0) {
3545                         return rte_flow_error_set(error, EINVAL,
3546                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
3547                                 "Queried flow rule counter action is invalid");
3548                 }
3549
3550                 return 0;
3551         }
3552
3553         return rte_flow_error_set(error, ENOENT,
3554                                   RTE_FLOW_ERROR_TYPE_ACTION, action,
3555                                   "No such flow rule action count ID");
3556 }
3557
3558 int
3559 sfc_mae_flow_query(struct rte_eth_dev *dev,
3560                    struct rte_flow *flow,
3561                    const struct rte_flow_action *action,
3562                    void *data,
3563                    struct rte_flow_error *error)
3564 {
3565         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
3566         struct sfc_flow_spec *spec = &flow->spec;
3567         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3568
3569         switch (action->type) {
3570         case RTE_FLOW_ACTION_TYPE_COUNT:
3571                 return sfc_mae_query_counter(sa, spec_mae, action,
3572                                              data, error);
3573         default:
3574                 return rte_flow_error_set(error, ENOTSUP,
3575                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3576                         "Query for action of this type is not supported");
3577         }
3578 }
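/*
 * Illustrative application-side sketch (not part of the driver) of a
 * counter readout for a flow with a COUNT action; rte_flow_query()
 * dispatches to sfc_mae_flow_query() above for MAE-backed flows.
 * The port and counter IDs are arbitrary examples.
 *
 *	struct rte_flow_query_count data = { .reset = 0 };
 *	struct rte_flow_action_count count_conf = { .id = 0 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count_conf,
 *	};
 *	struct rte_flow_error flow_error;
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &data,
 *			   &flow_error) == 0 && data.hits_set)
 *		printf("hits: %" PRIu64 "\n", data.hits);
 */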
3579
3580 int
3581 sfc_mae_switchdev_init(struct sfc_adapter *sa)
3582 {
3583         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3584         struct sfc_mae *mae = &sa->mae;
3585         efx_mport_sel_t pf;
3586         efx_mport_sel_t phy;
3587         int rc;
3588
3589         sfc_log_init(sa, "entry");
3590
3591         if (!sa->switchdev) {
3592                 sfc_log_init(sa, "switchdev is not enabled - skip");
3593                 return 0;
3594         }
3595
3596         if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
3597                 rc = ENOTSUP;
3598                 sfc_err(sa, "failed to init switchdev - no MAE support");
3599                 goto fail_no_mae;
3600         }
3601
3602         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
3603                                             &pf);
3604         if (rc != 0) {
3605                 sfc_err(sa, "failed to get PF m-port");
3606                 goto fail_pf_get;
3607         }
3608
3609         rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
3610         if (rc != 0) {
3611                 sfc_err(sa, "failed to get PHY m-port");
3612                 goto fail_phy_get;
3613         }
3614
3615         rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
3616                         SFC_MAE_RULE_PRIO_LOWEST,
3617                         &mae->switchdev_rule_pf_to_ext);
3618         if (rc != 0) {
3619                 sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
3620                 goto fail_pf_add;
3621         }
3622
3623         rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
3624                         SFC_MAE_RULE_PRIO_LOWEST,
3625                         &mae->switchdev_rule_ext_to_pf);
3626         if (rc != 0) {
3627                 sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
3628                 goto fail_phy_add;
3629         }
3630
3631         sfc_log_init(sa, "done");
3632
3633         return 0;
3634
3635 fail_phy_add:
3636         sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3637
3638 fail_pf_add:
3639 fail_phy_get:
3640 fail_pf_get:
3641 fail_no_mae:
3642         sfc_log_init(sa, "failed: %s", rte_strerror(rc));
3643         return rc;
3644 }
3645
3646 void
3647 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
3648 {
3649         struct sfc_mae *mae = &sa->mae;
3650
3651         if (!sa->switchdev)
3652                 return;
3653
3654         sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
3655         sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
3656 }