net/sfc: fix printout label for count action
drivers/net/sfc/sfc_mae.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_flow_tunnel.h"
#include "sfc_mae_counter.h"
#include "sfc_log.h"
#include "sfc_switch.h"
#include "sfc_service.h"

static int
sfc_mae_assign_ethdev_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc = 0;

        if (encp->enc_mae_admin) {
                /*
                 * This ethdev sits on MAE admin PF. The represented
                 * entity is the network port assigned to that PF.
                 */
                rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, mportp);
        } else {
                /*
                 * This ethdev sits on unprivileged PF / VF. The entity
                 * represented by the ethdev can change dynamically
                 * as MAE admin changes default traffic rules.
                 *
                 * For the sake of simplicity, do not fill in the m-port
                 * and assume that flow rules should not be allowed to
                 * reference the entity represented by this ethdev.
                 */
                efx_mae_mport_invalid(mportp);
        }

        return rc;
}

static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
                              uint32_t nb_counters_max)
{
        return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}

static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
        sfc_mae_counters_fini(&registry->counters);
}

static int
sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
                                      struct sfc_mae_rule **rule)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
        unsigned int entry;
        int rc;

        for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
                if (internal_rules->rules[entry].spec == NULL)
                        break;
        }

        if (entry == SFC_MAE_NB_RULES_MAX) {
                rc = ENOSPC;
                sfc_err(sa, "failed to find an empty slot: too many rules (%u rules used)",
                        entry);
                goto fail_too_many_rules;
        }

        *rule = &internal_rules->rules[entry];

        return 0;

fail_too_many_rules:
        return rc;
}

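/*
 * Add an internal MAE rule that matches traffic from "mport_match" and
 * delivers it to "mport_deliver". A negative "prio" selects the lowest
 * precedence level (nb_action_rule_prios_max - 1). The rule occupies a
 * slot in the internal rule table until sfc_mae_rule_del() is invoked;
 * callers presumably use this for default switching (e.g. representors).
 */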
int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
                                     const efx_mport_sel_t *mport_match,
                                     const efx_mport_sel_t *mport_deliver,
                                     int prio, struct sfc_mae_rule **rulep)
{
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_rule *rule;
        int rc;

        sfc_log_init(sa, "entry");

        if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
                rc = EINVAL;
                sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
                        mae->nb_action_rule_prios_max);
                goto fail_invalid_prio;
        }
        if (prio < 0)
                prio = mae->nb_action_rule_prios_max - 1;

        rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
        if (rc != 0)
                goto fail_find_empty_slot;

        sfc_log_init(sa, "init MAE match spec");
        rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                     (uint32_t)prio, &rule->spec);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE match spec");
                goto fail_match_init;
        }

        rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
        if (rc != 0) {
                sfc_err(sa, "failed to set MAE match m-port selector");
                goto fail_mport_set;
        }

        rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
        if (rc != 0) {
                sfc_err(sa, "failed to init MAE action set");
                goto fail_action_init;
        }

        rc = efx_mae_action_set_populate_deliver(rule->actions,
                                                 mport_deliver);
        if (rc != 0) {
                sfc_err(sa, "failed to populate deliver action");
                goto fail_populate_deliver;
        }

        rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
                                      &rule->action_set);
        if (rc != 0) {
                sfc_err(sa, "failed to allocate action set");
                goto fail_action_set_alloc;
        }

        rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
                                        &rule->action_set,
                                        &rule->rule_id);
        if (rc != 0) {
                sfc_err(sa, "failed to insert action rule");
                goto fail_rule_insert;
        }

        *rulep = rule;

        sfc_log_init(sa, "done");

        return 0;

fail_rule_insert:
        efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
        efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
        return rc;
}

void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
        if (rule == NULL || rule->spec == NULL)
                return;

        efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
        efx_mae_action_set_free(sa->nic, &rule->action_set);
        efx_mae_action_set_spec_fini(sa->nic, rule->actions);
        efx_mae_match_spec_fini(sa->nic, rule->spec);

        rule->spec = NULL;
}

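/*
 * Attach-time initialisation of the MAE context. On an MAE admin PF,
 * this brings up the MAE itself, queries its limits and sets up the
 * counter registry and the encap. header bounce buffer. On any
 * MAE-capable function, it resolves the ethdev and entity m-ports and
 * registers the port in the RTE switch domain.
 */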
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t ethdev_mport;
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        if (encp->enc_mae_admin) {
                sfc_log_init(sa, "init MAE");
                rc = efx_mae_init(sa->nic);
                if (rc != 0)
                        goto fail_mae_init;

                sfc_log_init(sa, "get MAE limits");
                rc = efx_mae_get_limits(sa->nic, &limits);
                if (rc != 0)
                        goto fail_mae_get_limits;

                sfc_log_init(sa, "init MAE counter registry");
                rc = sfc_mae_counter_registry_init(&mae->counter_registry,
                                                   limits.eml_max_n_counters);
                if (rc != 0) {
                        sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
                                limits.eml_max_n_counters, rte_strerror(rc));
                        goto fail_counter_registry_init;
                }
        }

        sfc_log_init(sa, "assign ethdev MPORT");
        rc = sfc_mae_assign_ethdev_mport(sa, &ethdev_mport);
        if (rc != 0)
                goto fail_mae_assign_ethdev_mport;

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.ethdev_mportp = &ethdev_mport;
        switch_port_request.entity_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        switch_port_request.port_data.indep.mae_admin =
                encp->enc_mae_admin == B_TRUE;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        if (encp->enc_mae_admin) {
                sfc_log_init(sa, "allocate encap. header bounce buffer");
                bounce_eh->buf_size = limits.eml_encap_header_size_limit;
                bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                            bounce_eh->buf_size, 0);
                if (bounce_eh->buf == NULL) {
                        /* Without this, a NULL buffer would leak rc == 0. */
                        rc = ENOMEM;
                        goto fail_mae_alloc_bounce_eh;
                }

                mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
                mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
                mae->encap_types_supported = limits.eml_encap_types_supported;
        }

        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        if (encp->enc_mae_admin)
                mae->status = SFC_MAE_STATUS_ADMIN;
        else
                mae->status = SFC_MAE_STATUS_SUPPORTED;

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_assign_ethdev_mport:
        if (encp->enc_mae_admin)
                sfc_mae_counter_registry_fini(&mae->counter_registry);

fail_counter_registry_init:
fail_mae_get_limits:
        if (encp->enc_mae_admin)
                efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

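/*
 * Reverse of sfc_mae_attach(). Resources beyond the status and priority
 * fields exist only on MAE admin PFs, hence the early return for other
 * statuses.
 */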
void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_ADMIN)
                return;

        rte_free(mae->bounce_eh.buf);
        sfc_mae_counter_registry_fini(&mae->counter_registry);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

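/*
 * Outer rules, encap. headers and action sets below follow a two-tier
 * reference counting scheme: the driver-level object ("refcnt") is
 * shared between flows with identical specs, whereas the FW resource
 * ("fw_rsrc") is allocated lazily on first enable and freed on last
 * disable. The attach helpers look up an existing object to reuse.
 */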
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        sfc_dbg(sa, "attaching to outer_rule=%p", rule);
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        sfc_dbg(sa, "added outer_rule=%p", rule);

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
            rule->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
                        rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
        }

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);

        sfc_dbg(sa, "deleted outer_rule=%p", rule);
}

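/*
 * Insert the outer rule in the FW on its first use and, unless the
 * caller passes a NULL action rule match spec, make that spec match on
 * the resulting outer rule ID. If setting the ID fails right after the
 * first insertion, the insertion is rolled back.
 */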
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable outer_rule=%p: %s",
                                rule, strerror(rc));
                        return rc;
                }
        }

        if (match_spec_action == NULL)
                goto skip_action_rule;

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

                return rc;
        }

skip_action_rule:
        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
                        rule, fw_rsrc->rule_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

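/*
 * Drop one FW-level reference to the outer rule; the FW resource is
 * removed only when the last reference goes away. Disabling a rule
 * that is not enabled is logged as an error but is otherwise a no-op.
 */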
static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
                        rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
                                rule, fw_rsrc->rule_id.id);
                } else {
                        sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
                                rule, fw_rsrc->rule_id.id, strerror(rc));
                }
                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        sfc_dbg(sa, "attaching to encap_header=%p",
                                encap_header);
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        sfc_dbg(sa, "added encap_header=%p", encap_header);

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
            encap_header->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
                        encap_header, encap_header->fw_rsrc.eh_id.id,
                        encap_header->fw_rsrc.refcnt);
        }

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);

        sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}

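/*
 * Allocate the encap. header in the FW on first use and fill its ID in
 * the given action set spec. As with outer rules, failure to fill in
 * the ID on the first reference frees the freshly allocated resource.
 */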
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable encap_header=%p: %s",
                                encap_header, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
                        encap_header, fw_rsrc->eh_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
                        encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
                                encap_header, fw_rsrc->eh_id.id);
                } else {
                        sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
                                encap_header, fw_rsrc->eh_id.id, strerror(rc));
                }
                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

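/*
 * Enable the counters of an action set and fill the counter ID in the
 * action set spec. Only one COUNT action per action set is supported
 * at this point, which the n_counters == 1 assertion enforces.
 */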
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
                        struct sfc_mae_counter_id *counters,
                        unsigned int n_counters,
                        efx_mae_actions_t *action_set_spec)
{
        int rc;

        sfc_log_init(sa, "entry");

        if (n_counters == 0) {
                sfc_log_init(sa, "no counters - skip");
                return 0;
        }

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        rc = sfc_mae_counter_enable(sa, &counters[0]);
        if (rc != 0) {
                sfc_err(sa, "failed to enable MAE counter %u: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_counter_add;
        }

        rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
                                                   &counters[0].mae_id);
        if (rc != 0) {
                sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
                        counters[0].mae_id.id, rte_strerror(rc));
                goto fail_fill_in_id;
        }

        return 0;

fail_fill_in_id:
        (void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
        return rc;
}

static int
sfc_mae_counters_disable(struct sfc_adapter *sa,
                         struct sfc_mae_counter_id *counters,
                         unsigned int n_counters)
{
        if (n_counters == 0)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(n_counters == 1);

        if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
                sfc_err(sa, "failed to disable: already disabled");
                return EALREADY;
        }

        return sfc_mae_counter_disable(sa, &counters[0]);
}

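/*
 * Look up an existing action set with the same spec and encap. header
 * to reuse. Action sets that involve COUNT are never shared because
 * shared counters are not supported (see the comment in the loop).
 */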
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          unsigned int n_count,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                /*
                 * Shared counters are not supported, hence action sets with
                 * COUNT are not attachable.
                 */
                if (action_set->encap_header == encap_header &&
                    n_count == 0 &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        sfc_dbg(sa, "attaching to action_set=%p", action_set);
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

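/*
 * Create a driver-level action set object. For COUNT actions, the
 * counter ID array is allocated up front: RTE counter IDs are copied
 * from the COUNT action configurations, while the MAE IDs remain
 * invalid until sfc_mae_action_set_enable() allocates FW resources.
 */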
static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       uint64_t *ft_group_hit_counter,
                       struct sfc_flow_tunnel *ft,
                       unsigned int n_counters,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;
        unsigned int i;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL) {
                sfc_err(sa, "failed to alloc action set");
                return ENOMEM;
        }

        if (n_counters > 0) {
                const struct rte_flow_action *action;

                action_set->counters = rte_malloc("sfc_mae_counter_ids",
                        sizeof(action_set->counters[0]) * n_counters, 0);
                if (action_set->counters == NULL) {
                        rte_free(action_set);
                        sfc_err(sa, "failed to alloc counters");
                        return ENOMEM;
                }

                for (i = 0; i < n_counters; ++i) {
                        action_set->counters[i].rte_id_valid = B_FALSE;
                        action_set->counters[i].mae_id.id =
                                EFX_MAE_RSRC_ID_INVALID;

                        action_set->counters[i].ft_group_hit_counter =
                                ft_group_hit_counter;
                        action_set->counters[i].ft = ft;
                }

                for (action = actions, i = 0;
                     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
                     ++action) {
                        const struct rte_flow_action_count *conf;

                        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                                continue;

                        conf = action->conf;

                        action_set->counters[i].rte_id_valid = B_TRUE;
                        action_set->counters[i].rte_id = conf->id;
                        i++;
                }
                action_set->n_counters = n_counters;
        }

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        sfc_dbg(sa, "added action_set=%p", action_set);

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
            action_set->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
                        action_set, action_set->fw_rsrc.aset_id.id,
                        action_set->fw_rsrc.refcnt);
        }

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        if (action_set->n_counters > 0) {
                SFC_ASSERT(action_set->n_counters == 1);
                SFC_ASSERT(action_set->counters[0].mae_id.id ==
                           EFX_MAE_RSRC_ID_INVALID);
                rte_free(action_set->counters);
        }
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);

        sfc_dbg(sa, "deleted action_set=%p", action_set);
}

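/*
 * Allocate the FW resources behind an action set in dependency order:
 * encap. header first, then counters, then the action set itself,
 * rolling back earlier steps on failure. Disabling reverses the order.
 */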
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_counter_id *counters = action_set->counters;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = sfc_mae_counters_enable(sa, counters,
                                             action_set->n_counters,
                                             action_set->spec);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));

                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable action_set=%p: %s",
                                action_set, strerror(rc));

                        (void)sfc_mae_counters_disable(sa, counters,
                                                       action_set->n_counters);
                        sfc_mae_encap_header_disable(sa, encap_header);
                        return rc;
                }

                sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
                        action_set, fw_rsrc->aset_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
                        action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
                                action_set, fw_rsrc->aset_id.id);
                } else {
                        sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
                                action_set, fw_rsrc->aset_id.id, strerror(rc));
                }
                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                rc = sfc_mae_counters_disable(sa, action_set->counters,
                                              action_set->n_counters);
                if (rc != 0) {
                        sfc_err(sa, "failed to disable %u MAE counters: %s",
                                action_set->n_counters, rte_strerror(rc));
                }

                sfc_mae_encap_header_disable(sa, action_set->encap_header);
        }

        --(fw_rsrc->refcnt);
}

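/*
 * Release everything a parsed (and possibly inserted) MAE flow holds:
 * tunnel offload bookkeeping, the outer rule, the action set and the
 * match spec. The action rule itself must have been removed by this
 * point, which is what the rule_id assertion verifies.
 */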
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        if (spec_mae->ft != NULL) {
                if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
                        spec_mae->ft->jump_rule_is_set = B_FALSE;

                SFC_ASSERT(spec_mae->ft->refcnt != 0);
                --(spec_mae->ft->refcnt);
        }

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

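/*
 * Write the collected EtherType / TPID values to the match spec: the
 * innermost "type" goes to ETHER_TYPE_BE and per-tag TPIDs go to
 * VLAN0_PROTO_BE / VLAN1_PROTO_BE, all subject to the field ID remap
 * table (presumably so that tunnel rules can address outer-frame
 * counterparts of these fields).
 */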
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is a L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

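/*
 * Post-process pattern data accumulated while parsing L2 items:
 * validate TPIDs against the supported (Q-in-Q) set, apply EtherType
 * and IP protocol restrictions imposed by inner items, and set the
 * ETHER_TYPE, VLAN*_PROTO, HAS_*VLAN and IP_PROTO match fields.
 */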
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single item VLAN is followed by a L3 item, value
                 * of "type" in item ETH can't be a double-tagging TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
                rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
                unsigned int tpid_idx;

                /*
                 * This loop can have only two iterations. On the second one,
                 * drop outer tag presence enforcement bit because the inner
                 * tag presence automatically assumes that for the outer tag.
                 */
                enforce_tag_presence[0] = B_FALSE;

                if (tpid_m == RTE_BE16(0)) {
                        if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
                                enforce_tag_presence[ethertype_idx] = B_TRUE;

                        /* No match on this field, and no value check. */
                        nb_supported_tpids = 1;
                        continue;
                }

                /* Exact match is supported only. */
                if (tpid_m != RTE_BE16(0xffff)) {
                        sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
                                rte_be_to_cpu_16(tpid_m));
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (tpid_v == supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
                                rte_be_to_cpu_16(tpid_v));
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
                rte_be16_t enforced_et;

                enforced_et = pdata->innermost_ethertype_restriction.value;

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value = enforced_et;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value != enforced_et) {
                        sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
                                rte_be_to_cpu_16(enforced_et),
                                rte_be_to_cpu_16(et->value),
                                rte_be_to_cpu_16(et->mask));
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now, when the number of VLAN tags is known, set fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is either a valid L3 EtherType (or 0x0000/0x0000),
         * and the last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                                pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
                                pdata->l3_next_proto_restriction_value,
                                pdata->l3_next_proto_value,
                                pdata->l3_next_proto_mask);
                        rc = EINVAL;
                        goto fail;
                }
        }

        if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_OVLAN],
                                                enforce_tag_presence[0] ||
                                                pdata->has_ovlan_value);
                if (rc != 0)
                        goto fail;
        }

        if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_IVLAN],
                                                enforce_tag_presence[1] ||
                                                pdata->has_ivlan_value);
                if (rc != 0)
                        goto fail;
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_mark *spec = item->spec;
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;

        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "NULL spec in item MARK");
        }

        /*
         * This item is used in tunnel offload support only.
         * It must go before any network header items. This
         * way, sfc_mae_rule_preparse_item_mark() must have
         * already parsed it. Only one item MARK is allowed.
         */
        if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
            spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "invalid item MARK");
        }

        return 0;
}

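/*
 * The traffic source items handled below (PORT_ID, PORT_REPRESENTOR,
 * REPRESENTED_PORT, PHY_PORT, PF, VF) all resolve to an m-port
 * selector, so only one of them may appear in a pattern; the
 * match_mport_set checks guard against duplicates.
 */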
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
                                             spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't get m-port for the given ethdev");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_ethdev_based(const struct rte_flow_item *item,
                                     struct sfc_flow_parse_ctx *ctx,
                                     struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_ethdev supp_mask = {
                .port_id = 0xffff,
        };
        const void *def_mask = &rte_flow_item_ethdev_mask;
        const struct rte_flow_item_ethdev *spec = NULL;
        const struct rte_flow_item_ethdev *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_ethdev), error);
        if (rc != 0)
                return rc;

        if (mask->port_id != supp_mask.port_id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the ethdev-based pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
                rc = sfc_mae_switch_get_ethdev_mport(
                                ctx_mae->sa->mae.switch_domain_id,
                                spec->port_id, &mport_sel);
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Can't get m-port for the given ethdev");
                }
                break;
        case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
                rc = sfc_mae_switch_get_entity_mport(
                                ctx_mae->sa->mae.switch_domain_id,
                                spec->port_id, &mport_sel);
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Can't get m-port for the given ethdev");
                }
                break;
        default:
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Unsupported ethdev-based flow item");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1512         const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1513         const struct rte_flow_item_vf supp_mask = {
1514                 .id = 0xffffffff,
1515         };
1516         const void *def_mask = &rte_flow_item_vf_mask;
1517         const struct rte_flow_item_vf *spec = NULL;
1518         const struct rte_flow_item_vf *mask = NULL;
1519         efx_mport_sel_t mport_v;
1520         int rc;
1521
1522         if (ctx_mae->match_mport_set) {
1523                 return rte_flow_error_set(error, ENOTSUP,
1524                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1525                                 "Can't handle multiple traffic source items");
1526         }
1527
1528         rc = sfc_flow_parse_init(item,
1529                                  (const void **)&spec, (const void **)&mask,
1530                                  (const void *)&supp_mask, def_mask,
1531                                  sizeof(struct rte_flow_item_vf), error);
1532         if (rc != 0)
1533                 return rc;
1534
1535         if (mask->id != supp_mask.id) {
1536                 return rte_flow_error_set(error, EINVAL,
1537                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1538                                 "Bad mask in the VF pattern item");
1539         }
1540
1541         /*
1542          * If "spec" is not set, the item requests any VF related to the
1543          * PF of the current DPDK port (but not the PF itself).
1544          * Reject this match criterion as unsupported.
1545          */
1546         if (spec == NULL) {
1547                 return rte_flow_error_set(error, EINVAL,
1548                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1549                                 "Missing spec in the VF pattern item");
1550         }
1551
1552         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1553         if (rc != 0) {
1554                 return rte_flow_error_set(error, rc,
1555                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1556                                 "Failed to convert the PF + VF IDs");
1557         }
1558
1559         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1560         if (rc != 0) {
1561                 return rte_flow_error_set(error, rc,
1562                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1563                                 "Failed to set MPORT for the PF + VF");
1564         }
1565
1566         ctx_mae->match_mport_set = B_TRUE;
1567
1568         return 0;
1569 }
1570
1571 /*
1572  * Having this field ID in a field locator means that this
1573  * locator cannot be used to actually set the field at the
1574  * time when the corresponding item gets encountered. Such
1575  * fields get stashed in the parsing context instead. This
1576  * is required to resolve dependencies between the stashed
1577  * fields. See sfc_mae_rule_process_pattern_data().
1578  */
1579 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
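
/*
 * For instance, the EtherType field of item ETH cannot be set as soon as
 * the item is encountered: when VLAN tags are present, the value in item
 * ETH is effectively a TPID, and the real EtherType comes from the last
 * VLAN item's "inner_type". The stashed values get reconciled afterwards.
 */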
1580
1581 struct sfc_mae_field_locator {
1582         efx_mae_field_id_t              field_id;
1583         size_t                          size;
1584         /* Field offset in the corresponding rte_flow_item_ struct */
1585         size_t                          ofst;
1586 };
1587
1588 static void
1589 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1590                              unsigned int nb_field_locators, void *mask_ptr,
1591                              size_t mask_size)
1592 {
1593         unsigned int i;
1594
1595         memset(mask_ptr, 0, mask_size);
1596
1597         for (i = 0; i < nb_field_locators; ++i) {
1598                 const struct sfc_mae_field_locator *fl = &field_locators[i];
1599
1600                 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1601                 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1602         }
1603 }
1604
1605 static int
1606 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1607                    unsigned int nb_field_locators, const uint8_t *spec,
1608                    const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1609                    struct rte_flow_error *error)
1610 {
1611         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1612         unsigned int i;
1613         int rc = 0;
1614
1615         for (i = 0; i < nb_field_locators; ++i) {
1616                 const struct sfc_mae_field_locator *fl = &field_locators[i];
1617
1618                 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1619                         continue;
1620
1621                 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1622                                                   fremap[fl->field_id],
1623                                                   fl->size, spec + fl->ofst,
1624                                                   fl->size, mask + fl->ofst);
1625                 if (rc != 0)
1626                         break;
1627         }
1628
1629         if (rc != 0) {
1630                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1631                                 NULL, "Failed to process item fields");
1632         }
1633
1634         return rc;
1635 }
1636
1637 static const struct sfc_mae_field_locator flocs_eth[] = {
1638         {
1639                 /*
1640                  * This locator is used only for building supported fields mask.
1641                  * The field is handled by sfc_mae_rule_process_pattern_data().
1642                  */
1643                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1644                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1645                 offsetof(struct rte_flow_item_eth, type),
1646         },
1647         {
1648                 EFX_MAE_FIELD_ETH_DADDR_BE,
1649                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1650                 offsetof(struct rte_flow_item_eth, dst),
1651         },
1652         {
1653                 EFX_MAE_FIELD_ETH_SADDR_BE,
1654                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1655                 offsetof(struct rte_flow_item_eth, src),
1656         },
1657 };
1658
1659 static int
1660 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1661                             struct sfc_flow_parse_ctx *ctx,
1662                             struct rte_flow_error *error)
1663 {
1664         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1665         struct rte_flow_item_eth override_mask;
1666         struct rte_flow_item_eth supp_mask;
1667         const uint8_t *spec = NULL;
1668         const uint8_t *mask = NULL;
1669         int rc;
1670
1671         sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1672                                      &supp_mask, sizeof(supp_mask));
1673         supp_mask.has_vlan = 1;
1674
1675         rc = sfc_flow_parse_init(item,
1676                                  (const void **)&spec, (const void **)&mask,
1677                                  (const void *)&supp_mask,
1678                                  &rte_flow_item_eth_mask,
1679                                  sizeof(struct rte_flow_item_eth), error);
1680         if (rc != 0)
1681                 return rc;
1682
1683         if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
1684                 /*
1685                  * The HW/FW does not support matching on MAC addresses in
1686                  * outer rules yet (this will change). Matching on VLAN
1687                  * presence is not supported either. Ignore these criteria.
1688                  */
1689                 memcpy(&override_mask, mask, sizeof(override_mask));
1690                 memset(&override_mask.hdr.dst_addr, 0,
1691                        sizeof(override_mask.hdr.dst_addr));
1692                 memset(&override_mask.hdr.src_addr, 0,
1693                        sizeof(override_mask.hdr.src_addr));
1694                 override_mask.has_vlan = 0;
1695
1696                 mask = (const uint8_t *)&override_mask;
1697         }
1698
1699         if (spec != NULL) {
1700                 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1701                 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1702                 const struct rte_flow_item_eth *item_spec;
1703                 const struct rte_flow_item_eth *item_mask;
1704
1705                 item_spec = (const struct rte_flow_item_eth *)spec;
1706                 item_mask = (const struct rte_flow_item_eth *)mask;
1707
1708                 /*
1709                  * Remember various match criteria in the parsing context.
1710                  * sfc_mae_rule_process_pattern_data() will consider them
1711                  * altogether when the rest of the items have been parsed.
1712                  */
1713                 ethertypes[0].value = item_spec->type;
1714                 ethertypes[0].mask = item_mask->type;
1715                 if (item_mask->has_vlan) {
1716                         pdata->has_ovlan_mask = B_TRUE;
1717                         if (item_spec->has_vlan)
1718                                 pdata->has_ovlan_value = B_TRUE;
1719                 }
1720         } else {
1721                 /*
1722                  * The specification is empty. The overall pattern
1723                  * validity will be enforced at the end of parsing.
1724                  * See sfc_mae_rule_process_pattern_data().
1725                  */
1726                 return 0;
1727         }
1728
1729         return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1730                                   ctx_mae, error);
1731 }
1732
1733 static const struct sfc_mae_field_locator flocs_vlan[] = {
1734         /* Outermost tag */
1735         {
1736                 EFX_MAE_FIELD_VLAN0_TCI_BE,
1737                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1738                 offsetof(struct rte_flow_item_vlan, tci),
1739         },
1740         {
1741                 /*
1742                  * This locator is used only for building supported fields mask.
1743                  * The field is handled by sfc_mae_rule_process_pattern_data().
1744                  */
1745                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1746                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1747                 offsetof(struct rte_flow_item_vlan, inner_type),
1748         },
1749
1750         /* Innermost tag */
1751         {
1752                 EFX_MAE_FIELD_VLAN1_TCI_BE,
1753                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1754                 offsetof(struct rte_flow_item_vlan, tci),
1755         },
1756         {
1757                 /*
1758                  * This locator is used only for building supported fields mask.
1759                  * The field is handled by sfc_mae_rule_process_pattern_data().
1760                  */
1761                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1762                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1763                 offsetof(struct rte_flow_item_vlan, inner_type),
1764         },
1765 };
1766
1767 static int
1768 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1769                              struct sfc_flow_parse_ctx *ctx,
1770                              struct rte_flow_error *error)
1771 {
1772         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1773         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1774         boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1775                 &pdata->has_ovlan_mask,
1776                 &pdata->has_ivlan_mask,
1777         };
1778         boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1779                 &pdata->has_ovlan_value,
1780                 &pdata->has_ivlan_value,
1781         };
1782         boolean_t *cur_tag_presence_bit_mp;
1783         boolean_t *cur_tag_presence_bit_vp;
1784         const struct sfc_mae_field_locator *flocs;
1785         struct rte_flow_item_vlan supp_mask;
1786         const uint8_t *spec = NULL;
1787         const uint8_t *mask = NULL;
1788         unsigned int nb_flocs;
1789         int rc;
1790
1791         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1792
1793         if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1794                 return rte_flow_error_set(error, ENOTSUP,
1795                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1796                                 "Can't match that many VLAN tags");
1797         }
1798
1799         cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1800         cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1801
1802         if (*cur_tag_presence_bit_mp == B_TRUE &&
1803             *cur_tag_presence_bit_vp == B_FALSE) {
1804                 return rte_flow_error_set(error, EINVAL,
1805                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1806                                 "The previous item requires that no (more) VLAN "
1807                                 "tags be present, so item VLAN is not allowed here");
1808         }
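             /*
              * For example, the pattern "eth has_vlan is 0 / vlan" trips
              * over this check: item ETH sets the tag presence mask with
              * a zero presence value, and the VLAN item contradicts that.
              */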
1809
1810         nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1811         flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
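             /*
              * flocs_vlan provides nb_flocs locators per tag: with the two
              * supported tags, the outermost one uses entries [0; 1] above
              * and the innermost one uses entries [2; 3].
              */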
1812
1813         sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1814                                      &supp_mask, sizeof(supp_mask));
1815         /*
1816          * This only means that the field is supported by the driver and libefx.
1817          * Support on NIC level will be checked when all items have been parsed.
1818          */
1819         supp_mask.has_more_vlan = 1;
1820
1821         rc = sfc_flow_parse_init(item,
1822                                  (const void **)&spec, (const void **)&mask,
1823                                  (const void *)&supp_mask,
1824                                  &rte_flow_item_vlan_mask,
1825                                  sizeof(struct rte_flow_item_vlan), error);
1826         if (rc != 0)
1827                 return rc;
1828
1829         if (spec != NULL) {
1830                 struct sfc_mae_ethertype *et = pdata->ethertypes;
1831                 const struct rte_flow_item_vlan *item_spec;
1832                 const struct rte_flow_item_vlan *item_mask;
1833
1834                 item_spec = (const struct rte_flow_item_vlan *)spec;
1835                 item_mask = (const struct rte_flow_item_vlan *)mask;
1836
1837                 /*
1838                  * Remember various match criteria in the parsing context.
1839                  * sfc_mae_rule_process_pattern_data() will consider them
1840                  * altogether when the rest of the items have been parsed.
1841                  */
1842                 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1843                 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1844                 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1845                 if (item_mask->has_more_vlan) {
1846                         if (pdata->nb_vlan_tags ==
1847                             SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1848                                 return rte_flow_error_set(error, ENOTSUP,
1849                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1850                                         "Can't use 'has_more_vlan' in "
1851                                         "the second VLAN item");
1852                         }
1853                         pdata->has_ivlan_mask = B_TRUE;
1854                         if (item_spec->has_more_vlan)
1855                                 pdata->has_ivlan_value = B_TRUE;
1856                 }
1857
1858                 /* Convert TCI to MAE representation right now. */
1859                 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1860                                         ctx_mae, error);
1861                 if (rc != 0)
1862                         return rc;
1863         }
1864
1865         ++(pdata->nb_vlan_tags);
1866
1867         return 0;
1868 }
1869
1870 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1871         {
1872                 EFX_MAE_FIELD_SRC_IP4_BE,
1873                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1874                 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1875         },
1876         {
1877                 EFX_MAE_FIELD_DST_IP4_BE,
1878                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1879                 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1880         },
1881         {
1882                 /*
1883                  * This locator is used only for building supported fields mask.
1884                  * The field is handled by sfc_mae_rule_process_pattern_data().
1885                  */
1886                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1887                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1888                 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1889         },
1890         {
1891                 EFX_MAE_FIELD_IP_TOS,
1892                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1893                                  hdr.type_of_service),
1894                 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1895         },
1896         {
1897                 EFX_MAE_FIELD_IP_TTL,
1898                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1899                 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1900         },
1901 };
1902
1903 static int
1904 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1905                              struct sfc_flow_parse_ctx *ctx,
1906                              struct rte_flow_error *error)
1907 {
1908         rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1909         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1910         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1911         struct rte_flow_item_ipv4 supp_mask;
1912         const uint8_t *spec = NULL;
1913         const uint8_t *mask = NULL;
1914         int rc;
1915
1916         sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1917                                      &supp_mask, sizeof(supp_mask));
1918
1919         rc = sfc_flow_parse_init(item,
1920                                  (const void **)&spec, (const void **)&mask,
1921                                  (const void *)&supp_mask,
1922                                  &rte_flow_item_ipv4_mask,
1923                                  sizeof(struct rte_flow_item_ipv4), error);
1924         if (rc != 0)
1925                 return rc;
1926
1927         pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1928         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1929
1930         if (spec != NULL) {
1931                 const struct rte_flow_item_ipv4 *item_spec;
1932                 const struct rte_flow_item_ipv4 *item_mask;
1933
1934                 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1935                 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1936
1937                 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1938                 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1939         } else {
1940                 return 0;
1941         }
1942
1943         return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1944                                   ctx_mae, error);
1945 }
1946
1947 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1948         {
1949                 EFX_MAE_FIELD_SRC_IP6_BE,
1950                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1951                 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1952         },
1953         {
1954                 EFX_MAE_FIELD_DST_IP6_BE,
1955                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1956                 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1957         },
1958         {
1959                 /*
1960                  * This locator is used only for building supported fields mask.
1961                  * The field is handled by sfc_mae_rule_process_pattern_data().
1962                  */
1963                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1964                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1965                 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1966         },
1967         {
1968                 EFX_MAE_FIELD_IP_TTL,
1969                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1970                 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1971         },
1972 };
1973
1974 static int
1975 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1976                              struct sfc_flow_parse_ctx *ctx,
1977                              struct rte_flow_error *error)
1978 {
1979         rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1980         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1981         const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1982         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1983         struct rte_flow_item_ipv6 supp_mask;
1984         const uint8_t *spec = NULL;
1985         const uint8_t *mask = NULL;
1986         rte_be32_t vtc_flow_be;
1987         uint32_t vtc_flow;
1988         uint8_t tc_value;
1989         uint8_t tc_mask;
1990         int rc;
1991
1992         sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1993                                      &supp_mask, sizeof(supp_mask));
1994
1995         vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1996         memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1997
1998         rc = sfc_flow_parse_init(item,
1999                                  (const void **)&spec, (const void **)&mask,
2000                                  (const void *)&supp_mask,
2001                                  &rte_flow_item_ipv6_mask,
2002                                  sizeof(struct rte_flow_item_ipv6), error);
2003         if (rc != 0)
2004                 return rc;
2005
2006         pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
2007         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
2008
2009         if (spec != NULL) {
2010                 const struct rte_flow_item_ipv6 *item_spec;
2011                 const struct rte_flow_item_ipv6 *item_mask;
2012
2013                 item_spec = (const struct rte_flow_item_ipv6 *)spec;
2014                 item_mask = (const struct rte_flow_item_ipv6 *)mask;
2015
2016                 pdata->l3_next_proto_value = item_spec->hdr.proto;
2017                 pdata->l3_next_proto_mask = item_mask->hdr.proto;
2018         } else {
2019                 return 0;
2020         }
2021
2022         rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
2023                                 ctx_mae, error);
2024         if (rc != 0)
2025                 return rc;
2026
2027         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
2028         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2029         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
2030
2031         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
2032         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2033         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
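             /*
              * hdr.vtc_flow is the first field of the item, hence the use
              * of offset 0 in the memcpy() invocations above. The traffic
              * class sits in bits 27:20 of the CPU-order value: by way of
              * example, vtc_flow = 0x60300000 gives the class value 0x03.
              */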
2034
2035         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2036                                           fremap[EFX_MAE_FIELD_IP_TOS],
2037                                           sizeof(tc_value), &tc_value,
2038                                           sizeof(tc_mask), &tc_mask);
2039         if (rc != 0) {
2040                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2041                                 NULL, "Failed to process item fields");
2042         }
2043
2044         return 0;
2045 }
2046
2047 static const struct sfc_mae_field_locator flocs_tcp[] = {
2048         {
2049                 EFX_MAE_FIELD_L4_SPORT_BE,
2050                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
2051                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
2052         },
2053         {
2054                 EFX_MAE_FIELD_L4_DPORT_BE,
2055                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
2056                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
2057         },
2058         {
2059                 EFX_MAE_FIELD_TCP_FLAGS_BE,
2060                 /*
2061                  * The values have been picked intentionally since the
2062                  * target MAE field is oversize (16 bit). This mapping
2063                  * relies on the fact that the MAE field is big-endian.
2064                  */
2065                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
2066                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
2067                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
2068         },
2069 };
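
/*
 * By way of example, a flow which specifies hdr.tcp_flags = 0x02 (SYN),
 * mask 0xff, and leaves hdr.data_off zeroed and unmasked makes the
 * locator above submit the big-endian 16-bit value 0x0002 with mask
 * 0x00ff, so that only the flags byte takes part in the match.
 */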
2070
2071 static int
2072 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
2073                             struct sfc_flow_parse_ctx *ctx,
2074                             struct rte_flow_error *error)
2075 {
2076         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2077         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2078         struct rte_flow_item_tcp supp_mask;
2079         const uint8_t *spec = NULL;
2080         const uint8_t *mask = NULL;
2081         int rc;
2082
2083         /*
2084          * When encountered among outermost items, item TCP is invalid.
2085          * Check which match specification is being constructed now.
2086          */
2087         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2090                                           "TCP in outer frame is invalid");
2091         }
2092
2093         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
2094                                      &supp_mask, sizeof(supp_mask));
2095
2096         rc = sfc_flow_parse_init(item,
2097                                  (const void **)&spec, (const void **)&mask,
2098                                  (const void *)&supp_mask,
2099                                  &rte_flow_item_tcp_mask,
2100                                  sizeof(struct rte_flow_item_tcp), error);
2101         if (rc != 0)
2102                 return rc;
2103
2104         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
2105         pdata->l3_next_proto_restriction_mask = 0xff;
2106
2107         if (spec == NULL)
2108                 return 0;
2109
2110         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
2111                                   ctx_mae, error);
2112 }
2113
2114 static const struct sfc_mae_field_locator flocs_udp[] = {
2115         {
2116                 EFX_MAE_FIELD_L4_SPORT_BE,
2117                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
2118                 offsetof(struct rte_flow_item_udp, hdr.src_port),
2119         },
2120         {
2121                 EFX_MAE_FIELD_L4_DPORT_BE,
2122                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
2123                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
2124         },
2125 };
2126
2127 static int
2128 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
2129                             struct sfc_flow_parse_ctx *ctx,
2130                             struct rte_flow_error *error)
2131 {
2132         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2133         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2134         struct rte_flow_item_udp supp_mask;
2135         const uint8_t *spec = NULL;
2136         const uint8_t *mask = NULL;
2137         int rc;
2138
2139         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
2140                                      &supp_mask, sizeof(supp_mask));
2141
2142         rc = sfc_flow_parse_init(item,
2143                                  (const void **)&spec, (const void **)&mask,
2144                                  (const void *)&supp_mask,
2145                                  &rte_flow_item_udp_mask,
2146                                  sizeof(struct rte_flow_item_udp), error);
2147         if (rc != 0)
2148                 return rc;
2149
2150         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
2151         pdata->l3_next_proto_restriction_mask = 0xff;
2152
2153         if (spec == NULL)
2154                 return 0;
2155
2156         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
2157                                   ctx_mae, error);
2158 }
2159
2160 static const struct sfc_mae_field_locator flocs_tunnel[] = {
2161         {
2162                 /*
2163                  * The size and offset values are relevant
2164                  * for Geneve and NVGRE, too.
2165                  */
2166                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
2167                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
2168         },
2169 };
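
/*
 * The locator above leaves "field_id" unset: it is only used to build
 * the supported fields mask, and the VNI / VSID match itself is
 * programmed by sfc_mae_rule_parse_item_tunnel() directly by means of
 * field EFX_MAE_FIELD_ENC_VNET_ID_BE.
 */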
2170
2171 /*
2172  * An auxiliary registry which allows using non-encap. field IDs
2173  * directly when building a match specification of type ACTION.
2174  *
2175  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
2176  */
2177 static const efx_mae_field_id_t field_ids_no_remap[] = {
2178 #define FIELD_ID_NO_REMAP(_field) \
2179         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
2180
2181         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
2182         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
2183         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
2184         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
2185         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
2186         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
2187         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
2188         FIELD_ID_NO_REMAP(SRC_IP4_BE),
2189         FIELD_ID_NO_REMAP(DST_IP4_BE),
2190         FIELD_ID_NO_REMAP(IP_PROTO),
2191         FIELD_ID_NO_REMAP(IP_TOS),
2192         FIELD_ID_NO_REMAP(IP_TTL),
2193         FIELD_ID_NO_REMAP(SRC_IP6_BE),
2194         FIELD_ID_NO_REMAP(DST_IP6_BE),
2195         FIELD_ID_NO_REMAP(L4_SPORT_BE),
2196         FIELD_ID_NO_REMAP(L4_DPORT_BE),
2197         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2198         FIELD_ID_NO_REMAP(HAS_OVLAN),
2199         FIELD_ID_NO_REMAP(HAS_IVLAN),
2200
2201 #undef FIELD_ID_NO_REMAP
2202 };
2203
2204 /*
2205  * An auxiliary registry which allows using "ENC" field IDs
2206  * when building a match specification of type OUTER.
2207  *
2208  * See sfc_mae_rule_encap_parse_init().
2209  */
2210 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2211 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2212         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2213
2214         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2215         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2216         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2217         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2218         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2219         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2220         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2221         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2222         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2223         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2224         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2225         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2226         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2227         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2228         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2229         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2230         FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2231         FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2232
2233 #undef FIELD_ID_REMAP_TO_ENCAP
2234 };
2235
2236 static int
2237 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2238                                struct sfc_flow_parse_ctx *ctx,
2239                                struct rte_flow_error *error)
2240 {
2241         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2242         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2243         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2244         const struct rte_flow_item_vxlan *vxp;
2245         uint8_t supp_mask[sizeof(uint64_t)];
2246         const uint8_t *spec = NULL;
2247         const uint8_t *mask = NULL;
2248         int rc;
2249
2250         if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
2251                 /*
2252                  * As a workaround, pattern processing has started from
2253                  * this (tunnel) item. No pattern data to process yet.
2254                  */
2255         } else {
2256                 /*
2257                  * We're about to start processing inner frame items.
2258                  * Process pattern data that has been deferred so far
2259                  * and reset pattern data storage.
2260                  */
2261                 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2262                 if (rc != 0)
2263                         return rc;
2264         }
2265
2266         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2267
2268         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2269                                      &supp_mask, sizeof(supp_mask));
2270
2271         /*
2272          * This tunnel item was preliminarily detected by
2273          * sfc_mae_rule_encap_parse_init(). Default mask
2274          * was also picked by that helper. Use it here.
2275          */
2276         rc = sfc_flow_parse_init(item,
2277                                  (const void **)&spec, (const void **)&mask,
2278                                  (const void *)&supp_mask,
2279                                  ctx_mae->tunnel_def_mask,
2280                                  ctx_mae->tunnel_def_mask_size, error);
2281         if (rc != 0)
2282                 return rc;
2283
2284         /*
2285          * This item and later ones comprise a
2286          * match specification of type ACTION.
2287          */
2288         ctx_mae->match_spec = ctx_mae->match_spec_action;
2289
2290         /* This item and later ones use non-encap. EFX MAE field IDs. */
2291         ctx_mae->field_ids_remap = field_ids_no_remap;
2292
2293         if (spec == NULL)
2294                 return 0;
2295
2296         /*
2297          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
2298          * Copy 24-bit VNI, which is BE, at offset 1 in it.
2299          * The extra byte is 0 both in the mask and in the value.
2300          */
2301         vxp = (const struct rte_flow_item_vxlan *)spec;
2302         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2303
2304         vxp = (const struct rte_flow_item_vxlan *)mask;
2305         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
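             /*
              * For example, VNI bytes { 0xab, 0xcd, 0xef } in the item end
              * up as { 0x00, 0xab, 0xcd, 0xef } in the 32-bit field, that
              * is, the big-endian value 0x00abcdef.
              */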
2306
2307         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2308                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
2309                                           sizeof(vnet_id_v), vnet_id_v,
2310                                           sizeof(vnet_id_m), vnet_id_m);
2311         if (rc != 0) {
2312                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2313                                         item, "Failed to set VXLAN VNI");
2314         }
2315
2316         return rc;
2317 }
2318
2319 static const struct sfc_flow_item sfc_flow_items[] = {
2320         {
2321                 .type = RTE_FLOW_ITEM_TYPE_MARK,
2322                 .name = "MARK",
2323                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2324                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2325                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2326                 .parse = sfc_mae_rule_parse_item_mark,
2327         },
2328         {
2329                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2330                 .name = "PORT_ID",
2331                 /*
2332                  * In terms of RTE flow, this item is a META one,
2333                  * and its position in the pattern is don't care.
2334                  */
2335                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2336                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2337                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2338                 .parse = sfc_mae_rule_parse_item_port_id,
2339         },
2340         {
2341                 .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
2342                 .name = "PORT_REPRESENTOR",
2343                 /*
2344                  * In terms of RTE flow, this item is a META one,
2345                  * and its position in the pattern is don't care.
2346                  */
2347                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2348                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2349                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2350                 .parse = sfc_mae_rule_parse_item_ethdev_based,
2351         },
2352         {
2353                 .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
2354                 .name = "REPRESENTED_PORT",
2355                 /*
2356                  * In terms of RTE flow, this item is a META one,
2357                  * and its position in the pattern is don't care.
2358                  */
2359                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2360                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2361                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2362                 .parse = sfc_mae_rule_parse_item_ethdev_based,
2363         },
2364         {
2365                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2366                 .name = "PHY_PORT",
2367                 /*
2368                  * In terms of RTE flow, this item is a META one,
2369                  * and its position in the pattern is don't care.
2370                  */
2371                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2372                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2373                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2374                 .parse = sfc_mae_rule_parse_item_phy_port,
2375         },
2376         {
2377                 .type = RTE_FLOW_ITEM_TYPE_PF,
2378                 .name = "PF",
2379                 /*
2380                  * In terms of RTE flow, this item is a META one,
2381                  * and its position in the pattern is don't care.
2382                  */
2383                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2384                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2385                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2386                 .parse = sfc_mae_rule_parse_item_pf,
2387         },
2388         {
2389                 .type = RTE_FLOW_ITEM_TYPE_VF,
2390                 .name = "VF",
2391                 /*
2392                  * In terms of RTE flow, this item is a META one,
2393                  * and its position in the pattern is don't care.
2394                  */
2395                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2396                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2397                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2398                 .parse = sfc_mae_rule_parse_item_vf,
2399         },
2400         {
2401                 .type = RTE_FLOW_ITEM_TYPE_ETH,
2402                 .name = "ETH",
2403                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2404                 .layer = SFC_FLOW_ITEM_L2,
2405                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2406                 .parse = sfc_mae_rule_parse_item_eth,
2407         },
2408         {
2409                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2410                 .name = "VLAN",
2411                 .prev_layer = SFC_FLOW_ITEM_L2,
2412                 .layer = SFC_FLOW_ITEM_L2,
2413                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2414                 .parse = sfc_mae_rule_parse_item_vlan,
2415         },
2416         {
2417                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2418                 .name = "IPV4",
2419                 .prev_layer = SFC_FLOW_ITEM_L2,
2420                 .layer = SFC_FLOW_ITEM_L3,
2421                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2422                 .parse = sfc_mae_rule_parse_item_ipv4,
2423         },
2424         {
2425                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2426                 .name = "IPV6",
2427                 .prev_layer = SFC_FLOW_ITEM_L2,
2428                 .layer = SFC_FLOW_ITEM_L3,
2429                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2430                 .parse = sfc_mae_rule_parse_item_ipv6,
2431         },
2432         {
2433                 .type = RTE_FLOW_ITEM_TYPE_TCP,
2434                 .name = "TCP",
2435                 .prev_layer = SFC_FLOW_ITEM_L3,
2436                 .layer = SFC_FLOW_ITEM_L4,
2437                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2438                 .parse = sfc_mae_rule_parse_item_tcp,
2439         },
2440         {
2441                 .type = RTE_FLOW_ITEM_TYPE_UDP,
2442                 .name = "UDP",
2443                 .prev_layer = SFC_FLOW_ITEM_L3,
2444                 .layer = SFC_FLOW_ITEM_L4,
2445                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2446                 .parse = sfc_mae_rule_parse_item_udp,
2447         },
2448         {
2449                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2450                 .name = "VXLAN",
2451                 .prev_layer = SFC_FLOW_ITEM_L4,
2452                 .layer = SFC_FLOW_ITEM_START_LAYER,
2453                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2454                 .parse = sfc_mae_rule_parse_item_tunnel,
2455         },
2456         {
2457                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2458                 .name = "GENEVE",
2459                 .prev_layer = SFC_FLOW_ITEM_L4,
2460                 .layer = SFC_FLOW_ITEM_START_LAYER,
2461                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2462                 .parse = sfc_mae_rule_parse_item_tunnel,
2463         },
2464         {
2465                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2466                 .name = "NVGRE",
2467                 .prev_layer = SFC_FLOW_ITEM_L3,
2468                 .layer = SFC_FLOW_ITEM_START_LAYER,
2469                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2470                 .parse = sfc_mae_rule_parse_item_tunnel,
2471         },
2472 };
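
/*
 * Note that VXLAN and Geneve follow item UDP, whilst NVGRE, being
 * carried directly over IP (protocol GRE), follows an L3 item. All
 * three tunnel items reset the layer to START_LAYER as the items
 * coming after them describe the inner frame.
 */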
2473
2474 static int
2475 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2476                            struct sfc_mae_parse_ctx *ctx,
2477                            struct sfc_mae_outer_rule **rulep,
2478                            struct rte_flow_error *error)
2479 {
2480         efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2481         int rc;
2482
2483         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2484                 *rulep = NULL;
2485                 goto no_or_id;
2486         }
2487
2488         SFC_ASSERT(ctx->match_spec_outer != NULL);
2489
2490         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2491                 return rte_flow_error_set(error, ENOTSUP,
2492                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2493                                           "Inconsistent pattern (outer)");
2494         }
2495
2496         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2497                                            ctx->encap_type);
2498         if (*rulep != NULL) {
2499                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2500         } else {
2501                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2502                                             ctx->encap_type, rulep);
2503                 if (rc != 0) {
2504                         return rte_flow_error_set(error, rc,
2505                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2506                                         "Failed to process the pattern");
2507                 }
2508         }
2509
2510         /* The spec has now been tracked by the outer rule entry. */
2511         ctx->match_spec_outer = NULL;
2512
2513 no_or_id:
2514         switch (ctx->ft_rule_type) {
2515         case SFC_FT_RULE_NONE:
2516                 break;
2517         case SFC_FT_RULE_JUMP:
2518                 /* No action rule */
2519                 return 0;
2520         case SFC_FT_RULE_GROUP:
2521                 /*
2522                  * Match on recirculation ID rather than
2523                  * on the outer rule allocation handle.
2524                  */
2525                 rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
2526                                         SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
2527                 if (rc != 0) {
2528                         return rte_flow_error_set(error, rc,
2529                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2530                                         "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
2531                 }
2532                 return 0;
2533         default:
2534                 SFC_ASSERT(B_FALSE);
2535         }
2536
2537         /*
2538          * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2539          * inner parse (when some outer rule is hit) and action rule lookup.
2540          * If the currently processed flow does not come with an outer rule,
2541          * its action rule must be available only for packets which miss in
2542          * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2543          * in the action rule specification; this ensures correct behaviour.
2544          *
2545          * If, on the other hand, this flow does have an outer rule, its ID
2546          * may be unknown at the moment (not yet allocated), but OR_ID mask
2547          * has to be set to 0xffffffff anyway for correct class comparisons.
2548          * When the outer rule has been allocated, this match field will be
2549          * overridden by sfc_mae_outer_rule_enable() to use the right value.
2550          */
2551         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2552                                                   &invalid_rule_id);
2553         if (rc != 0) {
2554                 if (*rulep != NULL)
2555                         sfc_mae_outer_rule_del(sa, *rulep);
2556
2557                 *rulep = NULL;
2558
2559                 return rte_flow_error_set(error, rc,
2560                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2561                                           "Failed to process the pattern");
2562         }
2563
2564         return 0;
2565 }
2566
2567 static int
2568 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2569                                 struct sfc_mae_parse_ctx *ctx)
2570 {
2571         struct sfc_flow_tunnel *ft;
2572         uint32_t user_mark;
2573
2574         if (spec == NULL) {
2575                 sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2576                 return EINVAL;
2577         }
2578
2579         ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2580         if (ft == NULL) {
2581                 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2582                 return EINVAL;
2583         }
2584
2585         if (ft->refcnt == 0) {
2586                 sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2587                         ft->id);
2588                 return ENOENT;
2589         }
2590
2591         user_mark = SFC_FT_GET_USER_MARK(spec->id);
2592         if (user_mark != 0) {
2593                 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2594                 return EINVAL;
2595         }
2596
2597         sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2598
2599         ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2600         ctx->ft = ft;
2601
2602         return 0;
2603 }
2604
2605 static int
2606 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2607                               struct sfc_mae_parse_ctx *ctx,
2608                               struct rte_flow_error *error)
2609 {
2610         const struct rte_flow_item *pattern = ctx->pattern;
2611         struct sfc_mae *mae = &sa->mae;
2612         uint8_t recirc_id = 0;
2613         int rc;
2614
2615         if (pattern == NULL) {
2616                 rte_flow_error_set(error, EINVAL,
2617                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2618                                    "NULL pattern");
2619                 return -rte_errno;
2620         }
2621
2622         for (;;) {
2623                 switch (pattern->type) {
2624                 case RTE_FLOW_ITEM_TYPE_MARK:
2625                         rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
2626                                                              ctx);
2627                         if (rc != 0) {
2628                                 return rte_flow_error_set(error, rc,
2629                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2630                                                   pattern, "tunnel offload: GROUP: invalid item MARK");
2631                         }
2632                         ++pattern;
2633                         continue;
2634                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2635                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2636                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2637                         ctx->tunnel_def_mask_size =
2638                                 sizeof(rte_flow_item_vxlan_mask);
2639                         break;
2640                 case RTE_FLOW_ITEM_TYPE_GENEVE:
2641                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2642                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2643                         ctx->tunnel_def_mask_size =
2644                                 sizeof(rte_flow_item_geneve_mask);
2645                         break;
2646                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2647                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2648                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2649                         ctx->tunnel_def_mask_size =
2650                                 sizeof(rte_flow_item_nvgre_mask);
2651                         break;
2652                 case RTE_FLOW_ITEM_TYPE_END:
2653                         break;
2654                 default:
2655                         ++pattern;
2656                         continue;
2657                 }
2658
2659                 break;
2660         }
2661
2662         switch (ctx->ft_rule_type) {
2663         case SFC_FT_RULE_NONE:
2664                 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2665                         return 0;
2666                 break;
2667         case SFC_FT_RULE_JUMP:
2668                 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
2669                         return rte_flow_error_set(error, ENOTSUP,
2670                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2671                                                   pattern, "tunnel offload: JUMP: invalid item");
2672                 }
2673                 ctx->encap_type = ctx->ft->encap_type;
2674                 break;
2675         case SFC_FT_RULE_GROUP:
2676                 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2677                         return rte_flow_error_set(error, EINVAL,
2678                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2679                                                   NULL, "tunnel offload: GROUP: missing tunnel item");
2680                 } else if (ctx->encap_type != ctx->ft->encap_type) {
2681                         return rte_flow_error_set(error, EINVAL,
2682                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2683                                                   pattern, "tunnel offload: GROUP: tunnel type mismatch");
2684                 }
2685
2686                 /*
2687                  * The HW/FW does not yet support the use of "ENC" fields in
2688                  * action rules (except the VNET_ID one). As a workaround,
2689                  * start parsing the pattern from the tunnel item.
2690                  */
2691                 ctx->pattern = pattern;
2692                 break;
2693         default:
2694                 SFC_ASSERT(B_FALSE);
2695                 break;
2696         }
2697
2698         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2699                 return rte_flow_error_set(error, ENOTSUP,
2700                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2701                                           "OR: unsupported tunnel type");
2702         }
2703
2704         switch (ctx->ft_rule_type) {
2705         case SFC_FT_RULE_JUMP:
2706                 recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
2707                 /* FALLTHROUGH */
2708         case SFC_FT_RULE_NONE:
2709                 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2710                         return rte_flow_error_set(error, ENOTSUP,
2711                                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2712                                         NULL, "OR: unsupported priority level");
2713                 }
2714
2715                 rc = efx_mae_match_spec_init(sa->nic,
2716                                              EFX_MAE_RULE_OUTER, ctx->priority,
2717                                              &ctx->match_spec_outer);
2718                 if (rc != 0) {
2719                         return rte_flow_error_set(error, rc,
2720                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2721                                 "OR: failed to initialise the match specification");
2722                 }
2723
2724                 /*
2725                  * Outermost items comprise a match
2726                  * specification of type OUTER.
2727                  */
2728                 ctx->match_spec = ctx->match_spec_outer;
2729
2730                 /* Outermost items use "ENC" EFX MAE field IDs. */
2731                 ctx->field_ids_remap = field_ids_remap_to_encap;
2732
2733                 rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
2734                                                       recirc_id);
2735                 if (rc != 0) {
2736                         return rte_flow_error_set(error, rc,
2737                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2738                                         "OR: failed to initialise RECIRC_ID");
2739                 }
2740                 break;
2741         case SFC_FT_RULE_GROUP:
2742                 /* Outermost items -> "ENC" match fields in the action rule. */
2743                 ctx->field_ids_remap = field_ids_remap_to_encap;
2744                 ctx->match_spec = ctx->match_spec_action;
2745
2746                 /* No outer rule of its own; the JUMP OR's RECIRC_ID match is used. */
2747                 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2748                 break;
2749         default:
2750                 SFC_ASSERT(B_FALSE);
2751                 break;
2752         }
2753
2754         return 0;
2755 }
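/*
 * Informational summary of the assignments above:
 *
 * - SFC_FT_RULE_NONE and SFC_FT_RULE_JUMP: outermost items go to a
 *   freshly initialised match specification of type OUTER, with "ENC"
 *   field ID remapping and, for JUMP rules, a RECIRC_ID derived from
 *   the tunnel mark;
 *
 * - SFC_FT_RULE_GROUP: outermost items go to the ACTION match
 *   specification (again with "ENC" remapping), and no outer rule of
 *   its own is created.
 */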
2756
2757 static void
2758 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2759                               struct sfc_mae_parse_ctx *ctx)
2760 {
2761         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2762                 return;
2763
2764         if (ctx->match_spec_outer != NULL)
2765                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2766 }
2767
2768 int
2769 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2770                            const struct rte_flow_item pattern[],
2771                            struct sfc_flow_spec_mae *spec,
2772                            struct rte_flow_error *error)
2773 {
2774         struct sfc_mae_parse_ctx ctx_mae;
2775         unsigned int priority_shift = 0;
2776         struct sfc_flow_parse_ctx ctx;
2777         int rc;
2778
2779         memset(&ctx_mae, 0, sizeof(ctx_mae));
2780         ctx_mae.ft_rule_type = spec->ft_rule_type;
2781         ctx_mae.priority = spec->priority;
2782         ctx_mae.ft = spec->ft;
2783         ctx_mae.sa = sa;
2784
2785         switch (ctx_mae.ft_rule_type) {
2786         case SFC_FT_RULE_JUMP:
2787                 /*
2788                  * By design, this flow should be represented solely by the
2789                  * outer rule. But the HW/FW does not yet support setting the
2790                  * Rx mark from RECIRC_ID on outer rule lookup. Neither does
2791                  * it support outer rule counters. As a workaround, an
2792                  * action rule of lower priority is used to do the job.
2793                  */
2794                 priority_shift = 1;
2795
2796                 /* FALLTHROUGH */
2797         case SFC_FT_RULE_GROUP:
2798                 if (ctx_mae.priority != 0) {
2799                         /*
2800                          * Because of the above workaround, deny the
2801                          * use of priorities to JUMP and GROUP rules.
2802                          */
2803                         rc = rte_flow_error_set(error, ENOTSUP,
2804                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2805                                 "tunnel offload: priorities are not supported");
2806                         goto fail_priority_check;
2807                 }
2808
2809                 /* FALLTHROUGH */
2810         case SFC_FT_RULE_NONE:
2811                 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2812                                              spec->priority + priority_shift,
2813                                              &ctx_mae.match_spec_action);
2814                 if (rc != 0) {
2815                         rc = rte_flow_error_set(error, rc,
2816                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2817                                 "AR: failed to initialise the match specification");
2818                         goto fail_init_match_spec_action;
2819                 }
2820                 break;
2821         default:
2822                 SFC_ASSERT(B_FALSE);
2823                 break;
2824         }
2825
2826         /*
2827          * As a preliminary setting, assume that there is no encapsulation
2828          * in the pattern. That is, pattern items are about to comprise a
2829          * match specification of type ACTION and use non-encap. field IDs.
2830          *
2831          * sfc_mae_rule_encap_parse_init() below may override this.
2832          */
2833         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2834         ctx_mae.match_spec = ctx_mae.match_spec_action;
2835         ctx_mae.field_ids_remap = field_ids_no_remap;
2836         ctx_mae.pattern = pattern;
2837
2838         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2839         ctx.mae = &ctx_mae;
2840
2841         rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
2842         if (rc != 0)
2843                 goto fail_encap_parse_init;
2844
2845         /*
2846          * sfc_mae_rule_encap_parse_init() may have detected a tunnel offload
2847          * GROUP rule. Remember its properties for later use.
2848          */
2849         spec->ft_rule_type = ctx_mae.ft_rule_type;
2850         spec->ft = ctx_mae.ft;
2851
2852         rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2853                                     ctx_mae.pattern, &ctx, error);
2854         if (rc != 0)
2855                 goto fail_parse_pattern;
2856
2857         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2858         if (rc != 0)
2859                 goto fail_process_pattern_data;
2860
2861         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2862         if (rc != 0)
2863                 goto fail_process_outer;
2864
2865         if (ctx_mae.match_spec_action != NULL &&
2866             !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2867                 rc = rte_flow_error_set(error, ENOTSUP,
2868                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2869                                         "Inconsistent pattern");
2870                 goto fail_validate_match_spec_action;
2871         }
2872
2873         spec->match_spec = ctx_mae.match_spec_action;
2874
2875         return 0;
2876
2877 fail_validate_match_spec_action:
2878 fail_process_outer:
2879 fail_process_pattern_data:
2880 fail_parse_pattern:
2881         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2882
2883 fail_encap_parse_init:
2884         if (ctx_mae.match_spec_action != NULL)
2885                 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2886
2887 fail_init_match_spec_action:
2888 fail_priority_check:
2889         return rc;
2890 }
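/*
 * Informational: the overall parse sequence implemented above is
 *
 *   sfc_mae_rule_encap_parse_init()     - split outer/inner matching
 *                                         and set up the match specs;
 *   sfc_flow_parse_pattern()            - run per-item parse callbacks;
 *   sfc_mae_rule_process_pattern_data() - post-process collected data;
 *   sfc_mae_rule_process_outer()        - produce the outer rule.
 */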
2891
2892 /*
2893  * An action supported by MAE may correspond to a bundle of RTE flow actions,
2894  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2895  * That is, related RTE flow actions need to be tracked as parts of a whole
2896  * so that they can be combined into a single action and submitted to the
2897  * MAE representation of a given rule's action set.
2898  *
2899  * Each RTE flow action provided by an application gets classified as
2900  * one belonging to some bundle type. If an action is not supposed to
2901  * belong to any bundle, or if this action is END, it is described as
2902  * one belonging to a dummy bundle of type EMPTY.
2903  *
2904  * A currently tracked bundle will be submitted if a repeating
2905  * action or an action of different bundle type follows.
2906  */
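/*
 * For example (illustrative), the action list
 *
 *   OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / DROP / END
 *
 * accumulates the first three actions in one VLAN_PUSH bundle, which is
 * submitted as a single MAE VLAN_PUSH action when DROP (an EMPTY-bundle
 * action) is encountered.
 */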
2907
2908 enum sfc_mae_actions_bundle_type {
2909         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2910         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2911 };
2912
2913 struct sfc_mae_actions_bundle {
2914         enum sfc_mae_actions_bundle_type        type;
2915
2916         /* Indicates actions already tracked by the current bundle */
2917         uint64_t                                actions_mask;
2918
2919         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2920         rte_be16_t                              vlan_push_tpid;
2921         rte_be16_t                              vlan_push_tci;
2922 };
2923
2924 /*
2925  * Combine configuration of RTE flow actions tracked by the bundle into a
2926  * single action and submit the result to the MAE action set specification.
2927  * Do nothing in the case of a dummy action bundle.
2928  */
2929 static int
2930 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2931                               efx_mae_actions_t *spec)
2932 {
2933         int rc = 0;
2934
2935         switch (bundle->type) {
2936         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2937                 break;
2938         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2939                 rc = efx_mae_action_set_populate_vlan_push(
2940                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2941                 break;
2942         default:
2943                 SFC_ASSERT(B_FALSE);
2944                 break;
2945         }
2946
2947         return rc;
2948 }
2949
2950 /*
2951  * Given the type of the next RTE flow action in the line, decide
2952  * whether a new bundle is about to start, and, if this is the case,
2953  * submit and reset the current bundle.
2954  */
2955 static int
2956 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2957                             struct sfc_mae_actions_bundle *bundle,
2958                             efx_mae_actions_t *spec,
2959                             struct rte_flow_error *error)
2960 {
2961         enum sfc_mae_actions_bundle_type bundle_type_new;
2962         int rc;
2963
2964         switch (action->type) {
2965         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2966         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2967         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2968                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2969                 break;
2970         default:
2971                 /*
2972                  * Self-sufficient actions, including END, are handled in this
2973                  * case. No checks for unsupported actions are needed here
2974                  * because the actual parsing is done by sfc_mae_rule_parse_action().
2975                  */
2976                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2977                 break;
2978         }
2979
2980         if (bundle_type_new != bundle->type ||
2981             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2982                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2983                 if (rc != 0)
2984                         goto fail_submit;
2985
2986                 memset(bundle, 0, sizeof(*bundle));
2987         }
2988
2989         bundle->type = bundle_type_new;
2990
2991         return 0;
2992
2993 fail_submit:
2994         return rte_flow_error_set(error, rc,
2995                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2996                         "Failed to request the (group of) action(s)");
2997 }
2998
2999 static void
3000 sfc_mae_rule_parse_action_of_push_vlan(
3001                             const struct rte_flow_action_of_push_vlan *conf,
3002                             struct sfc_mae_actions_bundle *bundle)
3003 {
3004         bundle->vlan_push_tpid = conf->ethertype;
3005 }
3006
3007 static void
3008 sfc_mae_rule_parse_action_of_set_vlan_vid(
3009                             const struct rte_flow_action_of_set_vlan_vid *conf,
3010                             struct sfc_mae_actions_bundle *bundle)
3011 {
3012         bundle->vlan_push_tci |= (conf->vlan_vid &
3013                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
3014 }
3015
3016 static void
3017 sfc_mae_rule_parse_action_of_set_vlan_pcp(
3018                             const struct rte_flow_action_of_set_vlan_pcp *conf,
3019                             struct sfc_mae_actions_bundle *bundle)
3020 {
3021         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
3022                                            RTE_LEN2MASK(3, uint8_t)) << 13;
3023
3024         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
3025 }
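/*
 * Worked example (illustrative): OF_SET_VLAN_VID with vlan_vid 100
 * (0x064) and OF_SET_VLAN_PCP with vlan_pcp 5 produce
 *
 *   vlan_push_tci = rte_cpu_to_be_16((5 << 13) | 0x064)
 *                 = rte_cpu_to_be_16(0xa064),
 *
 * which sfc_mae_actions_bundle_submit() passes, along with the TPID,
 * to efx_mae_action_set_populate_vlan_push().
 */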
3026
3027 struct sfc_mae_parsed_item {
3028         const struct rte_flow_item      *item;
3029         size_t                          proto_header_ofst;
3030         size_t                          proto_header_size;
3031 };
3032
3033 /*
3034  * For each 16-bit word of the given header, override
3035  * bits enforced by the corresponding 16-bit mask.
3036  */
3037 static void
3038 sfc_mae_header_force_item_masks(uint8_t *header_buf,
3039                                 const struct sfc_mae_parsed_item *parsed_items,
3040                                 unsigned int nb_parsed_items)
3041 {
3042         unsigned int item_idx;
3043
3044         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
3045                 const struct sfc_mae_parsed_item *parsed_item;
3046                 const struct rte_flow_item *item;
3047                 size_t proto_header_size;
3048                 size_t ofst;
3049
3050                 parsed_item = &parsed_items[item_idx];
3051                 proto_header_size = parsed_item->proto_header_size;
3052                 item = parsed_item->item;
3053
3054                 for (ofst = 0; ofst < proto_header_size;
3055                      ofst += sizeof(rte_be16_t)) {
3056                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
3057                         const rte_be16_t *w_maskp;
3058                         const rte_be16_t *w_specp;
3059
3060                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
3061                         w_specp = RTE_PTR_ADD(item->spec, ofst);
3062
3063                         *wp &= ~(*w_maskp);
3064                         *wp |= (*w_specp & *w_maskp);
3065                 }
3066
3067                 header_buf += proto_header_size;
3068         }
3069 }
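/*
 * Worked example (illustrative): if a 16-bit header word holds 0x1234
 * while the corresponding item spec word is 0xabcd and the item mask
 * word is 0xff00, the result is
 *
 *   (0x1234 & ~0xff00) | (0xabcd & 0xff00) = 0x0034 | 0xab00 = 0xab34,
 *
 * i.e. only the bits enforced by the mask get overridden.
 */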
3070
3071 #define SFC_IPV4_TTL_DEF        0x40
3072 #define SFC_IPV6_VTC_FLOW_DEF   0x60000000
3073 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
3074 #define SFC_VXLAN_FLAGS_DEF     0x08000000
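/*
 * These defaults mirror the protocol headers: IPv4 TTL of 64; IPv6
 * version 6 with zero traffic class and flow label; IPv6 hop limit of
 * 255; VXLAN flags with only the I (valid VNI) bit set.
 */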
3075
3076 static int
3077 sfc_mae_rule_parse_action_vxlan_encap(
3078                             struct sfc_mae *mae,
3079                             const struct rte_flow_action_vxlan_encap *conf,
3080                             efx_mae_actions_t *spec,
3081                             struct rte_flow_error *error)
3082 {
3083         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
3084         struct rte_flow_item *pattern = conf->definition;
3085         uint8_t *buf = bounce_eh->buf;
3086
3087         /* This array will keep track of non-VOID pattern items. */
3088         struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
3089                                                 2 /* VLAN tags */ +
3090                                                 1 /* IPv4 or IPv6 */ +
3091                                                 1 /* UDP */ +
3092                                                 1 /* VXLAN */];
3093         unsigned int nb_parsed_items = 0;
3094
3095         size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
3096         uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
3097                                   sizeof(struct rte_ipv6_hdr))];
3098         struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
3099         struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
3100         struct rte_vxlan_hdr *vxlan = NULL;
3101         struct rte_udp_hdr *udp = NULL;
3102         unsigned int nb_vlan_tags = 0;
3103         size_t next_proto_ofst = 0;
3104         size_t ethertype_ofst = 0;
3105         uint64_t exp_items;
3106         int rc;
3107
3108         if (pattern == NULL) {
3109                 return rte_flow_error_set(error, EINVAL,
3110                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3111                                 "The encap. header definition is NULL");
3112         }
3113
3114         bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
3115         bounce_eh->size = 0;
3116
3117         /*
3118          * Process pattern items and remember non-VOID ones.
3119          * Defer applying masks until after the complete header
3120          * has been built from the pattern items.
3121          */
3122         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
3123
3124         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
3125                 struct sfc_mae_parsed_item *parsed_item;
3126                 const uint64_t exp_items_extra_vlan[] = {
3127                         RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
3128                 };
3129                 size_t proto_header_size;
3130                 rte_be16_t *ethertypep;
3131                 uint8_t *next_protop;
3132                 uint8_t *buf_cur;
3133
3134                 if (pattern->spec == NULL) {
3135                         return rte_flow_error_set(error, EINVAL,
3136                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3137                                         "NULL item spec in the encap. header");
3138                 }
3139
3140                 if (pattern->mask == NULL) {
3141                         return rte_flow_error_set(error, EINVAL,
3142                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3143                                         "NULL item mask in the encap. header");
3144                 }
3145
3146                 if (pattern->last != NULL) {
3147                         /* This is not a match pattern, so disallow range. */
3148                         return rte_flow_error_set(error, EINVAL,
3149                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3150                                         "Range item in the encap. header");
3151                 }
3152
3153                 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
3154                         /* Handle VOID separately, for clarity. */
3155                         continue;
3156                 }
3157
3158                 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
3159                         return rte_flow_error_set(error, ENOTSUP,
3160                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3161                                         "Unexpected item in the encap. header");
3162                 }
3163
3164                 parsed_item = &parsed_items[nb_parsed_items];
3165                 buf_cur = buf + bounce_eh->size;
3166
3167                 switch (pattern->type) {
3168                 case RTE_FLOW_ITEM_TYPE_ETH:
3169                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
3170                                                exp_items);
3171                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
3172                                                   hdr) != 0);
3173
3174                         proto_header_size = sizeof(struct rte_ether_hdr);
3175
3176                         ethertype_ofst = eth_ethertype_ofst;
3177
3178                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
3179                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3180                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3181                         break;
3182                 case RTE_FLOW_ITEM_TYPE_VLAN:
3183                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
3184                                                exp_items);
3185                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
3186                                                   hdr) != 0);
3187
3188                         proto_header_size = sizeof(struct rte_vlan_hdr);
3189
3190                         ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
3191                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
3192
3193                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3194                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3195
3196                         ethertype_ofst =
3197                             bounce_eh->size +
3198                             offsetof(struct rte_vlan_hdr, eth_proto);
3199
3200                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3201                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3202                         exp_items |= exp_items_extra_vlan[nb_vlan_tags];
3203
3204                         ++nb_vlan_tags;
3205                         break;
3206                 case RTE_FLOW_ITEM_TYPE_IPV4:
3207                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
3208                                                exp_items);
3209                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
3210                                                   hdr) != 0);
3211
3212                         proto_header_size = sizeof(struct rte_ipv4_hdr);
3213
3214                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3215                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3216
3217                         next_proto_ofst =
3218                             bounce_eh->size +
3219                             offsetof(struct rte_ipv4_hdr, next_proto_id);
3220
3221                         ipv4 = (struct rte_ipv4_hdr *)buf_cur;
3222
3223                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3224                         break;
3225                 case RTE_FLOW_ITEM_TYPE_IPV6:
3226                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
3227                                                exp_items);
3228                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
3229                                                   hdr) != 0);
3230
3231                         proto_header_size = sizeof(struct rte_ipv6_hdr);
3232
3233                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3234                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3235
3236                         next_proto_ofst = bounce_eh->size +
3237                                           offsetof(struct rte_ipv6_hdr, proto);
3238
3239                         ipv6 = (struct rte_ipv6_hdr *)buf_cur;
3240
3241                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3242                         break;
3243                 case RTE_FLOW_ITEM_TYPE_UDP:
3244                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
3245                                                exp_items);
3246                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
3247                                                   hdr) != 0);
3248
3249                         proto_header_size = sizeof(struct rte_udp_hdr);
3250
3251                         next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
3252                         *next_protop = IPPROTO_UDP;
3253
3254                         udp = (struct rte_udp_hdr *)buf_cur;
3255
3256                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
3257                         break;
3258                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3259                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
3260                                                exp_items);
3261                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
3262                                                   hdr) != 0);
3263
3264                         proto_header_size = sizeof(struct rte_vxlan_hdr);
3265
3266                         vxlan = (struct rte_vxlan_hdr *)buf_cur;
3267
3268                         udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
3269                         udp->dgram_len = RTE_BE16(sizeof(*udp) +
3270                                                   sizeof(*vxlan));
3271                         udp->dgram_cksum = 0;
3272
3273                         exp_items = 0;
3274                         break;
3275                 default:
3276                         return rte_flow_error_set(error, ENOTSUP,
3277                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3278                                         "Unknown item in the encap. header");
3279                 }
3280
3281                 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
3282                         return rte_flow_error_set(error, E2BIG,
3283                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3284                                         "The encap. header is too big");
3285                 }
3286
3287                 if ((proto_header_size & 1) != 0) {
3288                         return rte_flow_error_set(error, EINVAL,
3289                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3290                                         "Odd layer size in the encap. header");
3291                 }
3292
3293                 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
3294                 bounce_eh->size += proto_header_size;
3295
3296                 parsed_item->item = pattern;
3297                 parsed_item->proto_header_size = proto_header_size;
3298                 ++nb_parsed_items;
3299         }
3300
3301         if (exp_items != 0) {
3302                 /* Parsing item VXLAN would have reset exp_items to 0. */
3303                 return rte_flow_error_set(error, ENOTSUP,
3304                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3305                                         "No item VXLAN in the encap. header");
3306         }
3307
3308         /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
3309         ipv4->version_ihl = RTE_IPV4_VHL_DEF;
3310         ipv4->time_to_live = SFC_IPV4_TTL_DEF;
3311         ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
3312                                       sizeof(*vxlan));
3313         /* The HW cannot compute this checksum. */
3314         ipv4->hdr_checksum = 0;
3315         ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
3316
3317         ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
3318         ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
3319         ipv6->payload_len = udp->dgram_len;
3320
3321         vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
3322
3323         /* Take care of the masks. */
3324         sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
3325
3326         rc = efx_mae_action_set_populate_encap(spec);
3327         if (rc != 0) {
3328                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3329                                 NULL, "failed to request action ENCAP");
3330         }
3331
3332         return rc;
3333 }
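/*
 * A minimal application-side sketch (illustrative only; all values are
 * placeholders) of an encap. header definition accepted by the parser
 * above:
 *
 *   static const struct rte_flow_item_eth eth_spec = {
 *           .hdr.dst_addr.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *   };
 *   static const struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)),
 *           .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 2)),
 *   };
 *   static const struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = RTE_BE16(32768),
 *           .hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT),
 *   };
 *   static const struct rte_flow_item_vxlan vxlan_spec = {
 *           .hdr.vx_vni = RTE_BE32(42 << 8),
 *   };
 *   static struct rte_flow_item definition[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &rte_flow_item_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ipv4_spec, .mask = &rte_flow_item_ipv4_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &rte_flow_item_udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec, .mask = &rte_flow_item_vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action_vxlan_encap encap_conf = {
 *           .definition = definition,
 *   };
 *
 * Ethertypes and the IP next protocol need not be provided: the parser
 * writes them itself, and the default masks do not cover those fields.
 * The default UDP mask does cover the destination port, so the spec
 * sets it to the VXLAN port explicitly.
 */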
3334
3335 static int
3336 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3337                                const struct rte_flow_action_mark *conf,
3338                                const struct sfc_flow_spec_mae *spec_mae,
3339                                efx_mae_actions_t *spec)
3340 {
3341         int rc;
3342
3343         if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3344                 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3345         } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3346                 sfc_err(sa, "the mark value is too large");
3347                 return EINVAL;
3348         }
3349
3350         rc = efx_mae_action_set_populate_mark(spec, conf->id);
3351         if (rc != 0)
3352                 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3353
3354         return rc;
3355 }
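/*
 * Note (informational): outside of the JUMP workaround above, valid
 * mark values are capped by SFC_FT_USER_MARK_MASK because the remaining
 * Rx mark bits carry tunnel offload IDs (see sfc_flow_tunnel.h).
 */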
3356
3357 static int
3358 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3359                                 const struct rte_flow_action_count *conf
3360                                         __rte_unused,
3361                                 efx_mae_actions_t *spec)
3362 {
3363         int rc;
3364
3365         if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3366                 sfc_err(sa,
3367                         "counter queue is not configured for COUNT action");
3368                 rc = EINVAL;
3369                 goto fail_counter_queue_uninit;
3370         }
3371
3372         if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
3373                 rc = EINVAL;
3374                 goto fail_no_service_core;
3375         }
3376
3377         rc = efx_mae_action_set_populate_count(spec);
3378         if (rc != 0) {
3379                 sfc_err(sa,
3380                         "failed to populate counters in MAE action set: %s",
3381                         rte_strerror(rc));
3382                 goto fail_populate_count;
3383         }
3384
3385         return 0;
3386
3387 fail_populate_count:
3388 fail_no_service_core:
3389 fail_counter_queue_uninit:
3390
3391         return rc;
3392 }
3393
3394 static int
3395 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3396                                    const struct rte_flow_action_phy_port *conf,
3397                                    efx_mae_actions_t *spec)
3398 {
3399         efx_mport_sel_t mport;
3400         uint32_t phy_port;
3401         int rc;
3402
3403         if (conf->original != 0)
3404                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3405         else
3406                 phy_port = conf->index;
3407
3408         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3409         if (rc != 0) {
3410                 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3411                         phy_port, strerror(rc));
3412                 return rc;
3413         }
3414
3415         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3416         if (rc != 0) {
3417                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3418                         mport.sel, strerror(rc));
3419         }
3420
3421         return rc;
3422 }
3423
3424 static int
3425 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3426                                 const struct rte_flow_action_vf *vf_conf,
3427                                 efx_mae_actions_t *spec)
3428 {
3429         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3430         efx_mport_sel_t mport;
3431         uint32_t vf;
3432         int rc;
3433
3434         if (vf_conf == NULL)
3435                 vf = EFX_PCI_VF_INVALID;
3436         else if (vf_conf->original != 0)
3437                 vf = encp->enc_vf;
3438         else
3439                 vf = vf_conf->id;
3440
3441         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3442         if (rc != 0) {
3443                 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3444                         encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3445                         strerror(rc));
3446                 return rc;
3447         }
3448
3449         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3450         if (rc != 0) {
3451                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3452                         mport.sel, strerror(rc));
3453         }
3454
3455         return rc;
3456 }
3457
3458 static int
3459 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3460                                   const struct rte_flow_action_port_id *conf,
3461                                   efx_mae_actions_t *spec)
3462 {
3463         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3464         struct sfc_mae *mae = &sa->mae;
3465         efx_mport_sel_t mport;
3466         uint16_t port_id;
3467         int rc;
3468
3469         if (conf->id > UINT16_MAX)
3470                 return EOVERFLOW;
3471
3472         port_id = (conf->original != 0) ? sas->port_id : conf->id;
3473
3474         rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3475                                              port_id, &mport);
3476         if (rc != 0) {
3477                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3478                         port_id, strerror(rc));
3479                 return rc;
3480         }
3481
3482         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3483         if (rc != 0) {
3484                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3485                         mport.sel, strerror(rc));
3486         }
3487
3488         return rc;
3489 }
3490
3491 static int
3492 sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
3493                 const struct rte_flow_action_ethdev *conf,
3494                 efx_mae_actions_t *spec)
3495 {
3496         struct sfc_mae *mae = &sa->mae;
3497         efx_mport_sel_t mport;
3498         int rc;
3499
3500         rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3501                                              conf->port_id, &mport);
3502         if (rc != 0) {
3503                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3504                         conf->port_id, strerror(rc));
3505                 return rc;
3506         }
3507
3508         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3509         if (rc != 0) {
3510                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3511                         mport.sel, strerror(rc));
3512         }
3513
3514         return rc;
3515 }
3516
3517 static int
3518 sfc_mae_rule_parse_action_represented_port(struct sfc_adapter *sa,
3519                 const struct rte_flow_action_ethdev *conf,
3520                 efx_mae_actions_t *spec)
3521 {
3522         struct sfc_mae *mae = &sa->mae;
3523         efx_mport_sel_t mport;
3524         int rc;
3525
3526         rc = sfc_mae_switch_get_entity_mport(mae->switch_domain_id,
3527                                              conf->port_id, &mport);
3528         if (rc != 0) {
3529                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3530                         conf->port_id, strerror(rc));
3531                 return rc;
3532         }
3533
3534         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3535         if (rc != 0) {
3536                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3537                         mport.sel, strerror(rc));
3538         }
3539
3540         return rc;
3541 }
3542
3543 static const char * const action_names[] = {
3544         [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3545         [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3546         [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3547         [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3548         [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3549         [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3550         [RTE_FLOW_ACTION_TYPE_COUNT] = "COUNT",
3551         [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3552         [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3553         [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3554         [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3555         [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3556         [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3557         [RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = "PORT_REPRESENTOR",
3558         [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = "REPRESENTED_PORT",
3559         [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3560         [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
3561 };
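/*
 * Entries not listed above are implicitly NULL. The error path in
 * sfc_mae_rule_parse_action() checks for that before printing a name.
 */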
3562
3563 static int
3564 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3565                           const struct rte_flow_action *action,
3566                           const struct sfc_flow_spec_mae *spec_mae,
3567                           struct sfc_mae_actions_bundle *bundle,
3568                           efx_mae_actions_t *spec,
3569                           struct rte_flow_error *error)
3570 {
3571         const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3572         const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3573         bool custom_error = B_FALSE;
3574         int rc = 0;
3575
3576         switch (action->type) {
3577         case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3578                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3579                                        bundle->actions_mask);
3580                 if (outer_rule == NULL ||
3581                     outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3582                         rc = EINVAL;
3583                 else
3584                         rc = efx_mae_action_set_populate_decap(spec);
3585                 break;
3586         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3587                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3588                                        bundle->actions_mask);
3589                 rc = efx_mae_action_set_populate_vlan_pop(spec);
3590                 break;
3591         case RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL:
3592         case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3593                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
3594                                        bundle->actions_mask);
3595                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DEC_TTL,
3596                                        bundle->actions_mask);
3597                 rc = efx_mae_action_set_populate_decr_ip_ttl(spec);
3598                 break;
3599         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3600                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3601                                        bundle->actions_mask);
3602                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3603                 break;
3604         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3605                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3606                                        bundle->actions_mask);
3607                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3608                 break;
3609         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3610                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3611                                        bundle->actions_mask);
3612                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3613                 break;
3614         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3615                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3616                                        bundle->actions_mask);
3617                 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3618                                                            action->conf,
3619                                                            spec, error);
3620                 custom_error = B_TRUE;
3621                 break;
3622         case RTE_FLOW_ACTION_TYPE_COUNT:
3623                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3624                                        bundle->actions_mask);
3625                 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3626                 break;
3627         case RTE_FLOW_ACTION_TYPE_FLAG:
3628                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3629                                        bundle->actions_mask);
3630                 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3631                         rc = efx_mae_action_set_populate_flag(spec);
3632                 } else {
3633                         rc = rte_flow_error_set(error, ENOTSUP,
3634                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3635                                                 action,
3636                                                 "flag delivery has not been negotiated");
3637                         custom_error = B_TRUE;
3638                 }
3639                 break;
3640         case RTE_FLOW_ACTION_TYPE_MARK:
3641                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3642                                        bundle->actions_mask);
3643                 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
3644                     spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3645                         rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
3646                                                             spec_mae, spec);
3647                 } else {
3648                         rc = rte_flow_error_set(error, ENOTSUP,
3649                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3650                                                 action,
3651                                                 "mark delivery has not been negotiated");
3652                         custom_error = B_TRUE;
3653                 }
3654                 break;
3655         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3656                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3657                                        bundle->actions_mask);
3658                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3659                 break;
3660         case RTE_FLOW_ACTION_TYPE_PF:
3661                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3662                                        bundle->actions_mask);
3663                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3664                 break;
3665         case RTE_FLOW_ACTION_TYPE_VF:
3666                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3667                                        bundle->actions_mask);
3668                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3669                 break;
3670         case RTE_FLOW_ACTION_TYPE_PORT_ID:
3671                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3672                                        bundle->actions_mask);
3673                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3674                 break;
3675         case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
3676                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
3677                                        bundle->actions_mask);
3678                 rc = sfc_mae_rule_parse_action_port_representor(sa,
3679                                 action->conf, spec);
3680                 break;
3681         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3682                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
3683                                        bundle->actions_mask);
3684                 rc = sfc_mae_rule_parse_action_represented_port(sa,
3685                                 action->conf, spec);
3686                 break;
3687         case RTE_FLOW_ACTION_TYPE_DROP:
3688                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3689                                        bundle->actions_mask);
3690                 rc = efx_mae_action_set_populate_drop(spec);
3691                 break;
3692         case RTE_FLOW_ACTION_TYPE_JUMP:
3693                 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3694                         /* Workaround. See sfc_flow_parse_rte_to_mae() */
3695                         break;
3696                 }
3697                 /* FALLTHROUGH */
3698         default:
3699                 return rte_flow_error_set(error, ENOTSUP,
3700                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3701                                 "Unsupported action");
3702         }
3703
3704         if (rc == 0) {
3705                 bundle->actions_mask |= (1ULL << action->type);
3706         } else if (!custom_error) {
3707                 if (action->type < RTE_DIM(action_names)) {
3708                         const char *action_name = action_names[action->type];
3709
3710                         if (action_name != NULL) {
3711                                 sfc_err(sa, "action %s was rejected: %s",
3712                                         action_name, strerror(rc));
3713                         }
3714                 }
3715                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3716                                 NULL, "Failed to request the action");
3717         }
3718
3719         return rc;
3720 }
3721
3722 static void
3723 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
3724 {
3725         bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
3726 }
3727
3728 static int
3729 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3730                              const struct sfc_mae_bounce_eh *bounce_eh,
3731                              struct sfc_mae_encap_header **encap_headerp)
3732 {
3733         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3734                 *encap_headerp = NULL;
3735                 return 0;
3736         }
3737
3738         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3739         if (*encap_headerp != NULL)
3740                 return 0;
3741
3742         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
3743 }
3744
3745 int
3746 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
3747                            const struct rte_flow_action actions[],
3748                            struct sfc_flow_spec_mae *spec_mae,
3749                            struct rte_flow_error *error)
3750 {
3751         struct sfc_mae_encap_header *encap_header = NULL;
3752         struct sfc_mae_actions_bundle bundle = {0};
3753         struct sfc_flow_tunnel *counter_ft = NULL;
3754         uint64_t *ft_group_hit_counter = NULL;
3755         const struct rte_flow_action *action;
3756         struct sfc_mae *mae = &sa->mae;
3757         unsigned int n_count = 0;
3758         efx_mae_actions_t *spec;
3759         int rc;
3760
3761         rte_errno = 0;
3762
3763         if (actions == NULL) {
3764                 return rte_flow_error_set(error, EINVAL,
3765                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
3766                                 "NULL actions");
3767         }
3768
3769         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
3770         if (rc != 0)
3771                 goto fail_action_set_spec_init;
3772
3773         for (action = actions;
3774              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3775                 if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
3776                         ++n_count;
3777         }
3778
3779         if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
3780                 /* JUMP rules don't decapsulate packets. GROUP rules do. */
3781                 rc = efx_mae_action_set_populate_decap(spec);
3782                 if (rc != 0)
3783                         goto fail_enforce_ft_decap;
3784
3785                 if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
3786                         /*
3787                          * The user opted not to use action COUNT in this rule,
3788                          * but the counter should be enabled implicitly because
3789                          * packets hitting this rule contribute to the tunnel's
3790                          * total number of hits. See sfc_mae_counter_get().
3791                          */
3792                         rc = efx_mae_action_set_populate_count(spec);
3793                         if (rc != 0)
3794                                 goto fail_enforce_ft_count;
3795
3796                         n_count = 1;
3797                 }
3798         }
3799
3800         /* Clean up after the previous encap. header bounce buffer usage. */
3801         sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
3802
3803         for (action = actions;
3804              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
3805                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3806                 if (rc != 0)
3807                         goto fail_rule_parse_action;
3808
3809                 rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
3810                                                &bundle, spec, error);
3811                 if (rc != 0)
3812                         goto fail_rule_parse_action;
3813         }
3814
3815         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
3816         if (rc != 0)
3817                 goto fail_rule_parse_action;
3818
3819         rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
3820         if (rc != 0)
3821                 goto fail_process_encap_header;
3822
3823         if (n_count > 1) {
3824                 rc = ENOTSUP;
3825                 sfc_err(sa, "too many COUNT actions requested: %u", n_count);
3826                 goto fail_nb_count;
3827         }
3828
3829         switch (spec_mae->ft_rule_type) {
3830         case SFC_FT_RULE_NONE:
3831                 break;
3832         case SFC_FT_RULE_JUMP:
3833                 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3834                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3835                 if (rc != 0)
3836                         goto fail_workaround_jump_delivery;
3837
3838                 counter_ft = spec_mae->ft;
3839                 break;
3840         case SFC_FT_RULE_GROUP:
3841                 /*
3842                  * Packets that go to the rule's AR have FT mark set (from the
3843                  * JUMP rule OR's RECIRC_ID). Remove this mark in matching
3844                  * packets. The user may have provided their own action
3845                  * MARK above, so don't check the return value here.
3846                  */
3847                 (void)efx_mae_action_set_populate_mark(spec, 0);
3848
3849                 ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
3850                 break;
3851         default:
3852                 SFC_ASSERT(B_FALSE);
3853         }
3854
3855         spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
3856                                                          n_count, spec);
3857         if (spec_mae->action_set != NULL) {
3858                 sfc_mae_encap_header_del(sa, encap_header);
3859                 efx_mae_action_set_spec_fini(sa->nic, spec);
3860                 return 0;
3861         }
3862
3863         rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
3864                                     ft_group_hit_counter, counter_ft, n_count,
3865                                     &spec_mae->action_set);
3866         if (rc != 0)
3867                 goto fail_action_set_add;
3868
3869         return 0;
3870
3871 fail_action_set_add:
3872 fail_workaround_jump_delivery:
3873 fail_nb_count:
3874         sfc_mae_encap_header_del(sa, encap_header);
3875
3876 fail_process_encap_header:
3877 fail_rule_parse_action:
3878         efx_mae_action_set_spec_fini(sa->nic, spec);
3879
3880 fail_enforce_ft_count:
3881 fail_enforce_ft_decap:
3882 fail_action_set_spec_init:
3883         if (rc > 0 && rte_errno == 0) {
3884                 rc = rte_flow_error_set(error, rc,
3885                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3886                         NULL, "Failed to process the action");
3887         }
3888         return rc;
3889 }
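/*
 * Informational: sfc_mae_action_set_attach() in the function above lets
 * flows with identical action lists (and encap. headers) share a single
 * MAE action set; a new one is added only when no match is found.
 */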
3890
3891 static bool
3892 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3893                         const efx_mae_match_spec_t *left,
3894                         const efx_mae_match_spec_t *right)
3895 {
3896         bool have_same_class;
3897         int rc;
3898
3899         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
3900                                            &have_same_class);
3901
3902         return (rc == 0) ? have_same_class : false;
3903 }
3904
3905 static int
3906 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
3907                                 struct sfc_mae_outer_rule *rule)
3908 {
3909         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
3910         struct sfc_mae_outer_rule *entry;
3911         struct sfc_mae *mae = &sa->mae;
3912
3913         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
3914                 /* An active rule is reused. Its class is known to be valid. */
3915                 return 0;
3916         }
3917
3918         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
3919                               sfc_mae_outer_rules, entries) {
3920                 const efx_mae_match_spec_t *left = entry->match_spec;
3921                 const efx_mae_match_spec_t *right = rule->match_spec;
3922
3923                 if (entry == rule)
3924                         continue;
3925
3926                 if (sfc_mae_rules_class_cmp(sa, left, right))
3927                         return 0;
3928         }
3929
3930         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3931                  "support for outer frame pattern items is not guaranteed; "
3932                  "other than that, the items are valid from the SW standpoint");
3933         return 0;
3934 }
3935
3936 static int
3937 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
3938                                  struct sfc_flow_spec_mae *spec)
3939 {
3940         const struct rte_flow *entry;
3941
3942         if (spec->match_spec == NULL)
3943                 return 0;
3944
3945         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
3946                 const struct sfc_flow_spec *entry_spec = &entry->spec;
3947                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
3948                 const efx_mae_match_spec_t *left = es_mae->match_spec;
3949                 const efx_mae_match_spec_t *right = spec->match_spec;
3950
3951                 switch (entry_spec->type) {
3952                 case SFC_FLOW_SPEC_FILTER:
3953                         /* Ignore VNIC-level flows */
3954                         break;
3955                 case SFC_FLOW_SPEC_MAE:
3956                         if (sfc_mae_rules_class_cmp(sa, left, right))
3957                                 return 0;
3958                         break;
3959                 default:
3960                         SFC_ASSERT(false);
3961                 }
3962         }
3963
3964         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
3965                  "support for inner frame pattern items is not guaranteed; "
3966                  "other than that, the items are valid from the SW standpoint");
3967         return 0;
3968 }
3969
3970 /**
3971  * Confirm that a given flow can be accepted by the FW.
3972  *
3973  * @param sa
3974  *   Software adapter context
3975  * @param flow
3976  *   Flow to be verified
3977  * @return
3978  *   Zero on success and non-zero in the case of error.
3979  *   A special value of EAGAIN indicates that the adapter is
3980  *   not in the started state. This state is required because
3981  *   it only makes sense to compare the rule class of the flow
3982  *   being validated with the classes of the active rules.
3983  *   Such classes are known to be supported by the FW.
3984  */
3985 int
3986 sfc_mae_flow_verify(struct sfc_adapter *sa,
3987                     struct rte_flow *flow)
3988 {
3989         struct sfc_flow_spec *spec = &flow->spec;
3990         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3991         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3992         int rc;
3993
3994         SFC_ASSERT(sfc_adapter_is_locked(sa));
3995
3996         if (sa->state != SFC_ETHDEV_STARTED)
3997                 return EAGAIN;
3998
3999         if (outer_rule != NULL) {
4000                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
4001                 if (rc != 0)
4002                         return rc;
4003         }
4004
4005         return sfc_mae_action_rule_class_verify(sa, spec_mae);
4006 }
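
/*
 * Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller honouring the EAGAIN contract of sfc_mae_flow_verify() above.
 * Class comparison needs active rules to compare against, so the verdict
 * is only conclusive once the adapter has been started.
 */
static __rte_unused int
sfc_mae_flow_verify_example(struct sfc_adapter *sa,
			    struct rte_flow *flow)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rc = sfc_mae_flow_verify(sa, flow);
	if (rc == EAGAIN) {
		/*
		 * The adapter is not started: there are no active rules
		 * whose classes could vouch for this flow. The caller may
		 * defer validation until after the adapter is started.
		 */
	}

	return rc;
}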
4007
4008 int
4009 sfc_mae_flow_insert(struct sfc_adapter *sa,
4010                     struct rte_flow *flow)
4011 {
4012         struct sfc_flow_spec *spec = &flow->spec;
4013         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4014         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4015         struct sfc_mae_action_set *action_set = spec_mae->action_set;
4016         struct sfc_mae_fw_rsrc *fw_rsrc;
4017         int rc;
4018
4019         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
4020
4021         if (outer_rule != NULL) {
4022                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
4023                                                spec_mae->match_spec);
4024                 if (rc != 0)
4025                         goto fail_outer_rule_enable;
4026         }
4027
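        /*
         * For a tunnel offload JUMP rule, latch the current group hit
         * counter value as the baseline against which subsequent hit
         * readings for this rule are computed.
         */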
4028         if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
4029                 spec_mae->ft->reset_jump_hit_counter =
4030                         spec_mae->ft->group_hit_counter;
4031         }
4032
4033         if (action_set == NULL) {
4034                 sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
4035                 return 0;
4036         }
4037
4038         rc = sfc_mae_action_set_enable(sa, action_set);
4039         if (rc != 0)
4040                 goto fail_action_set_enable;
4041
4042         if (action_set->n_counters > 0) {
4043                 rc = sfc_mae_counter_start(sa);
4044                 if (rc != 0) {
4045                         sfc_err(sa, "failed to start MAE counter support: %s",
4046                                 rte_strerror(rc));
4047                         goto fail_mae_counter_start;
4048                 }
4049         }
4050
4051         fw_rsrc = &action_set->fw_rsrc;
4052
4053         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
4054                                         NULL, &fw_rsrc->aset_id,
4055                                         &spec_mae->rule_id);
4056         if (rc != 0)
4057                 goto fail_action_rule_insert;
4058
4059         sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
4060                 flow, spec_mae->rule_id.id);
4061
4062         return 0;
4063
4064 fail_action_rule_insert:
4065 fail_mae_counter_start:
4066         sfc_mae_action_set_disable(sa, action_set);
4067
4068 fail_action_set_enable:
4069         if (outer_rule != NULL)
4070                 sfc_mae_outer_rule_disable(sa, outer_rule);
4071
4072 fail_outer_rule_enable:
4073         return rc;
4074 }
4075
4076 int
4077 sfc_mae_flow_remove(struct sfc_adapter *sa,
4078                     struct rte_flow *flow)
4079 {
4080         struct sfc_flow_spec *spec = &flow->spec;
4081         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4082         struct sfc_mae_action_set *action_set = spec_mae->action_set;
4083         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4084         int rc;
4085
4086         if (action_set == NULL) {
4087                 sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
4088                 goto skip_action_rule;
4089         }
4090
4091         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
4092
4093         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
4094         if (rc != 0)
4095                 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
4096                         flow, spec_mae->rule_id.id, rte_strerror(rc));
4097         else
4098                 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
4099                         flow, spec_mae->rule_id.id);
4100         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
4101
4102         sfc_mae_action_set_disable(sa, action_set);
4103
4104 skip_action_rule:
4105         if (outer_rule != NULL)
4106                 sfc_mae_outer_rule_disable(sa, outer_rule);
4107
4108         return 0;
4109 }
4110
4111 static int
4112 sfc_mae_query_counter(struct sfc_adapter *sa,
4113                       struct sfc_flow_spec_mae *spec,
4114                       const struct rte_flow_action *action,
4115                       struct rte_flow_query_count *data,
4116                       struct rte_flow_error *error)
4117 {
4118         struct sfc_mae_action_set *action_set = spec->action_set;
4119         const struct rte_flow_action_count *conf = action->conf;
4120         unsigned int i;
4121         int rc;
4122
4123         if (action_set == NULL || action_set->n_counters == 0) {
4124                 return rte_flow_error_set(error, EINVAL,
4125                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4126                         "Queried flow rule does not have count actions");
4127         }
4128
4129         for (i = 0; i < action_set->n_counters; i++) {
4130                 /*
4131                  * Get the first available counter of the flow rule if
4132                  * counter ID is not specified, provided that this
4133                  * counter is not an automatic (implicit) one.
4134                  */
4135                 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
4136                         continue;
4137
4138                 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
4139                                          &action_set->counters[i], data);
4140                 if (rc != 0) {
4141                         return rte_flow_error_set(error, EINVAL,
4142                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4143                                 "Queried flow rule counter action is invalid");
4144                 }
4145
4146                 return 0;
4147         }
4148
4149         return rte_flow_error_set(error, ENOENT,
4150                                   RTE_FLOW_ERROR_TYPE_ACTION, action,
4151                                   "flow rule has no count action with such an ID");
4152 }
4153
4154 int
4155 sfc_mae_flow_query(struct rte_eth_dev *dev,
4156                    struct rte_flow *flow,
4157                    const struct rte_flow_action *action,
4158                    void *data,
4159                    struct rte_flow_error *error)
4160 {
4161         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
4162         struct sfc_flow_spec *spec = &flow->spec;
4163         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4164
4165         switch (action->type) {
4166         case RTE_FLOW_ACTION_TYPE_COUNT:
4167                 return sfc_mae_query_counter(sa, spec_mae, action,
4168                                              data, error);
4169         default:
4170                 return rte_flow_error_set(error, ENOTSUP,
4171                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4172                         "Query for action of this type is not supported");
4173         }
4174 }
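
/*
 * Editor's illustrative sketch, not part of the driver: how an
 * application might reach the COUNT query path above through the
 * generic rte_flow API. The port ID and flow handle are assumed to
 * come from the application; a NULL action conf asks for the first
 * counter of the rule, as handled in sfc_mae_query_counter().
 */
static __rte_unused int
sfc_mae_example_query_count(uint16_t port_id, struct rte_flow *flow,
			    uint64_t *hits, uint64_t *bytes)
{
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = NULL, /* no counter ID: pick the rule's first counter */
	};
	struct rte_flow_query_count data = {0};
	struct rte_flow_error error;
	int rc;

	rc = rte_flow_query(port_id, flow, &action, &data, &error);
	if (rc != 0)
		return rc; /* negative errno; rte_errno and error.message set */

	*hits = data.hits;
	*bytes = data.bytes;

	return 0;
}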
4175
4176 int
4177 sfc_mae_switchdev_init(struct sfc_adapter *sa)
4178 {
4179         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
4180         struct sfc_mae *mae = &sa->mae;
4181         efx_mport_sel_t pf;
4182         efx_mport_sel_t phy;
4183         int rc;
4184
4185         sfc_log_init(sa, "entry");
4186
4187         if (!sa->switchdev) {
4188                 sfc_log_init(sa, "switchdev is not enabled - skip");
4189                 return 0;
4190         }
4191
4192         if (mae->status != SFC_MAE_STATUS_ADMIN) {
4193                 rc = ENOTSUP;
4194                 sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
4195                 goto fail_no_mae;
4196         }
4197
4198         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
4199                                             &pf);
4200         if (rc != 0) {
4201                 sfc_err(sa, "failed to get PF mport");
4202                 goto fail_pf_get;
4203         }
4204
4205         rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
4206         if (rc != 0) {
4207                 sfc_err(sa, "failed to get PHY mport");
4208                 goto fail_phy_get;
4209         }
4210
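        /*
         * Install a pair of lowest-priority MAE rules so that traffic
         * passes between the PF and the physical network port by
         * default while switchdev mode is active.
         */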
4211         rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
4212                         SFC_MAE_RULE_PRIO_LOWEST,
4213                         &mae->switchdev_rule_pf_to_ext);
4214         if (rc != 0) {
4215                 sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
4216                 goto fail_pf_add;
4217         }
4218
4219         rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
4220                         SFC_MAE_RULE_PRIO_LOWEST,
4221                         &mae->switchdev_rule_ext_to_pf);
4222         if (rc != 0) {
4223                 sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
4224                 goto fail_phy_add;
4225         }
4226
4227         sfc_log_init(sa, "done");
4228
4229         return 0;
4230
4231 fail_phy_add:
4232         sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4233
4234 fail_pf_add:
4235 fail_phy_get:
4236 fail_pf_get:
4237 fail_no_mae:
4238         sfc_log_init(sa, "failed: %s", rte_strerror(rc));
4239         return rc;
4240 }
4241
4242 void
4243 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
4244 {
4245         struct sfc_mae *mae = &sa->mae;
4246
4247         if (!sa->switchdev)
4248                 return;
4249
4250         sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4251         sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
4252 }