73bedf5088d8129d73bb885f3855ee2bec0d0ea9
[dpdk.git] / drivers / net / sfc / sfc_mae.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <stdbool.h>
11
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
15
16 #include "efx.h"
17
18 #include "sfc.h"
19 #include "sfc_flow_tunnel.h"
20 #include "sfc_mae_counter.h"
21 #include "sfc_log.h"
22 #include "sfc_switch.h"
23 #include "sfc_service.h"
24
/*
 * Derive the m-port selector which corresponds to this ethdev itself,
 * based on the PF / VF numbers found in the NIC configuration.
 *
 * Returns 0 on success or a positive errno value from libefx.
 */
static int
sfc_mae_assign_ethdev_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}
34
35 static int
36 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
37                             efx_mport_sel_t *mportp)
38 {
39         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
40         int rc = 0;
41
42         if (encp->enc_mae_admin) {
43                 /*
44                  * This ethdev sits on MAE admin PF. The represented
45                  * entity is the network port assigned to that PF.
46                  */
47                 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, mportp);
48         } else {
49                 /*
50                  * This ethdev sits on unprivileged PF / VF. The entity
51                  * represented by the ethdev can change dynamically
52                  * as MAE admin changes default traffic rules.
53                  *
54                  * For the sake of simplicity, do not fill in the m-port
55                  * and assume that flow rules should not be allowed to
56                  * reference the entity represented by this ethdev.
57                  */
58                 efx_mae_mport_invalid(mportp);
59         }
60
61         return rc;
62 }
63
/* Prepare the registry's counter storage for at most 'nb_counters_max' entries. */
static int
sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
			      uint32_t nb_counters_max)
{
	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
}
70
/* Release the counter storage owned by the registry. */
static void
sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
{
	sfc_mae_counters_fini(&registry->counters);
}
76
77 static int
78 sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
79                                       struct sfc_mae_rule **rule)
80 {
81         struct sfc_mae *mae = &sa->mae;
82         struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
83         unsigned int entry;
84         int rc;
85
86         for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
87                 if (internal_rules->rules[entry].spec == NULL)
88                         break;
89         }
90
91         if (entry == SFC_MAE_NB_RULES_MAX) {
92                 rc = ENOSPC;
93                 sfc_err(sa, "failed too many rules (%u rules used)", entry);
94                 goto fail_too_many_rules;
95         }
96
97         *rule = &internal_rules->rules[entry];
98
99         return 0;
100
101 fail_too_many_rules:
102         return rc;
103 }
104
/*
 * Add an internal MAE rule which matches on traffic coming from the
 * m-port 'mport_match' and delivers it to the m-port 'mport_deliver'.
 *
 * 'prio' selects the action rule priority; a negative value requests
 * the lowest available priority (nb_action_rule_prios_max - 1).
 * On success, a handle to the occupied rule slot is returned via
 * 'rulep'.
 *
 * Returns 0 on success or a positive errno value; every partially
 * created resource is rolled back on the failure path.
 */
int
sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
				     const efx_mport_sel_t *mport_match,
				     const efx_mport_sel_t *mport_deliver,
				     int prio, struct sfc_mae_rule **rulep)
{
	struct sfc_mae *mae = &sa->mae;
	struct sfc_mae_rule *rule;
	int rc;

	sfc_log_init(sa, "entry");

	if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
		rc = EINVAL;
		sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
			mae->nb_action_rule_prios_max);
		goto fail_invalid_prio;
	}
	/* A negative priority is a request for the lowest one. */
	if (prio < 0)
		prio = mae->nb_action_rule_prios_max - 1;

	rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
	if (rc != 0)
		goto fail_find_empty_slot;

	sfc_log_init(sa, "init MAE match spec");
	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     (uint32_t)prio, &rule->spec);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE match spec");
		goto fail_match_init;
	}

	rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
	if (rc != 0) {
		sfc_err(sa, "failed to get MAE match mport selector");
		goto fail_mport_set;
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
	if (rc != 0) {
		sfc_err(sa, "failed to init MAE action set");
		goto fail_action_init;
	}

	rc = efx_mae_action_set_populate_deliver(rule->actions,
						 mport_deliver);
	if (rc != 0) {
		sfc_err(sa, "failed to populate deliver action");
		goto fail_populate_deliver;
	}

	rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
				      &rule->action_set);
	if (rc != 0) {
		sfc_err(sa, "failed to allocate action set");
		goto fail_action_set_alloc;
	}

	rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
					&rule->action_set,
					&rule->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to insert action rule");
		goto fail_rule_insert;
	}

	*rulep = rule;

	sfc_log_init(sa, "done");

	return 0;

	/* Unwind in reverse order of construction. */
fail_rule_insert:
	efx_mae_action_set_free(sa->nic, &rule->action_set);

fail_action_set_alloc:
fail_populate_deliver:
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);

fail_action_init:
fail_mport_set:
	efx_mae_match_spec_fini(sa->nic, rule->spec);

fail_match_init:
fail_find_empty_slot:
fail_invalid_prio:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
195
/*
 * Remove an internal MAE rule previously added by
 * sfc_mae_rule_add_mport_match_deliver() and release all of its
 * libefx resources. Safe to call with NULL or an unoccupied slot.
 */
void
sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
{
	if (rule == NULL || rule->spec == NULL)
		return;

	efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
	efx_mae_action_set_free(sa->nic, &rule->action_set);
	efx_mae_action_set_spec_fini(sa->nic, rule->actions);
	efx_mae_match_spec_fini(sa->nic, rule->spec);

	/* Mark the slot free for reuse by the internal rule allocator. */
	rule->spec = NULL;
}
209
210 int
211 sfc_mae_attach(struct sfc_adapter *sa)
212 {
213         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
214         struct sfc_mae_switch_port_request switch_port_request = {0};
215         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
216         efx_mport_sel_t ethdev_mport;
217         efx_mport_sel_t entity_mport;
218         struct sfc_mae *mae = &sa->mae;
219         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
220         efx_mae_limits_t limits;
221         int rc;
222
223         sfc_log_init(sa, "entry");
224
225         if (!encp->enc_mae_supported) {
226                 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
227                 return 0;
228         }
229
230         if (encp->enc_mae_admin) {
231                 sfc_log_init(sa, "init MAE");
232                 rc = efx_mae_init(sa->nic);
233                 if (rc != 0)
234                         goto fail_mae_init;
235
236                 sfc_log_init(sa, "get MAE limits");
237                 rc = efx_mae_get_limits(sa->nic, &limits);
238                 if (rc != 0)
239                         goto fail_mae_get_limits;
240
241                 sfc_log_init(sa, "init MAE counter registry");
242                 rc = sfc_mae_counter_registry_init(&mae->counter_registry,
243                                                    limits.eml_max_n_counters);
244                 if (rc != 0) {
245                         sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
246                                 limits.eml_max_n_counters, rte_strerror(rc));
247                         goto fail_counter_registry_init;
248                 }
249         }
250
251         sfc_log_init(sa, "assign ethdev MPORT");
252         rc = sfc_mae_assign_ethdev_mport(sa, &ethdev_mport);
253         if (rc != 0)
254                 goto fail_mae_assign_ethdev_mport;
255
256         sfc_log_init(sa, "assign entity MPORT");
257         rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
258         if (rc != 0)
259                 goto fail_mae_assign_entity_mport;
260
261         sfc_log_init(sa, "assign RTE switch domain");
262         rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
263         if (rc != 0)
264                 goto fail_mae_assign_switch_domain;
265
266         sfc_log_init(sa, "assign RTE switch port");
267         switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
268         switch_port_request.ethdev_mportp = &ethdev_mport;
269         switch_port_request.entity_mportp = &entity_mport;
270         switch_port_request.ethdev_port_id = sas->port_id;
271         switch_port_request.port_data.indep.mae_admin =
272                 encp->enc_mae_admin == B_TRUE;
273         rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
274                                         &switch_port_request,
275                                         &mae->switch_port_id);
276         if (rc != 0)
277                 goto fail_mae_assign_switch_port;
278
279         if (encp->enc_mae_admin) {
280                 sfc_log_init(sa, "allocate encap. header bounce buffer");
281                 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
282                 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
283                                             bounce_eh->buf_size, 0);
284                 if (bounce_eh->buf == NULL)
285                         goto fail_mae_alloc_bounce_eh;
286
287                 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
288                 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
289                 mae->encap_types_supported = limits.eml_encap_types_supported;
290         }
291
292         TAILQ_INIT(&mae->outer_rules);
293         TAILQ_INIT(&mae->encap_headers);
294         TAILQ_INIT(&mae->action_sets);
295
296         if (encp->enc_mae_admin)
297                 mae->status = SFC_MAE_STATUS_ADMIN;
298         else
299                 mae->status = SFC_MAE_STATUS_SUPPORTED;
300
301         sfc_log_init(sa, "done");
302
303         return 0;
304
305 fail_mae_alloc_bounce_eh:
306 fail_mae_assign_switch_port:
307 fail_mae_assign_switch_domain:
308 fail_mae_assign_entity_mport:
309 fail_mae_assign_ethdev_mport:
310         if (encp->enc_mae_admin)
311                 sfc_mae_counter_registry_fini(&mae->counter_registry);
312
313 fail_counter_registry_init:
314 fail_mae_get_limits:
315         if (encp->enc_mae_admin)
316                 efx_mae_fini(sa->nic);
317
318 fail_mae_init:
319         sfc_log_init(sa, "failed %d", rc);
320
321         return rc;
322 }
323
324 void
325 sfc_mae_detach(struct sfc_adapter *sa)
326 {
327         struct sfc_mae *mae = &sa->mae;
328         enum sfc_mae_status status_prev = mae->status;
329
330         sfc_log_init(sa, "entry");
331
332         mae->nb_action_rule_prios_max = 0;
333         mae->status = SFC_MAE_STATUS_UNKNOWN;
334
335         if (status_prev != SFC_MAE_STATUS_ADMIN)
336                 return;
337
338         rte_free(mae->bounce_eh.buf);
339         sfc_mae_counter_registry_fini(&mae->counter_registry);
340
341         efx_mae_fini(sa->nic);
342
343         sfc_log_init(sa, "done");
344 }
345
346 static struct sfc_mae_outer_rule *
347 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
348                           const efx_mae_match_spec_t *match_spec,
349                           efx_tunnel_protocol_t encap_type)
350 {
351         struct sfc_mae_outer_rule *rule;
352         struct sfc_mae *mae = &sa->mae;
353
354         SFC_ASSERT(sfc_adapter_is_locked(sa));
355
356         TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
357                 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
358                     rule->encap_type == encap_type) {
359                         sfc_dbg(sa, "attaching to outer_rule=%p", rule);
360                         ++(rule->refcnt);
361                         return rule;
362                 }
363         }
364
365         return NULL;
366 }
367
368 static int
369 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
370                        efx_mae_match_spec_t *match_spec,
371                        efx_tunnel_protocol_t encap_type,
372                        struct sfc_mae_outer_rule **rulep)
373 {
374         struct sfc_mae_outer_rule *rule;
375         struct sfc_mae *mae = &sa->mae;
376
377         SFC_ASSERT(sfc_adapter_is_locked(sa));
378
379         rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
380         if (rule == NULL)
381                 return ENOMEM;
382
383         rule->refcnt = 1;
384         rule->match_spec = match_spec;
385         rule->encap_type = encap_type;
386
387         rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
388
389         TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
390
391         *rulep = rule;
392
393         sfc_dbg(sa, "added outer_rule=%p", rule);
394
395         return 0;
396 }
397
398 static void
399 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
400                        struct sfc_mae_outer_rule *rule)
401 {
402         struct sfc_mae *mae = &sa->mae;
403
404         SFC_ASSERT(sfc_adapter_is_locked(sa));
405         SFC_ASSERT(rule->refcnt != 0);
406
407         --(rule->refcnt);
408
409         if (rule->refcnt != 0)
410                 return;
411
412         if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
413             rule->fw_rsrc.refcnt != 0) {
414                 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
415                         rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
416         }
417
418         efx_mae_match_spec_fini(sa->nic, rule->match_spec);
419
420         TAILQ_REMOVE(&mae->outer_rules, rule, entries);
421         rte_free(rule);
422
423         sfc_dbg(sa, "deleted outer_rule=%p", rule);
424 }
425
/*
 * Make sure the outer rule's FW resource exists and, optionally,
 * make the given action rule match spec refer to it.
 *
 * The FW resource is reference-counted: it is inserted on the first
 * enable only; later calls just bump the counter. If filling in the
 * outer rule ID fails right after a fresh insertion, the insertion
 * is rolled back so the refcount stays untouched on failure.
 *
 * Returns 0 on success or a positive errno value.
 */
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		/* First user: the HW rule must not exist yet. */
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable outer_rule=%p: %s",
				rule, strerror(rc));
			return rc;
		}
	}

	if (match_spec_action == NULL)
		goto skip_action_rule;

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		/* Roll back an insertion done by this very call. */
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

		return rc;
	}

skip_action_rule:
	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
			rule, fw_rsrc->rule_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
477
/*
 * Drop one reference to the outer rule's FW resource and remove the
 * HW rule when the last reference goes away. Unbalanced calls are
 * detected, logged and ignored.
 */
static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		/* Last user: remove the HW rule. */
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
				rule, fw_rsrc->rule_id.id);
		} else {
			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
				rule, fw_rsrc->rule_id.id, strerror(rc));
		}
		/* Invalidate the ID even if the removal failed. */
		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
508
509 static struct sfc_mae_encap_header *
510 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
511                             const struct sfc_mae_bounce_eh *bounce_eh)
512 {
513         struct sfc_mae_encap_header *encap_header;
514         struct sfc_mae *mae = &sa->mae;
515
516         SFC_ASSERT(sfc_adapter_is_locked(sa));
517
518         TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
519                 if (encap_header->size == bounce_eh->size &&
520                     memcmp(encap_header->buf, bounce_eh->buf,
521                            bounce_eh->size) == 0) {
522                         sfc_dbg(sa, "attaching to encap_header=%p",
523                                 encap_header);
524                         ++(encap_header->refcnt);
525                         return encap_header;
526                 }
527         }
528
529         return NULL;
530 }
531
532 static int
533 sfc_mae_encap_header_add(struct sfc_adapter *sa,
534                          const struct sfc_mae_bounce_eh *bounce_eh,
535                          struct sfc_mae_encap_header **encap_headerp)
536 {
537         struct sfc_mae_encap_header *encap_header;
538         struct sfc_mae *mae = &sa->mae;
539
540         SFC_ASSERT(sfc_adapter_is_locked(sa));
541
542         encap_header = rte_zmalloc("sfc_mae_encap_header",
543                                    sizeof(*encap_header), 0);
544         if (encap_header == NULL)
545                 return ENOMEM;
546
547         encap_header->size = bounce_eh->size;
548
549         encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
550                                        encap_header->size, 0);
551         if (encap_header->buf == NULL) {
552                 rte_free(encap_header);
553                 return ENOMEM;
554         }
555
556         rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
557
558         encap_header->refcnt = 1;
559         encap_header->type = bounce_eh->type;
560         encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
561
562         TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
563
564         *encap_headerp = encap_header;
565
566         sfc_dbg(sa, "added encap_header=%p", encap_header);
567
568         return 0;
569 }
570
/*
 * Drop one reference to the encap. header; free the object (and its
 * private buffer) once the last reference goes away. NULL is a no-op.
 * A leftover FW resource at that point indicates unbalanced
 * enable/disable and is reported, not reclaimed.
 */
static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
			 struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae *mae = &sa->mae;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(encap_header->refcnt != 0);

	--(encap_header->refcnt);

	if (encap_header->refcnt != 0)
		return;

	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    encap_header->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
			encap_header, encap_header->fw_rsrc.eh_id.id,
			encap_header->fw_rsrc.refcnt);
	}

	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
	rte_free(encap_header->buf);
	rte_free(encap_header);

	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}
601
/*
 * Make sure the encap. header's FW resource exists and fill its ID in
 * the given action set spec. NULL 'encap_header' is a no-op.
 *
 * The FW resource is reference-counted: it is allocated on the first
 * enable only; later calls just bump the counter. If filling in the
 * header ID fails right after a fresh allocation, the allocation is
 * rolled back so the refcount stays untouched on failure.
 *
 * Returns 0 on success or a positive errno value.
 */
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
			    struct sfc_mae_encap_header *encap_header,
			    efx_mae_actions_t *action_set_spec)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->refcnt == 0) {
		/* First user: the FW object must not exist yet. */
		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(encap_header->buf != NULL);
		SFC_ASSERT(encap_header->size != 0);

		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
						encap_header->buf,
						encap_header->size,
						&fw_rsrc->eh_id);
		if (rc != 0) {
			sfc_err(sa, "failed to enable encap_header=%p: %s",
				encap_header, strerror(rc));
			return rc;
		}
	}

	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
					      &fw_rsrc->eh_id);
	if (rc != 0) {
		/* Roll back an allocation done by this very call. */
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_encap_header_free(sa->nic,
							&fw_rsrc->eh_id);
			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
		}

		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

		return rc;
	}

	if (fw_rsrc->refcnt == 0) {
		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
			encap_header, fw_rsrc->eh_id.id);
	}

	++(fw_rsrc->refcnt);

	return 0;
}
656
/*
 * Drop one reference to the encap. header's FW resource and free the
 * FW object when the last reference goes away. NULL 'encap_header' is
 * a no-op; unbalanced calls are detected, logged and ignored.
 */
static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
			     struct sfc_mae_encap_header *encap_header)
{
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	if (encap_header == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	fw_rsrc = &encap_header->fw_rsrc;

	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
	    fw_rsrc->refcnt == 0) {
		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
		return;
	}

	if (fw_rsrc->refcnt == 1) {
		/* Last user: free the FW object. */
		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
		if (rc == 0) {
			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
				encap_header, fw_rsrc->eh_id.id);
		} else {
			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
				encap_header, fw_rsrc->eh_id.id, strerror(rc));
		}
		/* Invalidate the ID even if the free failed. */
		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);
}
692
/*
 * Enable the MAE counter(s) of an action set and fill the counter ID
 * in the action set spec. Only zero or one counter per action set is
 * supported (hence the n_counters == 1 assertion).
 *
 * Returns 0 on success or a positive errno value; a counter enabled
 * by this call is disabled again on the failure path.
 */
static int
sfc_mae_counters_enable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counters,
			unsigned int n_counters,
			efx_mae_actions_t *action_set_spec)
{
	int rc;

	sfc_log_init(sa, "entry");

	if (n_counters == 0) {
		sfc_log_init(sa, "no counters - skip");
		return 0;
	}

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(n_counters == 1);

	rc = sfc_mae_counter_enable(sa, &counters[0]);
	if (rc != 0) {
		sfc_err(sa, "failed to enable MAE counter %u: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_counter_add;
	}

	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
						   &counters[0].mae_id);
	if (rc != 0) {
		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
			counters[0].mae_id.id, rte_strerror(rc));
		goto fail_fill_in_id;
	}

	return 0;

fail_fill_in_id:
	(void)sfc_mae_counter_disable(sa, &counters[0]);

fail_counter_add:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
735
736 static int
737 sfc_mae_counters_disable(struct sfc_adapter *sa,
738                          struct sfc_mae_counter_id *counters,
739                          unsigned int n_counters)
740 {
741         if (n_counters == 0)
742                 return 0;
743
744         SFC_ASSERT(sfc_adapter_is_locked(sa));
745         SFC_ASSERT(n_counters == 1);
746
747         if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
748                 sfc_err(sa, "failed to disable: already disabled");
749                 return EALREADY;
750         }
751
752         return sfc_mae_counter_disable(sa, &counters[0]);
753 }
754
755 static struct sfc_mae_action_set *
756 sfc_mae_action_set_attach(struct sfc_adapter *sa,
757                           const struct sfc_mae_encap_header *encap_header,
758                           unsigned int n_count,
759                           const efx_mae_actions_t *spec)
760 {
761         struct sfc_mae_action_set *action_set;
762         struct sfc_mae *mae = &sa->mae;
763
764         SFC_ASSERT(sfc_adapter_is_locked(sa));
765
766         /*
767          * Shared counters are not supported, hence, action
768          * sets with counters are not attachable.
769          */
770         if (n_count != 0)
771                 return NULL;
772
773         TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
774                 if (action_set->encap_header == encap_header &&
775                     efx_mae_action_set_specs_equal(action_set->spec, spec)) {
776                         sfc_dbg(sa, "attaching to action_set=%p", action_set);
777                         ++(action_set->refcnt);
778                         return action_set;
779                 }
780         }
781
782         return NULL;
783 }
784
/*
 * Allocate a new action set object (refcount 1), register it on the
 * adapter-wide list and hand it back via 'action_setp'. Ownership of
 * 'spec' and 'encap_header' is transferred to the new object.
 *
 * When counters are requested, a counter ID array is allocated and
 * the RTE counter IDs are harvested from COUNT actions found in the
 * flow's action list. 'ft_group_hit_counter' / 'ft' carry flow-tunnel
 * context into every counter entry.
 *
 * Returns 0 on success or ENOMEM.
 */
static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       efx_mae_actions_t *spec,
		       struct sfc_mae_encap_header *encap_header,
		       uint64_t *ft_group_hit_counter,
		       struct sfc_flow_tunnel *ft,
		       unsigned int n_counters,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL) {
		sfc_err(sa, "failed to alloc action set");
		return ENOMEM;
	}

	if (n_counters > 0) {
		const struct rte_flow_action *action;

		action_set->counters = rte_malloc("sfc_mae_counter_ids",
			sizeof(action_set->counters[0]) * n_counters, 0);
		if (action_set->counters == NULL) {
			rte_free(action_set);
			sfc_err(sa, "failed to alloc counters");
			return ENOMEM;
		}

		/* Mark every slot unallocated before harvesting RTE IDs. */
		for (i = 0; i < n_counters; ++i) {
			action_set->counters[i].rte_id_valid = B_FALSE;
			action_set->counters[i].mae_id.id =
				EFX_MAE_RSRC_ID_INVALID;

			action_set->counters[i].ft_group_hit_counter =
				ft_group_hit_counter;
			action_set->counters[i].ft = ft;
		}

		/*
		 * Walk the action list and record the RTE counter ID of
		 * each COUNT action, up to n_counters entries.
		 */
		for (action = actions, i = 0;
		     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
		     ++action) {
			const struct rte_flow_action_count *conf;

			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
				continue;

			conf = action->conf;

			action_set->counters[i].rte_id_valid = B_TRUE;
			action_set->counters[i].rte_id = conf->id;
			i++;
		}
		action_set->n_counters = n_counters;
	}

	action_set->refcnt = 1;
	action_set->spec = spec;
	action_set->encap_header = encap_header;

	/* No FW resource exists until the action set is enabled. */
	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	sfc_dbg(sa, "added action_set=%p", action_set);

	return 0;
}
859
/*
 * Drop one reference to the action set; free the object (its spec,
 * encap. header reference and counter array) once the last reference
 * goes away. A leftover FW resource at that point indicates
 * unbalanced enable/disable and is reported, not reclaimed.
 */
static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
	    action_set->fw_rsrc.refcnt != 0) {
		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
			action_set, action_set->fw_rsrc.aset_id.id,
			action_set->fw_rsrc.refcnt);
	}

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	sfc_mae_encap_header_del(sa, action_set->encap_header);
	if (action_set->n_counters > 0) {
		/* Counters must have been disabled prior to deletion. */
		SFC_ASSERT(action_set->n_counters == 1);
		SFC_ASSERT(action_set->counters[0].mae_id.id ==
			   EFX_MAE_RSRC_ID_INVALID);
		rte_free(action_set->counters);
	}
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);

	sfc_dbg(sa, "deleted action_set=%p", action_set);
}
894
895 static int
896 sfc_mae_action_set_enable(struct sfc_adapter *sa,
897                           struct sfc_mae_action_set *action_set)
898 {
899         struct sfc_mae_encap_header *encap_header = action_set->encap_header;
900         struct sfc_mae_counter_id *counters = action_set->counters;
901         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
902         int rc;
903
904         SFC_ASSERT(sfc_adapter_is_locked(sa));
905
906         if (fw_rsrc->refcnt == 0) {
907                 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
908                 SFC_ASSERT(action_set->spec != NULL);
909
910                 rc = sfc_mae_encap_header_enable(sa, encap_header,
911                                                  action_set->spec);
912                 if (rc != 0)
913                         return rc;
914
915                 rc = sfc_mae_counters_enable(sa, counters,
916                                              action_set->n_counters,
917                                              action_set->spec);
918                 if (rc != 0) {
919                         sfc_err(sa, "failed to enable %u MAE counters: %s",
920                                 action_set->n_counters, rte_strerror(rc));
921
922                         sfc_mae_encap_header_disable(sa, encap_header);
923                         return rc;
924                 }
925
926                 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
927                                               &fw_rsrc->aset_id);
928                 if (rc != 0) {
929                         sfc_err(sa, "failed to enable action_set=%p: %s",
930                                 action_set, strerror(rc));
931
932                         (void)sfc_mae_counters_disable(sa, counters,
933                                                        action_set->n_counters);
934                         sfc_mae_encap_header_disable(sa, encap_header);
935                         return rc;
936                 }
937
938                 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
939                         action_set, fw_rsrc->aset_id.id);
940         }
941
942         ++(fw_rsrc->refcnt);
943
944         return 0;
945 }
946
947 static void
948 sfc_mae_action_set_disable(struct sfc_adapter *sa,
949                            struct sfc_mae_action_set *action_set)
950 {
951         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
952         int rc;
953
954         SFC_ASSERT(sfc_adapter_is_locked(sa));
955
956         if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
957             fw_rsrc->refcnt == 0) {
958                 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
959                         action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
960                 return;
961         }
962
963         if (fw_rsrc->refcnt == 1) {
964                 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
965                 if (rc == 0) {
966                         sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
967                                 action_set, fw_rsrc->aset_id.id);
968                 } else {
969                         sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
970                                 action_set, fw_rsrc->aset_id.id, strerror(rc));
971                 }
972                 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
973
974                 rc = sfc_mae_counters_disable(sa, action_set->counters,
975                                               action_set->n_counters);
976                 if (rc != 0) {
977                         sfc_err(sa, "failed to disable %u MAE counters: %s",
978                                 action_set->n_counters, rte_strerror(rc));
979                 }
980
981                 sfc_mae_encap_header_disable(sa, action_set->encap_header);
982         }
983
984         --(fw_rsrc->refcnt);
985 }
986
987 void
988 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
989                      struct rte_flow *flow)
990 {
991         struct sfc_flow_spec *spec;
992         struct sfc_flow_spec_mae *spec_mae;
993
994         if (flow == NULL)
995                 return;
996
997         spec = &flow->spec;
998
999         if (spec == NULL)
1000                 return;
1001
1002         spec_mae = &spec->mae;
1003
1004         if (spec_mae->ft != NULL) {
1005                 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
1006                         spec_mae->ft->jump_rule_is_set = B_FALSE;
1007
1008                 SFC_ASSERT(spec_mae->ft->refcnt != 0);
1009                 --(spec_mae->ft->refcnt);
1010         }
1011
1012         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
1013
1014         if (spec_mae->outer_rule != NULL)
1015                 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
1016
1017         if (spec_mae->action_set != NULL)
1018                 sfc_mae_action_set_del(sa, spec_mae->action_set);
1019
1020         if (spec_mae->match_spec != NULL)
1021                 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
1022 }
1023
1024 static int
1025 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
1026 {
1027         struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1028         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1029         const efx_mae_field_id_t field_ids[] = {
1030                 EFX_MAE_FIELD_VLAN0_PROTO_BE,
1031                 EFX_MAE_FIELD_VLAN1_PROTO_BE,
1032         };
1033         const struct sfc_mae_ethertype *et;
1034         unsigned int i;
1035         int rc;
1036
1037         /*
1038          * In accordance with RTE flow API convention, the innermost L2
1039          * item's "type" ("inner_type") is a L3 EtherType. If there is
1040          * no L3 item, it's 0x0000/0x0000.
1041          */
1042         et = &pdata->ethertypes[pdata->nb_vlan_tags];
1043         rc = efx_mae_match_spec_field_set(ctx->match_spec,
1044                                           fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
1045                                           sizeof(et->value),
1046                                           (const uint8_t *)&et->value,
1047                                           sizeof(et->mask),
1048                                           (const uint8_t *)&et->mask);
1049         if (rc != 0)
1050                 return rc;
1051
1052         /*
1053          * sfc_mae_rule_parse_item_vlan() has already made sure
1054          * that pdata->nb_vlan_tags does not exceed this figure.
1055          */
1056         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1057
1058         for (i = 0; i < pdata->nb_vlan_tags; ++i) {
1059                 et = &pdata->ethertypes[i];
1060
1061                 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1062                                                   fremap[field_ids[i]],
1063                                                   sizeof(et->value),
1064                                                   (const uint8_t *)&et->value,
1065                                                   sizeof(et->mask),
1066                                                   (const uint8_t *)&et->mask);
1067                 if (rc != 0)
1068                         return rc;
1069         }
1070
1071         return 0;
1072 }
1073
/*
 * Post-process L2-related pattern data stashed during item parsing and
 * commit the resulting EtherType / TPID / IP protocol criteria to the
 * match specification.
 *
 * This validates that any VLAN TPIDs are supported for their position
 * in the tag stack, reconciles the innermost EtherType and the L3 next
 * protocol with restrictions imposed by the respective L3 / L4 items,
 * and sets the HAS_OVLAN / HAS_IVLAN match bits where tag presence
 * must be enforced or was requested explicitly.
 *
 * Returns 0 on success; otherwise fills in @error and returns the
 * value of rte_flow_error_set().
 */
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by a L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	/*
	 * Validate the TPID of each VLAN tag. ethertypes[i] holds the
	 * "type" of the L2 item *preceding* tag i, i.e. its TPID.
	 */
	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
		unsigned int tpid_idx;

		/*
		 * This loop can have only two iterations. On the second one,
		 * drop outer tag presence enforcement bit because the inner
		 * tag presence automatically assumes that for the outer tag.
		 */
		enforce_tag_presence[0] = B_FALSE;

		if (tpid_m == RTE_BE16(0)) {
			/*
			 * Wildcard TPID with a wildcard TCI means the tag
			 * itself was fully wildcard; enforce its presence
			 * via the HAS_OVLAN / HAS_IVLAN bit instead.
			 */
			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
				enforce_tag_presence[ethertype_idx] = B_TRUE;

			/* No match on this field, and no value check. */
			nb_supported_tpids = 1;
			continue;
		}

		/* Exact match is supported only. */
		if (tpid_m != RTE_BE16(0xffff)) {
			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
				rte_be_to_cpu_16(tpid_m));
			rc = EINVAL;
			goto fail;
		}

		/*
		 * With two tags, the outer TPID (ethertype_idx == 0) must
		 * be a double-tagging TPID, so the search starts at index
		 * 1, past the standard TPID; the innermost tag's search
		 * starts at index 0, but by then nb_supported_tpids has
		 * been clamped to 1, allowing the standard TPID only.
		 */
		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (tpid_v == supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
				rte_be_to_cpu_16(tpid_v));
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	/*
	 * If a L3 item dictates a particular innermost EtherType, either
	 * adopt it (when the ETH/VLAN item left it wildcard) or insist
	 * that the explicitly requested value agrees with it.
	 */
	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
		rte_be16_t enforced_et;

		enforced_et = pdata->innermost_ethertype_restriction.value;

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value = enforced_et;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value != enforced_et) {
			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
				rte_be_to_cpu_16(enforced_et),
				rte_be_to_cpu_16(et->value),
				rte_be_to_cpu_16(et->mask));
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now, when the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is either a valid L3 EtherType (or 0x0000/0x0000),
	 * and the last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	/*
	 * Same reconciliation as for the EtherType above, but for the
	 * L3 next protocol (IP_PROTO) dictated by a L4 item.
	 */
	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
				pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
				pdata->l3_next_proto_restriction_value,
				pdata->l3_next_proto_value,
				pdata->l3_next_proto_mask);
			rc = EINVAL;
			goto fail;
		}
	}

	/* Outer tag presence: enforced above or requested by the item. */
	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_OVLAN],
						enforce_tag_presence[0] ||
						pdata->has_ovlan_value);
		if (rc != 0)
			goto fail;
	}

	/* Inner tag presence: likewise. */
	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
						fremap[EFX_MAE_FIELD_HAS_IVLAN],
						enforce_tag_presence[1] ||
						pdata->has_ivlan_value);
		if (rc != 0)
			goto fail;
	}

	/* Commit the (possibly reconciled) IP protocol criterion. */
	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}
1241
1242 static int
1243 sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
1244                              struct sfc_flow_parse_ctx *ctx,
1245                              struct rte_flow_error *error)
1246 {
1247         const struct rte_flow_item_mark *spec = item->spec;
1248         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1249
1250         if (spec == NULL) {
1251                 return rte_flow_error_set(error, EINVAL,
1252                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1253                                 "NULL spec in item MARK");
1254         }
1255
1256         /*
1257          * This item is used in tunnel offload support only.
1258          * It must go before any network header items. This
1259          * way, sfc_mae_rule_preparse_item_mark() must have
1260          * already parsed it. Only one item MARK is allowed.
1261          */
1262         if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
1263             spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
1264                 return rte_flow_error_set(error, EINVAL,
1265                                           RTE_FLOW_ERROR_TYPE_ITEM,
1266                                           item, "invalid item MARK");
1267         }
1268
1269         return 0;
1270 }
1271
1272 static int
1273 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1274                                 struct sfc_flow_parse_ctx *ctx,
1275                                 struct rte_flow_error *error)
1276 {
1277         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1278         const struct rte_flow_item_port_id supp_mask = {
1279                 .id = 0xffffffff,
1280         };
1281         const void *def_mask = &rte_flow_item_port_id_mask;
1282         const struct rte_flow_item_port_id *spec = NULL;
1283         const struct rte_flow_item_port_id *mask = NULL;
1284         efx_mport_sel_t mport_sel;
1285         int rc;
1286
1287         if (ctx_mae->match_mport_set) {
1288                 return rte_flow_error_set(error, ENOTSUP,
1289                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1290                                 "Can't handle multiple traffic source items");
1291         }
1292
1293         rc = sfc_flow_parse_init(item,
1294                                  (const void **)&spec, (const void **)&mask,
1295                                  (const void *)&supp_mask, def_mask,
1296                                  sizeof(struct rte_flow_item_port_id), error);
1297         if (rc != 0)
1298                 return rc;
1299
1300         if (mask->id != supp_mask.id) {
1301                 return rte_flow_error_set(error, EINVAL,
1302                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1303                                 "Bad mask in the PORT_ID pattern item");
1304         }
1305
1306         /* If "spec" is not set, could be any port ID */
1307         if (spec == NULL)
1308                 return 0;
1309
1310         if (spec->id > UINT16_MAX) {
1311                 return rte_flow_error_set(error, EOVERFLOW,
1312                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1313                                           "The port ID is too large");
1314         }
1315
1316         rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
1317                                              spec->id, &mport_sel);
1318         if (rc != 0) {
1319                 return rte_flow_error_set(error, rc,
1320                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1321                                 "Can't get m-port for the given ethdev");
1322         }
1323
1324         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1325                                           &mport_sel, NULL);
1326         if (rc != 0) {
1327                 return rte_flow_error_set(error, rc,
1328                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1329                                 "Failed to set MPORT for the port ID");
1330         }
1331
1332         ctx_mae->match_mport_set = B_TRUE;
1333
1334         return 0;
1335 }
1336
1337 static int
1338 sfc_mae_rule_parse_item_ethdev_based(const struct rte_flow_item *item,
1339                                      struct sfc_flow_parse_ctx *ctx,
1340                                      struct rte_flow_error *error)
1341 {
1342         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1343         const struct rte_flow_item_ethdev supp_mask = {
1344                 .port_id = 0xffff,
1345         };
1346         const void *def_mask = &rte_flow_item_ethdev_mask;
1347         const struct rte_flow_item_ethdev *spec = NULL;
1348         const struct rte_flow_item_ethdev *mask = NULL;
1349         efx_mport_sel_t mport_sel;
1350         int rc;
1351
1352         if (ctx_mae->match_mport_set) {
1353                 return rte_flow_error_set(error, ENOTSUP,
1354                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1355                                 "Can't handle multiple traffic source items");
1356         }
1357
1358         rc = sfc_flow_parse_init(item,
1359                                  (const void **)&spec, (const void **)&mask,
1360                                  (const void *)&supp_mask, def_mask,
1361                                  sizeof(struct rte_flow_item_ethdev), error);
1362         if (rc != 0)
1363                 return rc;
1364
1365         if (mask->port_id != supp_mask.port_id) {
1366                 return rte_flow_error_set(error, EINVAL,
1367                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1368                                 "Bad mask in the ethdev-based pattern item");
1369         }
1370
1371         /* If "spec" is not set, could be any port ID */
1372         if (spec == NULL)
1373                 return 0;
1374
1375         switch (item->type) {
1376         case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
1377                 rc = sfc_mae_switch_get_ethdev_mport(
1378                                 ctx_mae->sa->mae.switch_domain_id,
1379                                 spec->port_id, &mport_sel);
1380                 if (rc != 0) {
1381                         return rte_flow_error_set(error, rc,
1382                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1383                                         "Can't get m-port for the given ethdev");
1384                 }
1385                 break;
1386         case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
1387                 rc = sfc_mae_switch_get_entity_mport(
1388                                 ctx_mae->sa->mae.switch_domain_id,
1389                                 spec->port_id, &mport_sel);
1390                 if (rc != 0) {
1391                         return rte_flow_error_set(error, rc,
1392                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1393                                         "Can't get m-port for the given ethdev");
1394                 }
1395                 break;
1396         default:
1397                 return rte_flow_error_set(error, EINVAL,
1398                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1399                                 "Unsupported ethdev-based flow item");
1400         }
1401
1402         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1403                                           &mport_sel, NULL);
1404         if (rc != 0) {
1405                 return rte_flow_error_set(error, rc,
1406                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1407                                 "Failed to set MPORT for the port ID");
1408         }
1409
1410         ctx_mae->match_mport_set = B_TRUE;
1411
1412         return 0;
1413 }
1414
1415 static int
1416 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1417                                  struct sfc_flow_parse_ctx *ctx,
1418                                  struct rte_flow_error *error)
1419 {
1420         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1421         const struct rte_flow_item_phy_port supp_mask = {
1422                 .index = 0xffffffff,
1423         };
1424         const void *def_mask = &rte_flow_item_phy_port_mask;
1425         const struct rte_flow_item_phy_port *spec = NULL;
1426         const struct rte_flow_item_phy_port *mask = NULL;
1427         efx_mport_sel_t mport_v;
1428         int rc;
1429
1430         if (ctx_mae->match_mport_set) {
1431                 return rte_flow_error_set(error, ENOTSUP,
1432                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1433                                 "Can't handle multiple traffic source items");
1434         }
1435
1436         rc = sfc_flow_parse_init(item,
1437                                  (const void **)&spec, (const void **)&mask,
1438                                  (const void *)&supp_mask, def_mask,
1439                                  sizeof(struct rte_flow_item_phy_port), error);
1440         if (rc != 0)
1441                 return rc;
1442
1443         if (mask->index != supp_mask.index) {
1444                 return rte_flow_error_set(error, EINVAL,
1445                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1446                                 "Bad mask in the PHY_PORT pattern item");
1447         }
1448
1449         /* If "spec" is not set, could be any physical port */
1450         if (spec == NULL)
1451                 return 0;
1452
1453         rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1454         if (rc != 0) {
1455                 return rte_flow_error_set(error, rc,
1456                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1457                                 "Failed to convert the PHY_PORT index");
1458         }
1459
1460         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1461         if (rc != 0) {
1462                 return rte_flow_error_set(error, rc,
1463                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1464                                 "Failed to set MPORT for the PHY_PORT");
1465         }
1466
1467         ctx_mae->match_mport_set = B_TRUE;
1468
1469         return 0;
1470 }
1471
1472 static int
1473 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1474                            struct sfc_flow_parse_ctx *ctx,
1475                            struct rte_flow_error *error)
1476 {
1477         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1478         const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1479         efx_mport_sel_t mport_v;
1480         int rc;
1481
1482         if (ctx_mae->match_mport_set) {
1483                 return rte_flow_error_set(error, ENOTSUP,
1484                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1485                                 "Can't handle multiple traffic source items");
1486         }
1487
1488         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1489                                             &mport_v);
1490         if (rc != 0) {
1491                 return rte_flow_error_set(error, rc,
1492                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1493                                 "Failed to convert the PF ID");
1494         }
1495
1496         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1497         if (rc != 0) {
1498                 return rte_flow_error_set(error, rc,
1499                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1500                                 "Failed to set MPORT for the PF");
1501         }
1502
1503         ctx_mae->match_mport_set = B_TRUE;
1504
1505         return 0;
1506 }
1507
1508 static int
1509 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1510                            struct sfc_flow_parse_ctx *ctx,
1511                            struct rte_flow_error *error)
1512 {
1513         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1514         const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1515         const struct rte_flow_item_vf supp_mask = {
1516                 .id = 0xffffffff,
1517         };
1518         const void *def_mask = &rte_flow_item_vf_mask;
1519         const struct rte_flow_item_vf *spec = NULL;
1520         const struct rte_flow_item_vf *mask = NULL;
1521         efx_mport_sel_t mport_v;
1522         int rc;
1523
1524         if (ctx_mae->match_mport_set) {
1525                 return rte_flow_error_set(error, ENOTSUP,
1526                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1527                                 "Can't handle multiple traffic source items");
1528         }
1529
1530         rc = sfc_flow_parse_init(item,
1531                                  (const void **)&spec, (const void **)&mask,
1532                                  (const void *)&supp_mask, def_mask,
1533                                  sizeof(struct rte_flow_item_vf), error);
1534         if (rc != 0)
1535                 return rc;
1536
1537         if (mask->id != supp_mask.id) {
1538                 return rte_flow_error_set(error, EINVAL,
1539                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1540                                 "Bad mask in the VF pattern item");
1541         }
1542
1543         /*
1544          * If "spec" is not set, the item requests any VF related to the
1545          * PF of the current DPDK port (but not the PF itself).
1546          * Reject this match criterion as unsupported.
1547          */
1548         if (spec == NULL) {
1549                 return rte_flow_error_set(error, EINVAL,
1550                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1551                                 "Bad spec in the VF pattern item");
1552         }
1553
1554         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1555         if (rc != 0) {
1556                 return rte_flow_error_set(error, rc,
1557                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1558                                 "Failed to convert the PF + VF IDs");
1559         }
1560
1561         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1562         if (rc != 0) {
1563                 return rte_flow_error_set(error, rc,
1564                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1565                                 "Failed to set MPORT for the PF + VF");
1566         }
1567
1568         ctx_mae->match_mport_set = B_TRUE;
1569
1570         return 0;
1571 }
1572
1573 /*
1574  * Having this field ID in a field locator means that this
1575  * locator cannot be used to actually set the field at the
1576  * time when the corresponding item gets encountered. Such
1577  * fields get stashed in the parsing context instead. This
1578  * is required to resolve dependencies between the stashed
1579  * fields. See sfc_mae_rule_process_pattern_data().
1580  */
1581 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
1582
/*
 * Describes how one fixed-size field of an RTE flow item maps to a
 * MAE match specification field.
 */
struct sfc_mae_field_locator {
	/* MAE field to set, or SFC_MAE_FIELD_HANDLING_DEFERRED */
	efx_mae_field_id_t		field_id;
	/* Field size, in bytes */
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};
1589
1590 static void
1591 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1592                              unsigned int nb_field_locators, void *mask_ptr,
1593                              size_t mask_size)
1594 {
1595         unsigned int i;
1596
1597         memset(mask_ptr, 0, mask_size);
1598
1599         for (i = 0; i < nb_field_locators; ++i) {
1600                 const struct sfc_mae_field_locator *fl = &field_locators[i];
1601
1602                 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1603                 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1604         }
1605 }
1606
1607 static int
1608 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1609                    unsigned int nb_field_locators, const uint8_t *spec,
1610                    const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1611                    struct rte_flow_error *error)
1612 {
1613         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1614         unsigned int i;
1615         int rc = 0;
1616
1617         for (i = 0; i < nb_field_locators; ++i) {
1618                 const struct sfc_mae_field_locator *fl = &field_locators[i];
1619
1620                 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1621                         continue;
1622
1623                 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1624                                                   fremap[fl->field_id],
1625                                                   fl->size, spec + fl->ofst,
1626                                                   fl->size, mask + fl->ofst);
1627                 if (rc != 0)
1628                         break;
1629         }
1630
1631         if (rc != 0) {
1632                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1633                                 NULL, "Failed to process item fields");
1634         }
1635
1636         return rc;
1637 }
1638
/* Field locators for item ETH (offsets within struct rte_flow_item_eth) */
static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		/* Destination MAC address */
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		/* Source MAC address */
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};
1660
1661 static int
1662 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1663                             struct sfc_flow_parse_ctx *ctx,
1664                             struct rte_flow_error *error)
1665 {
1666         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1667         struct rte_flow_item_eth override_mask;
1668         struct rte_flow_item_eth supp_mask;
1669         const uint8_t *spec = NULL;
1670         const uint8_t *mask = NULL;
1671         int rc;
1672
1673         sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1674                                      &supp_mask, sizeof(supp_mask));
1675         supp_mask.has_vlan = 1;
1676
1677         rc = sfc_flow_parse_init(item,
1678                                  (const void **)&spec, (const void **)&mask,
1679                                  (const void *)&supp_mask,
1680                                  &rte_flow_item_eth_mask,
1681                                  sizeof(struct rte_flow_item_eth), error);
1682         if (rc != 0)
1683                 return rc;
1684
1685         if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
1686                 /*
1687                  * The HW/FW hasn't got support for match on MAC addresses in
1688                  * outer rules yet (this will change). Match on VLAN presence
1689                  * isn't supported either. Ignore these match criteria.
1690                  */
1691                 memcpy(&override_mask, mask, sizeof(override_mask));
1692                 memset(&override_mask.hdr.dst_addr, 0,
1693                        sizeof(override_mask.hdr.dst_addr));
1694                 memset(&override_mask.hdr.src_addr, 0,
1695                        sizeof(override_mask.hdr.src_addr));
1696                 override_mask.has_vlan = 0;
1697
1698                 mask = (const uint8_t *)&override_mask;
1699         }
1700
1701         if (spec != NULL) {
1702                 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1703                 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1704                 const struct rte_flow_item_eth *item_spec;
1705                 const struct rte_flow_item_eth *item_mask;
1706
1707                 item_spec = (const struct rte_flow_item_eth *)spec;
1708                 item_mask = (const struct rte_flow_item_eth *)mask;
1709
1710                 /*
1711                  * Remember various match criteria in the parsing context.
1712                  * sfc_mae_rule_process_pattern_data() will consider them
1713                  * altogether when the rest of the items have been parsed.
1714                  */
1715                 ethertypes[0].value = item_spec->type;
1716                 ethertypes[0].mask = item_mask->type;
1717                 if (item_mask->has_vlan) {
1718                         pdata->has_ovlan_mask = B_TRUE;
1719                         if (item_spec->has_vlan)
1720                                 pdata->has_ovlan_value = B_TRUE;
1721                 }
1722         } else {
1723                 /*
1724                  * The specification is empty. The overall pattern
1725                  * validity will be enforced at the end of parsing.
1726                  * See sfc_mae_rule_process_pattern_data().
1727                  */
1728                 return 0;
1729         }
1730
1731         return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1732                                   ctx_mae, error);
1733 }
1734
/*
 * Field locators for item VLAN. The table holds one locator group per
 * supported tag; sfc_mae_rule_parse_item_vlan() indexes into it by the
 * number of VLAN tags seen so far.
 */
static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};
1768
/*
 * Parse item VLAN. Up to SFC_MAE_MATCH_VLAN_MAX_NTAGS (two) tags are
 * supported; the tag index is tracked in pdata->nb_vlan_tags. TCI is
 * converted to MAE representation immediately, while inner EtherType
 * and tag presence criteria are stashed in the parsing context for
 * deferred processing (see sfc_mae_rule_process_pattern_data()).
 */
static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	/* Per-tag presence mask / value bits stashed by earlier items */
	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_mask,
		&pdata->has_ivlan_mask,
	};
	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
		&pdata->has_ovlan_value,
		&pdata->has_ivlan_value,
	};
	boolean_t *cur_tag_presence_bit_mp;
	boolean_t *cur_tag_presence_bit_vp;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];

	/*
	 * A previous item may have explicitly matched on "no (more) VLAN"
	 * (presence mask set, presence value clear); a VLAN item at this
	 * position would then contradict the pattern.
	 */
	if (*cur_tag_presence_bit_mp == B_TRUE &&
	    *cur_tag_presence_bit_vp == B_FALSE) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"The previous item enforces no (more) VLAN, "
				"so the current item (VLAN) must not exist");
	}

	/* Select the locator group of flocs_vlan for the current tag. */
	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));
	/*
	 * This only means that the field is supported by the driver and libefx.
	 * Support on NIC level will be checked when all items have been parsed.
	 */
	supp_mask.has_more_vlan = 1;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *et = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		/*
		 * Remember various match criteria in the parsing context.
		 * sfc_mae_rule_process_pattern_data() will consider them
		 * altogether when the rest of the items have been parsed.
		 * Slot [nb_vlan_tags + 1] is used since ethertypes[0]
		 * belongs to item ETH.
		 */
		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
		if (item_mask->has_more_vlan) {
			if (pdata->nb_vlan_tags ==
			    SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Can't use 'has_more_vlan' in "
					"the second item VLAN");
			}
			pdata->has_ivlan_mask = B_TRUE;
			if (item_spec->has_more_vlan)
				pdata->has_ivlan_value = B_TRUE;
		}

		/* Convert TCI to MAE representation right now. */
		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
					ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	++(pdata->nb_vlan_tags);

	return 0;
}
1871
/* Field locators for item IPV4 (offsets within struct rte_flow_item_ipv4) */
static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};
1904
1905 static int
1906 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1907                              struct sfc_flow_parse_ctx *ctx,
1908                              struct rte_flow_error *error)
1909 {
1910         rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1911         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1912         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1913         struct rte_flow_item_ipv4 supp_mask;
1914         const uint8_t *spec = NULL;
1915         const uint8_t *mask = NULL;
1916         int rc;
1917
1918         sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1919                                      &supp_mask, sizeof(supp_mask));
1920
1921         rc = sfc_flow_parse_init(item,
1922                                  (const void **)&spec, (const void **)&mask,
1923                                  (const void *)&supp_mask,
1924                                  &rte_flow_item_ipv4_mask,
1925                                  sizeof(struct rte_flow_item_ipv4), error);
1926         if (rc != 0)
1927                 return rc;
1928
1929         pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1930         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1931
1932         if (spec != NULL) {
1933                 const struct rte_flow_item_ipv4 *item_spec;
1934                 const struct rte_flow_item_ipv4 *item_mask;
1935
1936                 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1937                 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1938
1939                 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1940                 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1941         } else {
1942                 return 0;
1943         }
1944
1945         return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1946                                   ctx_mae, error);
1947 }
1948
/*
 * Field locators for item IPV6 (offsets within struct rte_flow_item_ipv6).
 * The Traffic Class bits reside inside hdr.vtc_flow and are handled
 * separately by sfc_mae_rule_parse_item_ipv6().
 */
static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};
1975
/*
 * Parse item IPV6. In addition to the fields described by flocs_ipv6,
 * the Traffic Class bits are extracted from hdr.vtc_flow and written
 * to EFX_MAE_FIELD_IP_TOS explicitly.
 */
static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * Declare support for the Traffic Class bits by writing the TC
	 * mask to the beginning of supp_mask; this assumes hdr.vtc_flow
	 * is the first field of the item — NOTE(review): verify against
	 * struct rte_flow_item_ipv6 layout.
	 */
	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	/* The preceding EtherType must be that of IPv6. */
	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		/* Stash the L4 protocol criteria for deferred processing. */
		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

	/* Extract the Traffic Class value from the first 32 bits of spec. */
	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	/* Extract the Traffic Class mask the same way. */
	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}
2048
/* Field locators for item TCP (offsets within struct rte_flow_item_tcp) */
static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 * Both hdr.data_off and hdr.tcp_flags are covered as a
		 * single contiguous two-byte span.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};
2072
2073 static int
2074 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
2075                             struct sfc_flow_parse_ctx *ctx,
2076                             struct rte_flow_error *error)
2077 {
2078         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2079         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2080         struct rte_flow_item_tcp supp_mask;
2081         const uint8_t *spec = NULL;
2082         const uint8_t *mask = NULL;
2083         int rc;
2084
2085         /*
2086          * When encountered among outermost items, item TCP is invalid.
2087          * Check which match specification is being constructed now.
2088          */
2089         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2092                                           "TCP in outer frame is invalid");
2093         }
2094
2095         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
2096                                      &supp_mask, sizeof(supp_mask));
2097
2098         rc = sfc_flow_parse_init(item,
2099                                  (const void **)&spec, (const void **)&mask,
2100                                  (const void *)&supp_mask,
2101                                  &rte_flow_item_tcp_mask,
2102                                  sizeof(struct rte_flow_item_tcp), error);
2103         if (rc != 0)
2104                 return rc;
2105
2106         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
2107         pdata->l3_next_proto_restriction_mask = 0xff;
2108
2109         if (spec == NULL)
2110                 return 0;
2111
2112         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
2113                                   ctx_mae, error);
2114 }
2115
/* Field locators for item UDP (offsets within struct rte_flow_item_udp) */
static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};
2128
2129 static int
2130 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
2131                             struct sfc_flow_parse_ctx *ctx,
2132                             struct rte_flow_error *error)
2133 {
2134         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2135         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2136         struct rte_flow_item_udp supp_mask;
2137         const uint8_t *spec = NULL;
2138         const uint8_t *mask = NULL;
2139         int rc;
2140
2141         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
2142                                      &supp_mask, sizeof(supp_mask));
2143
2144         rc = sfc_flow_parse_init(item,
2145                                  (const void **)&spec, (const void **)&mask,
2146                                  (const void *)&supp_mask,
2147                                  &rte_flow_item_udp_mask,
2148                                  sizeof(struct rte_flow_item_udp), error);
2149         if (rc != 0)
2150                 return rc;
2151
2152         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
2153         pdata->l3_next_proto_restriction_mask = 0xff;
2154
2155         if (spec == NULL)
2156                 return 0;
2157
2158         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
2159                                   ctx_mae, error);
2160 }
2161
/*
 * Field locators for tunnel items (VXLAN/Geneve/NVGRE). No field_id is
 * given: the VNI/VSID is written explicitly by
 * sfc_mae_rule_parse_item_tunnel(); this entry only contributes to the
 * supported fields mask.
 */
static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};
2172
2173 /*
2174  * An auxiliary registry which allows using non-encap. field IDs
2175  * directly when building a match specification of type ACTION.
2176  *
2177  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
2178  */
static const efx_mae_field_id_t field_ids_no_remap[] = {
/* Identity mapping: every listed field maps to itself. */
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
	FIELD_ID_NO_REMAP(HAS_OVLAN),
	FIELD_ID_NO_REMAP(HAS_IVLAN),

#undef FIELD_ID_NO_REMAP
};
2205
2206 /*
2207  * An auxiliary registry which allows using "ENC" field IDs
2208  * when building a match specification of type OUTER.
2209  *
2210  * See sfc_mae_rule_encap_parse_init().
2211  */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
/* Remap each listed field to its EFX_MAE_FIELD_ENC_ counterpart. */
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),

#undef FIELD_ID_REMAP_TO_ENCAP
};
2237
/*
 * Parse a tunnel item (VXLAN/Geneve/NVGRE). This is the point where
 * parsing switches from the OUTER to the ACTION match specification:
 * deferred outer pattern data is flushed, the pattern data storage is
 * reset, and subsequent items use non-encap. field IDs. The 24-bit
 * VNI/VSID, if specified, is written to EFX_MAE_FIELD_ENC_VNET_ID_BE.
 */
static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
		/*
		 * As a workaround, pattern processing has started from
		 * this (tunnel) item. No pattern data to process yet.
		 */
	} else {
		/*
		 * We're about to start processing inner frame items.
		 * Process pattern data that has been deferred so far
		 * and reset pattern data storage.
		 */
		rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
		if (rc != 0)
			return rc;
	}

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 ctx_mae->tunnel_def_mask,
				 ctx_mae->tunnel_def_mask_size,  error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}
2320
/*
 * Pattern items supported by MAE-backed flows. For each item, "layer"
 * is the protocol layer the item itself describes, and "prev_layer"
 * constrains the layer of the item that may precede it; presumably
 * these are enforced by the generic parser (see sfc_flow_parse_pattern
 * callers) — confirm against sfc_flow.c.
 */
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_MARK,
                .name = "MARK",
                /* META item: its position in the pattern does not matter. */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_mark,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .name = "PORT_ID",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_port_id,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
                .name = "PORT_REPRESENTOR",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ethdev_based,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
                .name = "REPRESENTED_PORT",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ethdev_based,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
                .name = "PHY_PORT",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_phy_port,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_PF,
                .name = "PF",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_pf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VF,
                .name = "VF",
                /*
                 * In terms of RTE flow, this item is a META one,
                 * and its position in the pattern does not matter.
                 */
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vf,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .name = "ETH",
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .name = "VLAN",
                /* Stays at L2 so that another VLAN item (double tag) may follow. */
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .name = "IPV4",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .name = "IPV6",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .name = "TCP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .name = "UDP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .name = "VXLAN",
                /* Resets the layer to START: inner frame ETH may follow. */
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .name = "GENEVE",
                /* Resets the layer to START: inner frame ETH may follow. */
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .name = "NVGRE",
                /* NVGRE follows L3 directly (no UDP); resets the layer. */
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_tunnel,
        },
};
2475
/*
 * Convert the outermost (encapsulation) part of the parsed pattern into
 * an MAE outer rule entry, reusing an identical existing entry whenever
 * possible, and prime the action rule specification to match either on
 * the outer rule ID or on the recirculation ID (tunnel offload GROUP).
 *
 * On success, *rulep is the outer rule reference (or NULL when the
 * pattern has no encapsulation) and ctx->match_spec_outer ownership
 * has been transferred to the outer rule entry.
 */
static int
sfc_mae_rule_process_outer(struct sfc_adapter *sa,
                           struct sfc_mae_parse_ctx *ctx,
                           struct sfc_mae_outer_rule **rulep,
                           struct rte_flow_error *error)
{
        efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
        int rc;

        /* No encapsulation in the pattern means no outer rule is needed. */
        if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
                *rulep = NULL;
                goto no_or_id;
        }

        SFC_ASSERT(ctx->match_spec_outer != NULL);

        if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Inconsistent pattern (outer)");
        }

        /* Reuse an existing outer rule entry with an identical spec. */
        *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
                                           ctx->encap_type);
        if (*rulep != NULL) {
                /* Attached to an existing entry: this spec is a duplicate. */
                efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
        } else {
                rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
                                            ctx->encap_type, rulep);
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                        "Failed to process the pattern");
                }
        }

        /* The spec has now been tracked by the outer rule entry. */
        ctx->match_spec_outer = NULL;

no_or_id:
        switch (ctx->ft_rule_type) {
        case SFC_FT_RULE_NONE:
                break;
        case SFC_FT_RULE_JUMP:
                /* No action rule */
                return 0;
        case SFC_FT_RULE_GROUP:
                /*
                 * Match on recirculation ID rather than
                 * on the outer rule allocation handle.
                 */
                rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
                                        SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
                }
                return 0;
        default:
                SFC_ASSERT(B_FALSE);
        }

        /*
         * In MAE, lookup sequence comprises outer parse, outer rule lookup,
         * inner parse (when some outer rule is hit) and action rule lookup.
         * If the currently processed flow does not come with an outer rule,
         * its action rule must be available only for packets which miss in
         * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
         * in the action rule specification; this ensures correct behaviour.
         *
         * If, on the other hand, this flow does have an outer rule, its ID
         * may be unknown at the moment (not yet allocated), but OR_ID mask
         * has to be set to 0xffffffff anyway for correct class comparisons.
         * When the outer rule has been allocated, this match field will be
         * overridden by sfc_mae_outer_rule_enable() to use the right value.
         */
        rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
                                                  &invalid_rule_id);
        if (rc != 0) {
                /* Undo the outer rule reference obtained above. */
                if (*rulep != NULL)
                        sfc_mae_outer_rule_del(sa, *rulep);

                *rulep = NULL;

                return rte_flow_error_set(error, rc,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Failed to process the pattern");
        }

        return 0;
}
2568
2569 static int
2570 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2571                                 struct sfc_mae_parse_ctx *ctx)
2572 {
2573         struct sfc_flow_tunnel *ft;
2574         uint32_t user_mark;
2575
2576         if (spec == NULL) {
2577                 sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2578                 return EINVAL;
2579         }
2580
2581         ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2582         if (ft == NULL) {
2583                 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2584                 return EINVAL;
2585         }
2586
2587         if (ft->refcnt == 0) {
2588                 sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2589                         ft->id);
2590                 return ENOENT;
2591         }
2592
2593         user_mark = SFC_FT_GET_USER_MARK(spec->id);
2594         if (user_mark != 0) {
2595                 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2596                 return EINVAL;
2597         }
2598
2599         sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2600
2601         ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2602         ctx->ft = ft;
2603
2604         return 0;
2605 }
2606
/*
 * Pre-scan the pattern before per-item parsing starts: detect tunnel
 * offload item MARK (GROUP rules) and the tunnel item (VXLAN / GENEVE /
 * NVGRE), remember the tunnel item's default mask, and prepare the
 * match specification that the outermost ("ENC") items will target.
 */
static int
sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
                              struct sfc_mae_parse_ctx *ctx,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item *pattern = ctx->pattern;
        struct sfc_mae *mae = &sa->mae;
        uint8_t recirc_id = 0;
        int rc;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        /*
         * Walk the pattern until the first tunnel item or END is found,
         * pre-parsing item MARK on the way and skipping everything else.
         */
        for (;;) {
                switch (pattern->type) {
                case RTE_FLOW_ITEM_TYPE_MARK:
                        rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
                                                             ctx);
                        if (rc != 0) {
                                return rte_flow_error_set(error, rc,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  pattern, "tunnel offload: GROUP: invalid item MARK");
                        }
                        ++pattern;
                        continue;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
                        ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
                        ctx->tunnel_def_mask_size =
                                sizeof(rte_flow_item_vxlan_mask);
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
                        ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
                        ctx->tunnel_def_mask_size =
                                sizeof(rte_flow_item_geneve_mask);
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
                        ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
                        ctx->tunnel_def_mask_size =
                                sizeof(rte_flow_item_nvgre_mask);
                        break;
                case RTE_FLOW_ITEM_TYPE_END:
                        break;
                default:
                        ++pattern;
                        continue;
                };

                break;
        }

        /* Validate the tunnel item (or its absence) against the rule type. */
        switch (ctx->ft_rule_type) {
        case SFC_FT_RULE_NONE:
                /* Plain non-encapsulated flow: nothing more to prepare. */
                if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
                        return 0;
                break;
        case SFC_FT_RULE_JUMP:
                /* A JUMP rule pattern must consist of META items only. */
                if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  pattern, "tunnel offload: JUMP: invalid item");
                }
                ctx->encap_type = ctx->ft->encap_type;
                break;
        case SFC_FT_RULE_GROUP:
                if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "tunnel offload: GROUP: missing tunnel item");
                } else if (ctx->encap_type != ctx->ft->encap_type) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  pattern, "tunnel offload: GROUP: tunnel type mismatch");
                }

                /*
                 * The HW/FW hasn't got support for the use of "ENC" fields in
                 * action rules (except the VNET_ID one) yet. As a workaround,
                 * start parsing the pattern from the tunnel item.
                 */
                ctx->pattern = pattern;
                break;
        default:
                SFC_ASSERT(B_FALSE);
                break;
        }

        if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "OR: unsupported tunnel type");
        }

        /* Prepare the match specification that the outer items will use. */
        switch (ctx->ft_rule_type) {
        case SFC_FT_RULE_JUMP:
                recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
                /* FALLTHROUGH */
        case SFC_FT_RULE_NONE:
                if (ctx->priority >= mae->nb_outer_rule_prios_max) {
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                        NULL, "OR: unsupported priority level");
                }

                rc = efx_mae_match_spec_init(sa->nic,
                                             EFX_MAE_RULE_OUTER, ctx->priority,
                                             &ctx->match_spec_outer);
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "OR: failed to initialise the match specification");
                }

                /*
                 * Outermost items comprise a match
                 * specification of type OUTER.
                 */
                ctx->match_spec = ctx->match_spec_outer;

                /* Outermost items use "ENC" EFX MAE field IDs. */
                ctx->field_ids_remap = field_ids_remap_to_encap;

                rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
                                                      recirc_id);
                if (rc != 0) {
                        return rte_flow_error_set(error, rc,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "OR: failed to initialise RECIRC_ID");
                }
                break;
        case SFC_FT_RULE_GROUP:
                /* Outermost items -> "ENC" match fields in the action rule. */
                ctx->field_ids_remap = field_ids_remap_to_encap;
                ctx->match_spec = ctx->match_spec_action;

                /* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
                ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
                break;
        default:
                SFC_ASSERT(B_FALSE);
                break;
        }

        return 0;
}
2758
2759 static void
2760 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2761                               struct sfc_mae_parse_ctx *ctx)
2762 {
2763         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2764                 return;
2765
2766         if (ctx->match_spec_outer != NULL)
2767                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2768 }
2769
/*
 * Parse the RTE flow pattern into MAE match specification(s): an action
 * rule specification and, for encapsulated traffic, an outer rule.
 * On success, ownership of the specifications is transferred to *spec;
 * on failure, everything allocated here is released before returning.
 */
int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
                           const struct rte_flow_item pattern[],
                           struct sfc_flow_spec_mae *spec,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx ctx_mae;
        unsigned int priority_shift = 0;
        struct sfc_flow_parse_ctx ctx;
        int rc;

        memset(&ctx_mae, 0, sizeof(ctx_mae));
        ctx_mae.ft_rule_type = spec->ft_rule_type;
        ctx_mae.priority = spec->priority;
        ctx_mae.ft = spec->ft;
        ctx_mae.sa = sa;

        switch (ctx_mae.ft_rule_type) {
        case SFC_FT_RULE_JUMP:
                /*
                 * By design, this flow should be represented solely by the
                 * outer rule. But the HW/FW hasn't got support for setting
                 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
                 * does it support outer rule counters. As a workaround, an
                 * action rule of lower priority is used to do the job.
                 */
                priority_shift = 1;

                /* FALLTHROUGH */
        case SFC_FT_RULE_GROUP:
                if (ctx_mae.priority != 0) {
                        /*
                         * Because of the above workaround, deny the
                         * use of priorities to JUMP and GROUP rules.
                         */
                        rc = rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
                                "tunnel offload: priorities are not supported");
                        goto fail_priority_check;
                }

                /* FALLTHROUGH */
        case SFC_FT_RULE_NONE:
                rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
                                             spec->priority + priority_shift,
                                             &ctx_mae.match_spec_action);
                if (rc != 0) {
                        rc = rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "AR: failed to initialise the match specification");
                        goto fail_init_match_spec_action;
                }
                break;
        default:
                SFC_ASSERT(B_FALSE);
                break;
        }

        /*
         * As a preliminary setting, assume that there is no encapsulation
         * in the pattern. That is, pattern items are about to comprise a
         * match specification of type ACTION and use non-encap. field IDs.
         *
         * sfc_mae_rule_encap_parse_init() below may override this.
         */
        ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
        ctx_mae.match_spec = ctx_mae.match_spec_action;
        ctx_mae.field_ids_remap = field_ids_no_remap;
        ctx_mae.pattern = pattern;

        ctx.type = SFC_FLOW_PARSE_CTX_MAE;
        ctx.mae = &ctx_mae;

        rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
        if (rc != 0)
                goto fail_encap_parse_init;

        /*
         * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
         * GROUP rule. Remember its properties for later use.
         */
        spec->ft_rule_type = ctx_mae.ft_rule_type;
        spec->ft = ctx_mae.ft;

        /* Parse from ctx_mae.pattern: encap parse may have advanced it. */
        rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
                                    ctx_mae.pattern, &ctx, error);
        if (rc != 0)
                goto fail_parse_pattern;

        rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
        if (rc != 0)
                goto fail_process_pattern_data;

        rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
        if (rc != 0)
                goto fail_process_outer;

        /* JUMP rules leave match_spec_action NULL-checked paths intact. */
        if (ctx_mae.match_spec_action != NULL &&
            !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
                rc = rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                        "Inconsistent pattern");
                goto fail_validate_match_spec_action;
        }

        spec->match_spec = ctx_mae.match_spec_action;

        return 0;

fail_validate_match_spec_action:
fail_process_outer:
fail_process_pattern_data:
fail_parse_pattern:
        sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

fail_encap_parse_init:
        if (ctx_mae.match_spec_action != NULL)
                efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
fail_priority_check:
        return rc;
}
2893
2894 /*
2895  * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2897  * That is, related RTE flow actions need to be tracked as parts of a whole
2898  * so that they can be combined into a single action and submitted to MAE
2899  * representation of a given rule's action set.
2900  *
2901  * Each RTE flow action provided by an application gets classified as
2902  * one belonging to some bundle type. If an action is not supposed to
2903  * belong to any bundle, or if this action is END, it is described as
2904  * one belonging to a dummy bundle of type EMPTY.
2905  *
2906  * A currently tracked bundle will be submitted if a repeating
2907  * action or an action of different bundle type follows.
2908  */
2909
/* Classification of RTE flow actions with regard to bundling. */
enum sfc_mae_actions_bundle_type {
        /* Dummy bundle: self-sufficient actions, including END */
        SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
        /* OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP combination */
        SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};
2914
struct sfc_mae_actions_bundle {
        /* Type of the bundle currently being collected */
        enum sfc_mae_actions_bundle_type        type;

        /* Indicates actions already tracked by the current bundle */
        uint64_t                                actions_mask;

        /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
        rte_be16_t                              vlan_push_tpid; /* TPID (ethertype) */
        rte_be16_t                              vlan_push_tci;  /* PCP | VID, big-endian */
};
2925
2926 /*
2927  * Combine configuration of RTE flow actions tracked by the bundle into a
2928  * single action and submit the result to MAE action set specification.
2929  * Do nothing in the case of dummy action bundle.
2930  */
2931 static int
2932 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2933                               efx_mae_actions_t *spec)
2934 {
2935         int rc = 0;
2936
2937         switch (bundle->type) {
2938         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2939                 break;
2940         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2941                 rc = efx_mae_action_set_populate_vlan_push(
2942                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2943                 break;
2944         default:
2945                 SFC_ASSERT(B_FALSE);
2946                 break;
2947         }
2948
2949         return rc;
2950 }
2951
2952 /*
2953  * Given the type of the next RTE flow action in the line, decide
2954  * whether a new bundle is about to start, and, if this is the case,
2955  * submit and reset the current bundle.
2956  */
2957 static int
2958 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2959                             struct sfc_mae_actions_bundle *bundle,
2960                             efx_mae_actions_t *spec,
2961                             struct rte_flow_error *error)
2962 {
2963         enum sfc_mae_actions_bundle_type bundle_type_new;
2964         int rc;
2965
2966         switch (action->type) {
2967         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2968         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2969         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2970                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2971                 break;
2972         default:
2973                 /*
2974                  * Self-sufficient actions, including END, are handled in this
2975                  * case. No checks for unsupported actions are needed here
2976                  * because parsing doesn't occur at this point.
2977                  */
2978                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2979                 break;
2980         }
2981
2982         if (bundle_type_new != bundle->type ||
2983             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2984                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2985                 if (rc != 0)
2986                         goto fail_submit;
2987
2988                 memset(bundle, 0, sizeof(*bundle));
2989         }
2990
2991         bundle->type = bundle_type_new;
2992
2993         return 0;
2994
2995 fail_submit:
2996         return rte_flow_error_set(error, rc,
2997                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2998                         "Failed to request the (group of) action(s)");
2999 }
3000
/* Collect the TPID (ethertype) of OF_PUSH_VLAN into the VLAN_PUSH bundle. */
static void
sfc_mae_rule_parse_action_of_push_vlan(
                            const struct rte_flow_action_of_push_vlan *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        bundle->vlan_push_tpid = conf->ethertype;
}
3008
/* Merge the 12-bit VID (big-endian) into the bundle's TCI (low 12 bits). */
static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
                            const struct rte_flow_action_of_set_vlan_vid *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        /* Mask off anything beyond the 12-bit VID field of the TCI. */
        bundle->vlan_push_tci |= (conf->vlan_vid &
                                  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}
3017
/* Merge the 3-bit PCP into bits 15:13 of the bundle's TCI (big-endian). */
static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
                            const struct rte_flow_action_of_set_vlan_pcp *conf,
                            struct sfc_mae_actions_bundle *bundle)
{
        /* PCP occupies the topmost 3 bits of the 16-bit TCI. */
        uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
                                           RTE_LEN2MASK(3, uint8_t)) << 13;

        bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}
3028
/* A non-VOID pattern item together with its protocol header span. */
struct sfc_mae_parsed_item {
        const struct rte_flow_item      *item;
        /* NOTE(review): presumably the header's offset in the bounce
         * buffer; not referenced in visible code — confirm with callers. */
        size_t                          proto_header_ofst;
        /* Size of the item's protocol header copy, in bytes */
        size_t                          proto_header_size;
};
3034
3035 /*
3036  * For each 16-bit word of the given header, override
3037  * bits enforced by the corresponding 16-bit mask.
3038  */
3039 static void
3040 sfc_mae_header_force_item_masks(uint8_t *header_buf,
3041                                 const struct sfc_mae_parsed_item *parsed_items,
3042                                 unsigned int nb_parsed_items)
3043 {
3044         unsigned int item_idx;
3045
3046         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
3047                 const struct sfc_mae_parsed_item *parsed_item;
3048                 const struct rte_flow_item *item;
3049                 size_t proto_header_size;
3050                 size_t ofst;
3051
3052                 parsed_item = &parsed_items[item_idx];
3053                 proto_header_size = parsed_item->proto_header_size;
3054                 item = parsed_item->item;
3055
3056                 for (ofst = 0; ofst < proto_header_size;
3057                      ofst += sizeof(rte_be16_t)) {
3058                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
3059                         const rte_be16_t *w_maskp;
3060                         const rte_be16_t *w_specp;
3061
3062                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
3063                         w_specp = RTE_PTR_ADD(item->spec, ofst);
3064
3065                         *wp &= ~(*w_maskp);
3066                         *wp |= (*w_specp & *w_maskp);
3067                 }
3068
3069                 header_buf += proto_header_size;
3070         }
3071 }
3072
/*
 * Defaults written into the encap. header after the item specs are
 * copied in; sfc_mae_header_force_item_masks() then re-applies the
 * specs, so these values survive only where the item masks leave
 * the corresponding bits unenforced.
 */
/* IPv4 time-to-live: 64 */
#define SFC_IPV4_TTL_DEF	0x40
/* IPv6 version (6) / traffic class (0) / flow label (0) word */
#define SFC_IPV6_VTC_FLOW_DEF	0x60000000
/* IPv6 hop limit: 255 */
#define SFC_IPV6_HOP_LIMITS_DEF	0xff
/* VXLAN flags word with the "I" (VNI valid) bit set (RFC 7348) */
#define SFC_VXLAN_FLAGS_DEF	0x08000000
3077
/*
 * Handle action VXLAN_ENCAP: render the encapsulation header definition
 * (itself a flow item pattern: ETH / [VLAN [/ VLAN]] / (IPV4 | IPV6) /
 * UDP / VXLAN / END) into a binary header in the bounce buffer and
 * request MAE action ENCAP in the action set specification.
 *
 * @param mae		MAE context providing the bounce buffer
 * @param conf		Action configuration holding the item pattern
 * @param spec		MAE action set specification to populate
 * @param error		Flow error structure filled in on failure
 *
 * @return 0 on success; a negative errno set via rte_flow_error_set()
 *	   on failure.
 */
static int
sfc_mae_rule_parse_action_vxlan_encap(
			    struct sfc_mae *mae,
			    const struct rte_flow_action_vxlan_encap *conf,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	struct rte_flow_item *pattern = conf->definition;
	uint8_t *buf = bounce_eh->buf;

	/* This array will keep track of non-VOID pattern items. */
	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
						2 /* VLAN tags */ +
						1 /* IPv4 or IPv6 */ +
						1 /* UDP */ +
						1 /* VXLAN */];
	unsigned int nb_parsed_items = 0;

	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
	/*
	 * The fix-ups at the end write through both ipv4 and ipv6
	 * unconditionally; whichever of the two protocols is absent
	 * from the pattern keeps pointing into this scratch area so
	 * that those writes are harmless.
	 */
	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
				  sizeof(struct rte_ipv6_hdr))];
	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_udp_hdr *udp = NULL;
	unsigned int nb_vlan_tags = 0;
	size_t next_proto_ofst = 0;
	size_t ethertype_ofst = 0;
	uint64_t exp_items;
	int rc;

	if (pattern == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"The encap. header definition is NULL");
	}

	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
	bounce_eh->size = 0;

	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
		struct sfc_mae_parsed_item *parsed_item;
		const uint64_t exp_items_extra_vlan[] = {
			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
		};
		size_t proto_header_size;
		rte_be16_t *ethertypep;
		uint8_t *next_protop;
		uint8_t *buf_cur;

		if (pattern->spec == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item spec in the encap. header");
		}

		if (pattern->mask == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item mask in the encap. header");
		}

		if (pattern->last != NULL) {
			/* This is not a match pattern, so disallow range. */
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Range item in the encap. header");
		}

		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
			/* Handle VOID separately, for clarity. */
			continue;
		}

		/* exp_items is the set of item types valid at this point. */
		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unexpected item in the encap. header");
		}

		parsed_item = &parsed_items[nb_parsed_items];
		buf_cur = buf + bounce_eh->size;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ether_hdr);

			ethertype_ofst = eth_ethertype_ofst;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vlan_hdr);

			/*
			 * Make the outermost EtherType QinQ; for a single
			 * tag the next write (ethertype_ofst is still the
			 * Ethernet one) overrides it back to plain VLAN.
			 */
			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);

			ethertype_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_vlan_hdr, eth_proto);

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			/* A second VLAN is allowed only after the first. */
			exp_items |= exp_items_extra_vlan[nb_vlan_tags];

			++nb_vlan_tags;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv4_hdr);

			/* Patch the EtherType of the preceding layer. */
			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);

			next_proto_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_ipv4_hdr, next_proto_id);

			ipv4 = (struct rte_ipv4_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv6_hdr);

			/* Patch the EtherType of the preceding layer. */
			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);

			next_proto_ofst = bounce_eh->size +
					  offsetof(struct rte_ipv6_hdr, proto);

			ipv6 = (struct rte_ipv6_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_udp_hdr);

			/* Patch the L4 protocol of the preceding IP layer. */
			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
			*next_protop = IPPROTO_UDP;

			udp = (struct rte_udp_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vxlan_hdr);

			vxlan = (struct rte_vxlan_hdr *)buf_cur;

			/* UDP precedes VXLAN here (enforced by exp_items). */
			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
			udp->dgram_len = RTE_BE16(sizeof(*udp) +
						  sizeof(*vxlan));
			udp->dgram_cksum = 0;

			exp_items = 0;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unknown item in the encap. header");
		}

		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
			return rte_flow_error_set(error, E2BIG,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"The encap. header is too big");
		}

		/* Masks are applied per 16-bit word; odd sizes won't do. */
		if ((proto_header_size & 1) != 0) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Odd layer size in the encap. header");
		}

		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
		bounce_eh->size += proto_header_size;

		parsed_item->item = pattern;
		parsed_item->proto_header_size = proto_header_size;
		++nb_parsed_items;
	}

	if (exp_items != 0) {
		/* Parsing item VXLAN would have reset exp_items to 0. */
		return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"No item VXLAN in the encap. header");
	}

	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
				      sizeof(*vxlan));
	/* The HW cannot compute this checksum. */
	ipv4->hdr_checksum = 0;
	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);

	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
	ipv6->payload_len = udp->dgram_len;

	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);

	/* Take care of the masks: re-apply the enforced spec bits. */
	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);

	rc = efx_mae_action_set_populate_encap(spec);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "failed to request action ENCAP");
	}

	return rc;
}
3336
3337 static int
3338 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3339                                const struct rte_flow_action_mark *conf,
3340                                const struct sfc_flow_spec_mae *spec_mae,
3341                                efx_mae_actions_t *spec)
3342 {
3343         int rc;
3344
3345         if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3346                 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3347         } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3348                 sfc_err(sa, "the mark value is too large");
3349                 return EINVAL;
3350         }
3351
3352         rc = efx_mae_action_set_populate_mark(spec, conf->id);
3353         if (rc != 0)
3354                 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3355
3356         return rc;
3357 }
3358
3359 static int
3360 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3361                                 const struct rte_flow_action_count *conf
3362                                         __rte_unused,
3363                                 efx_mae_actions_t *spec)
3364 {
3365         int rc;
3366
3367         if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3368                 sfc_err(sa,
3369                         "counter queue is not configured for COUNT action");
3370                 rc = EINVAL;
3371                 goto fail_counter_queue_uninit;
3372         }
3373
3374         if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
3375                 rc = EINVAL;
3376                 goto fail_no_service_core;
3377         }
3378
3379         rc = efx_mae_action_set_populate_count(spec);
3380         if (rc != 0) {
3381                 sfc_err(sa,
3382                         "failed to populate counters in MAE action set: %s",
3383                         rte_strerror(rc));
3384                 goto fail_populate_count;
3385         }
3386
3387         return 0;
3388
3389 fail_populate_count:
3390 fail_no_service_core:
3391 fail_counter_queue_uninit:
3392
3393         return rc;
3394 }
3395
3396 static int
3397 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3398                                    const struct rte_flow_action_phy_port *conf,
3399                                    efx_mae_actions_t *spec)
3400 {
3401         efx_mport_sel_t mport;
3402         uint32_t phy_port;
3403         int rc;
3404
3405         if (conf->original != 0)
3406                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3407         else
3408                 phy_port = conf->index;
3409
3410         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3411         if (rc != 0) {
3412                 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3413                         phy_port, strerror(rc));
3414                 return rc;
3415         }
3416
3417         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3418         if (rc != 0) {
3419                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3420                         mport.sel, strerror(rc));
3421         }
3422
3423         return rc;
3424 }
3425
3426 static int
3427 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3428                                 const struct rte_flow_action_vf *vf_conf,
3429                                 efx_mae_actions_t *spec)
3430 {
3431         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3432         efx_mport_sel_t mport;
3433         uint32_t vf;
3434         int rc;
3435
3436         if (vf_conf == NULL)
3437                 vf = EFX_PCI_VF_INVALID;
3438         else if (vf_conf->original != 0)
3439                 vf = encp->enc_vf;
3440         else
3441                 vf = vf_conf->id;
3442
3443         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3444         if (rc != 0) {
3445                 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3446                         encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3447                         strerror(rc));
3448                 return rc;
3449         }
3450
3451         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3452         if (rc != 0) {
3453                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3454                         mport.sel, strerror(rc));
3455         }
3456
3457         return rc;
3458 }
3459
3460 static int
3461 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3462                                   const struct rte_flow_action_port_id *conf,
3463                                   efx_mae_actions_t *spec)
3464 {
3465         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3466         struct sfc_mae *mae = &sa->mae;
3467         efx_mport_sel_t mport;
3468         uint16_t port_id;
3469         int rc;
3470
3471         if (conf->id > UINT16_MAX)
3472                 return EOVERFLOW;
3473
3474         port_id = (conf->original != 0) ? sas->port_id : conf->id;
3475
3476         rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3477                                              port_id, &mport);
3478         if (rc != 0) {
3479                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3480                         port_id, strerror(rc));
3481                 return rc;
3482         }
3483
3484         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3485         if (rc != 0) {
3486                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3487                         mport.sel, strerror(rc));
3488         }
3489
3490         return rc;
3491 }
3492
3493 static int
3494 sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
3495                 const struct rte_flow_action_ethdev *conf,
3496                 efx_mae_actions_t *spec)
3497 {
3498         struct sfc_mae *mae = &sa->mae;
3499         efx_mport_sel_t mport;
3500         int rc;
3501
3502         rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3503                                              conf->port_id, &mport);
3504         if (rc != 0) {
3505                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3506                         conf->port_id, strerror(rc));
3507                 return rc;
3508         }
3509
3510         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3511         if (rc != 0) {
3512                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3513                         mport.sel, strerror(rc));
3514         }
3515
3516         return rc;
3517 }
3518
3519 static int
3520 sfc_mae_rule_parse_action_represented_port(struct sfc_adapter *sa,
3521                 const struct rte_flow_action_ethdev *conf,
3522                 efx_mae_actions_t *spec)
3523 {
3524         struct sfc_mae *mae = &sa->mae;
3525         efx_mport_sel_t mport;
3526         int rc;
3527
3528         rc = sfc_mae_switch_get_entity_mport(mae->switch_domain_id,
3529                                              conf->port_id, &mport);
3530         if (rc != 0) {
3531                 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3532                         conf->port_id, strerror(rc));
3533                 return rc;
3534         }
3535
3536         rc = efx_mae_action_set_populate_deliver(spec, &mport);
3537         if (rc != 0) {
3538                 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3539                         mport.sel, strerror(rc));
3540         }
3541
3542         return rc;
3543 }
3544
/*
 * Human-readable action names for error logging. Indexed by
 * RTE_FLOW_ACTION_TYPE_*; action types not listed here map to NULL
 * (designated initializer semantics) and are simply not logged.
 */
static const char * const action_names[] = {
	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = "OF_DEC_NW_TTL",
	[RTE_FLOW_ACTION_TYPE_DEC_TTL] = "DEC_TTL",
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
	[RTE_FLOW_ACTION_TYPE_COUNT] = "COUNT",
	[RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
	[RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
	[RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
	[RTE_FLOW_ACTION_TYPE_PF] = "PF",
	[RTE_FLOW_ACTION_TYPE_VF] = "VF",
	[RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = "PORT_REPRESENTOR",
	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = "REPRESENTED_PORT",
	[RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
};
3566
/*
 * Translate one flow action into the MAE action set specification.
 *
 * VLAN-push-related actions only accumulate state in @bundle; the
 * other actions populate @spec directly. On success, the action type
 * is recorded in bundle->actions_mask.
 *
 * NOTE(review): SFC_BUILD_SET_OVERFLOW presumably is a build-time
 * check that the action type fits in the 64-bit actions_mask —
 * confirm against the macro definition.
 *
 * @return 0 on success; a negative errno set via rte_flow_error_set()
 *	   on failure.
 */
static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
			  const struct rte_flow_action *action,
			  const struct sfc_flow_spec_mae *spec_mae,
			  struct sfc_mae_actions_bundle *bundle,
			  efx_mae_actions_t *spec,
			  struct rte_flow_error *error)
{
	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	/* Set when a specific rte_flow error has already been filed. */
	bool custom_error = B_FALSE;
	int rc = 0;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
				       bundle->actions_mask);
		/* DECAP requires a VXLAN outer rule to strip. */
		if (outer_rule == NULL ||
		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
			rc = EINVAL;
		else
			rc = efx_mae_action_set_populate_decap(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_vlan_pop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL:
	case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		/* Both action types share one TTL-decrement primitive. */
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
				       bundle->actions_mask);
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DEC_TTL,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_decr_ip_ttl(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
							   action->conf,
							   spec, error);
		custom_error = B_TRUE;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
				       bundle->actions_mask);
		/* FLAG delivery must have been negotiated beforehand. */
		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
			rc = efx_mae_action_set_populate_flag(spec);
		} else {
			rc = rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						action,
						"flag delivery has not been negotiated");
			custom_error = B_TRUE;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
				       bundle->actions_mask);
		/* Tunnel offload JUMP rules may use MARK internally. */
		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
		    spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
			rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
							    spec_mae, spec);
		} else {
			rc = rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						action,
						"mark delivery has not been negotiated");
			custom_error = B_TRUE;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_representor(sa,
				action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_represented_port(sa,
				action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_drop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_JUMP:
		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
			/* Workaround. See sfc_flow_parse_rte_to_mae() */
			break;
		}
		/* FALLTHROUGH */
	default:
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Unsupported action");
	}

	if (rc == 0) {
		bundle->actions_mask |= (1ULL << action->type);
	} else if (!custom_error) {
		/* Log the readable action name, when known. */
		if (action->type < RTE_DIM(action_names)) {
			const char *action_name = action_names[action->type];

			if (action_name != NULL) {
				sfc_err(sa, "action %s was rejected: %s",
					action_name, strerror(rc));
			}
		}
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "Failed to request the action");
	}

	return rc;
}
3725
/*
 * Reset the encap. header bounce buffer so that a subsequent flow
 * parse does not pick up header data left over from a previous one.
 */
static void
sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
{
	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
}
3731
3732 static int
3733 sfc_mae_process_encap_header(struct sfc_adapter *sa,
3734                              const struct sfc_mae_bounce_eh *bounce_eh,
3735                              struct sfc_mae_encap_header **encap_headerp)
3736 {
3737         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
3738                 encap_headerp = NULL;
3739                 return 0;
3740         }
3741
3742         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
3743         if (*encap_headerp != NULL)
3744                 return 0;
3745
3746         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
3747 }
3748
/*
 * Translate the flow's action array into an MAE action set and bind the
 * result (an attached pre-existing entry or a newly added one) to
 * spec_mae->action_set.
 *
 * @param sa        Adapter context.
 * @param actions   Action array terminated by RTE_FLOW_ACTION_TYPE_END.
 * @param spec_mae  MAE flow specification being built.
 * @param error     Flow error structure to report failures through.
 *
 * @return 0 on success; a positive errno value on failure, in which
 *         case @error has been filled in.
 */
int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			   const struct rte_flow_action actions[],
			   struct sfc_flow_spec_mae *spec_mae,
			   struct rte_flow_error *error)
{
	struct sfc_mae_encap_header *encap_header = NULL;
	struct sfc_mae_actions_bundle bundle = {0};
	struct sfc_flow_tunnel *counter_ft = NULL;
	uint64_t *ft_group_hit_counter = NULL;
	const struct rte_flow_action *action;
	struct sfc_mae *mae = &sa->mae;
	unsigned int n_count = 0;
	efx_mae_actions_t *spec;
	int rc;

	/*
	 * Zero rte_errno so that, on failure, it can be used below to
	 * tell whether a parser has already set a custom flow error.
	 */
	rte_errno = 0;

	if (actions == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL actions");
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
	if (rc != 0)
		goto fail_action_set_spec_init;

	/* Pre-scan the array to learn how many COUNT actions it carries. */
	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
			++n_count;
	}

	if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
		/* JUMP rules don't decapsulate packets. GROUP rules do. */
		rc = efx_mae_action_set_populate_decap(spec);
		if (rc != 0)
			goto fail_enforce_ft_decap;

		if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
			/*
			 * The user opted not to use action COUNT in this rule,
			 * but the counter should be enabled implicitly because
			 * packets hitting this rule contribute to the tunnel's
			 * total number of hits. See sfc_mae_counter_get().
			 */
			rc = efx_mae_action_set_populate_count(spec);
			if (rc != 0)
				goto fail_enforce_ft_count;

			n_count = 1;
		}
	}

	/* Cleanup after previous encap. header bounce buffer usage. */
	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);

	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;

		rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
					       &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;
	}

	/* Flush the last (possibly still open) action bundle. */
	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
	if (rc != 0)
		goto fail_rule_parse_action;

	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
	if (rc != 0)
		goto fail_process_encap_header;

	/* Both explicit and implicit (tunnel) counters are included here. */
	if (n_count > 1) {
		rc = ENOTSUP;
		sfc_err(sa, "too many count actions requested: %u", n_count);
		goto fail_nb_count;
	}

	switch (spec_mae->ft_rule_type) {
	case SFC_FT_RULE_NONE:
		break;
	case SFC_FT_RULE_JUMP:
		/* Workaround. See sfc_flow_parse_rte_to_mae() */
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		if (rc != 0)
			goto fail_workaround_jump_delivery;

		counter_ft = spec_mae->ft;
		break;
	case SFC_FT_RULE_GROUP:
		/*
		 * Packets that go to the rule's AR have FT mark set (from the
		 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
		 * packets. The user may have provided their own action
		 * MARK above, so don't check the return value here.
		 */
		(void)efx_mae_action_set_populate_mark(spec, 0);

		ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
		break;
	default:
		SFC_ASSERT(B_FALSE);
	}

	/* Try to reuse an identical action set registered earlier. */
	spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
							 n_count, spec);
	if (spec_mae->action_set != NULL) {
		/* The attached entry owns these resources; drop the copies. */
		sfc_mae_encap_header_del(sa, encap_header);
		efx_mae_action_set_spec_fini(sa->nic, spec);
		return 0;
	}

	rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
				    ft_group_hit_counter, counter_ft, n_count,
				    &spec_mae->action_set);
	if (rc != 0)
		goto fail_action_set_add;

	return 0;

fail_action_set_add:
fail_workaround_jump_delivery:
fail_nb_count:
	sfc_mae_encap_header_del(sa, encap_header);

fail_process_encap_header:
fail_rule_parse_action:
	efx_mae_action_set_spec_fini(sa->nic, spec);

fail_enforce_ft_count:
fail_enforce_ft_decap:
fail_action_set_spec_init:
	/*
	 * Provide a generic flow error unless a parser has already set a
	 * custom one (detected via rte_errno being non-zero).
	 */
	if (rc > 0 && rte_errno == 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "Failed to process the action");
	}
	return rc;
}
3894
3895 static bool
3896 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
3897                         const efx_mae_match_spec_t *left,
3898                         const efx_mae_match_spec_t *right)
3899 {
3900         bool have_same_class;
3901         int rc;
3902
3903         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
3904                                            &have_same_class);
3905
3906         return (rc == 0) ? have_same_class : false;
3907 }
3908
/*
 * Verify the class of the outer rule's match specification by comparing
 * it with the classes of already registered outer rules. Since the HW
 * cannot validate a rule directly, matching any active rule's class
 * is taken as proof of support; otherwise the rule is accepted with a
 * diagnostic message.
 */
static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
				struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	struct sfc_mae_outer_rule *entry;
	struct sfc_mae *mae = &sa->mae;

	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
		/* An active rule is reused. Its class is wittingly valid. */
		return 0;
	}

	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
			      sfc_mae_outer_rules, entries) {
		const efx_mae_match_spec_t *left = entry->match_spec;
		const efx_mae_match_spec_t *right = rule->match_spec;

		/* Skip the rule being verified itself. */
		if (entry == rule)
			continue;

		if (sfc_mae_rules_class_cmp(sa, left, right))
			return 0;
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for outer frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
3939
/*
 * Verify the class of an action (inner) rule's match specification by
 * comparing it with the classes of MAE rules in the active flow list.
 * As with outer rules, no direct HW validation is available, so an
 * unmatched class is accepted with a diagnostic message.
 */
static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
				 struct sfc_flow_spec_mae *spec)
{
	const struct rte_flow *entry;

	/* Nothing to verify for flows without an action rule match. */
	if (spec->match_spec == NULL)
		return 0;

	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
		const struct sfc_flow_spec *entry_spec = &entry->spec;
		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
		const efx_mae_match_spec_t *left = es_mae->match_spec;
		const efx_mae_match_spec_t *right = spec->match_spec;

		switch (entry_spec->type) {
		case SFC_FLOW_SPEC_FILTER:
			/* Ignore VNIC-level flows */
			break;
		case SFC_FLOW_SPEC_MAE:
			if (sfc_mae_rules_class_cmp(sa, left, right))
				return 0;
			break;
		default:
			SFC_ASSERT(false);
		}
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		 "support for inner frame pattern items is not guaranteed; "
		 "other than that, the items are valid from SW standpoint");
	return 0;
}
3973
3974 /**
3975  * Confirm that a given flow can be accepted by the FW.
3976  *
3977  * @param sa
3978  *   Software adapter context
3979  * @param flow
3980  *   Flow to be verified
3981  * @return
3982  *   Zero on success and non-zero in the case of error.
3983  *   A special value of EAGAIN indicates that the adapter is
3984  *   not in started state. This state is compulsory because
3985  *   it only makes sense to compare the rule class of the flow
3986  *   being validated with classes of the active rules.
3987  *   Such classes are wittingly supported by the FW.
3988  */
3989 int
3990 sfc_mae_flow_verify(struct sfc_adapter *sa,
3991                     struct rte_flow *flow)
3992 {
3993         struct sfc_flow_spec *spec = &flow->spec;
3994         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3995         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3996         int rc;
3997
3998         SFC_ASSERT(sfc_adapter_is_locked(sa));
3999
4000         if (sa->state != SFC_ETHDEV_STARTED)
4001                 return EAGAIN;
4002
4003         if (outer_rule != NULL) {
4004                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
4005                 if (rc != 0)
4006                         return rc;
4007         }
4008
4009         return sfc_mae_action_rule_class_verify(sa, spec_mae);
4010 }
4011
/*
 * Enable (insert into HW) all resources behind a MAE flow: the outer
 * rule (if any), the action set (if any) and the action rule itself.
 * On failure, everything enabled so far is rolled back.
 *
 * @return 0 on success; a positive errno value on failure.
 */
int
sfc_mae_flow_insert(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_fw_rsrc *fw_rsrc;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
					       spec_mae->match_spec);
		if (rc != 0)
			goto fail_outer_rule_enable;
	}

	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
		/*
		 * Snapshot the tunnel's current group hit count so that
		 * JUMP rule hits can later be computed relative to it.
		 * NOTE(review): presumably consumed by counter readout —
		 * confirm against sfc_mae_counter_get().
		 */
		spec_mae->ft->reset_jump_hit_counter =
			spec_mae->ft->group_hit_counter;
	}

	/* Flows without an action rule have nothing more to enable. */
	if (action_set == NULL) {
		sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
		return 0;
	}

	rc = sfc_mae_action_set_enable(sa, action_set);
	if (rc != 0)
		goto fail_action_set_enable;

	/* Counter readout machinery must be running before insertion. */
	if (action_set->n_counters > 0) {
		rc = sfc_mae_counter_start(sa);
		if (rc != 0) {
			sfc_err(sa, "failed to start MAE counters support: %s",
				rte_strerror(rc));
			goto fail_mae_counter_start;
		}
	}

	fw_rsrc = &action_set->fw_rsrc;

	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
					NULL, &fw_rsrc->aset_id,
					&spec_mae->rule_id);
	if (rc != 0)
		goto fail_action_rule_insert;

	sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);

	return 0;

fail_action_rule_insert:
fail_mae_counter_start:
	sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

fail_outer_rule_enable:
	return rc;
}
4079
/*
 * Disable (remove from HW) all resources behind a MAE flow, in reverse
 * order of sfc_mae_flow_insert(). Removal is best-effort: a failure to
 * remove the action rule is logged but does not stop the teardown, and
 * the function always returns 0.
 */
int
sfc_mae_flow_remove(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	if (action_set == NULL) {
		sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
		goto skip_action_rule;
	}

	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);

	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
	if (rc != 0) {
		sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
			flow, spec_mae->rule_id.id, strerror(rc));
	}
	sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
		flow, spec_mae->rule_id.id);
	/* Invalidate the ID regardless of the removal outcome. */
	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	sfc_mae_action_set_disable(sa, action_set);

skip_action_rule:
	if (outer_rule != NULL)
		sfc_mae_outer_rule_disable(sa, outer_rule);

	return 0;
}
4114
/*
 * Handle an RTE_FLOW_ACTION_TYPE_COUNT query on a MAE flow: find the
 * counter matching the (optional) counter ID in the action's conf and
 * fill in the caller-provided rte_flow_query_count structure.
 *
 * @return 0 on success; a positive errno value wrapped into @error
 *         otherwise.
 */
static int
sfc_mae_query_counter(struct sfc_adapter *sa,
		      struct sfc_flow_spec_mae *spec,
		      const struct rte_flow_action *action,
		      struct rte_flow_query_count *data,
		      struct rte_flow_error *error)
{
	struct sfc_mae_action_set *action_set = spec->action_set;
	const struct rte_flow_action_count *conf = action->conf;
	unsigned int i;
	int rc;

	if (action_set == NULL || action_set->n_counters == 0) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queried flow rule does not have count actions");
	}

	for (i = 0; i < action_set->n_counters; i++) {
		/*
		 * Get the first available counter of the flow rule if
		 * counter ID is not specified, provided that this
		 * counter is not an automatic (implicit) one.
		 */
		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
			continue;

		rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
					 &action_set->counters[i], data);
		if (rc != 0) {
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"Queried flow rule counter action is invalid");
		}

		/* Only the first matching counter is reported. */
		return 0;
	}

	return rte_flow_error_set(error, ENOENT,
				  RTE_FLOW_ERROR_TYPE_ACTION, action,
				  "no such flow rule action or such count ID");
}
4157
4158 int
4159 sfc_mae_flow_query(struct rte_eth_dev *dev,
4160                    struct rte_flow *flow,
4161                    const struct rte_flow_action *action,
4162                    void *data,
4163                    struct rte_flow_error *error)
4164 {
4165         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
4166         struct sfc_flow_spec *spec = &flow->spec;
4167         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4168
4169         switch (action->type) {
4170         case RTE_FLOW_ACTION_TYPE_COUNT:
4171                 return sfc_mae_query_counter(sa, spec_mae, action,
4172                                              data, error);
4173         default:
4174                 return rte_flow_error_set(error, ENOTSUP,
4175                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4176                         "Query for action of this type is not supported");
4177         }
4178 }
4179
/*
 * Install the pair of MAE rules that implements switchdev mode:
 * forward traffic from the PF m-port to the PHY m-port and back.
 * Requires admin MAE privilege; a no-op when switchdev is disabled.
 *
 * @return 0 on success; a positive errno value on failure.
 */
int
sfc_mae_switchdev_init(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t pf;
	efx_mport_sel_t phy;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sa->switchdev) {
		sfc_log_init(sa, "switchdev is not enabled - skip");
		return 0;
	}

	if (mae->status != SFC_MAE_STATUS_ADMIN) {
		rc = ENOTSUP;
		sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
		goto fail_no_mae;
	}

	/* PF m-port: the PF itself, i.e. no VF part in the selector. */
	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &pf);
	if (rc != 0) {
		sfc_err(sa, "failed get PF mport");
		goto fail_pf_get;
	}

	rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
	if (rc != 0) {
		sfc_err(sa, "failed get PHY mport");
		goto fail_phy_get;
	}

	/* Lowest priority so that user flows can override the defaults. */
	rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
			SFC_MAE_RULE_PRIO_LOWEST,
			&mae->switchdev_rule_pf_to_ext);
	if (rc != 0) {
		sfc_err(sa, "failed add MAE rule to forward from PF to PHY");
		goto fail_pf_add;
	}

	rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
			SFC_MAE_RULE_PRIO_LOWEST,
			&mae->switchdev_rule_ext_to_pf);
	if (rc != 0) {
		sfc_err(sa, "failed add MAE rule to forward from PHY to PF");
		goto fail_phy_add;
	}

	sfc_log_init(sa, "done");

	return 0;

fail_phy_add:
	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);

fail_pf_add:
fail_phy_get:
fail_pf_get:
fail_no_mae:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
4245
4246 void
4247 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
4248 {
4249         struct sfc_mae *mae = &sa->mae;
4250
4251         if (!sa->switchdev)
4252                 return;
4253
4254         sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4255         sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
4256 }