net/sfc: support VLAN presence match in transfer rules
drivers/net/sfc/sfc_mae.c (dpdk.git)
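This change lets transfer flows match on VLAN tag presence via the
"has_vlan" (item ETH) and "has_more_vlan" (item VLAN) meta fields,
which the driver maps to the MAE fields HAS_OVLAN and HAS_IVLAN. A
minimal sketch of how an application might request such a match
(hypothetical usage code, not part of this patch):

    /* Match packets carrying at least one VLAN tag. */
    struct rte_flow_item_eth eth_spec = { .has_vlan = 1 };
    struct rte_flow_item_eth eth_mask = { .has_vlan = 1 };
    const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };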
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

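/*
 * Derive the m-port corresponding to this PF/VF ("entity"); it is
 * used by sfc_mae_attach() for match specifications and for the RTE
 * switch port mapping.
 */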
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

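/*
 * Initialise MAE support: check the FW capability, query the limits,
 * assign the entity m-port as well as the RTE switch domain and port
 * IDs, and allocate a bounce buffer big enough for any encap. header.
 */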
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /*
         * As of now, the driver does not support representors, so
         * RTE ethdev MPORT simply matches that of the entity.
         */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        sfc_log_init(sa, "allocate encap. header bounce buffer");
        bounce_eh->buf_size = limits.eml_encap_header_size_limit;
        bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                    bounce_eh->buf_size, 0);
        if (bounce_eh->buf == NULL) {
                rc = ENOMEM;
                goto fail_mae_alloc_bounce_eh;
        }

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        mae->encap_types_supported = limits.eml_encap_types_supported;
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        rte_free(mae->bounce_eh.buf);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

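/*
 * Outer (tunnel) rules are reference-counted and shared across flows:
 * attach looks up an existing rule with an equal match specification
 * and encap. type, whilst add creates a new list entry with refcnt 1.
 */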
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        sfc_dbg(sa, "attaching to outer_rule=%p", rule);
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        sfc_dbg(sa, "added outer_rule=%p", rule);

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
            rule->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
                        rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
        }

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);

        sfc_dbg(sa, "deleted outer_rule=%p", rule);
}

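/*
 * The FW resource behind an outer rule is managed lazily: enable
 * inserts the rule on its first use and disable removes it on the
 * last one, with fw_rsrc.refcnt tracking the users in between.
 */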
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable outer_rule=%p: %s",
                                rule, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
                        rule, fw_rsrc->rule_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
                        rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
                                rule, fw_rsrc->rule_id.id);
                } else {
                        sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
                                rule, fw_rsrc->rule_id.id, strerror(rc));
                }
                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

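/*
 * Encap. headers are deduplicated by comparing the bounce buffer
 * contents with the data of every existing list entry.
 */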
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        sfc_dbg(sa, "attaching to encap_header=%p",
                                encap_header);
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        sfc_dbg(sa, "added encap_header=%p", encap_header);

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
            encap_header->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
                        encap_header, encap_header->fw_rsrc.eh_id.id,
                        encap_header->fw_rsrc.refcnt);
        }

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);

        sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}

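/*
 * Allocate the FW resource behind the encap. header on first use and
 * fill its ID into the action set specification. A freshly allocated
 * header is freed again if the ID cannot be filled in.
 */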
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable encap_header=%p: %s",
                                encap_header, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
                        encap_header, fw_rsrc->eh_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
                        encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
                                encap_header, fw_rsrc->eh_id.id);
                } else {
                        sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
                                encap_header, fw_rsrc->eh_id.id, strerror(rc));
                }
                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                if (action_set->encap_header == encap_header &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        sfc_dbg(sa, "attaching to action_set=%p", action_set);
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL)
                return ENOMEM;

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        sfc_dbg(sa, "added action_set=%p", action_set);

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
            action_set->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
                        action_set, action_set->fw_rsrc.aset_id.id,
                        action_set->fw_rsrc.refcnt);
        }

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);

        sfc_dbg(sa, "deleted action_set=%p", action_set);
}

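/*
 * Enabling an action set first enables its encap. header (if any)
 * and then allocates the action set itself; the header is disabled
 * again if that allocation fails.
 */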
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        sfc_mae_encap_header_disable(sa, encap_header);

                        sfc_err(sa, "failed to enable action_set=%p: %s",
                                action_set, strerror(rc));

                        return rc;
                }

                sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
                        action_set, fw_rsrc->aset_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
                        action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
                                action_set, fw_rsrc->aset_id.id);
                } else {
                        sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
                                action_set, fw_rsrc->aset_id.id, strerror(rc));
                }
                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                sfc_mae_encap_header_disable(sa, action_set->encap_header);
        }

        --(fw_rsrc->refcnt);
}

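/*
 * Release all MAE objects referenced by the flow: the outer rule,
 * the action set and the match specification proper.
 */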
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

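/*
 * Write out the EtherTypes collected during parsing. For instance,
 * for a pattern of ETH / VLAN / VLAN / IPV4, the outcome is:
 *
 *   VLAN0_PROTO_BE <- "type" of item ETH        (outer TPID)
 *   VLAN1_PROTO_BE <- "inner_type" of 1st VLAN  (inner TPID)
 *   ETHER_TYPE_BE  <- "inner_type" of 2nd VLAN  (L3 EtherType)
 */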
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is an L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

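/*
 * Post-process the match criteria stashed during item parsing: check
 * the TPIDs against the supported set, honour the EtherType and IP
 * protocol restrictions imposed by L3/L4 items and set the VLAN tag
 * presence bits (HAS_OVLAN / HAS_IVLAN) when tag presence is either
 * requested explicitly or implied by an otherwise empty item VLAN.
 */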
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single item VLAN is followed by an L3 item, the
                 * value of "type" in item ETH can't be a double-tagging
                 * TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                unsigned int tpid_idx;

                /*
                 * This loop can have only two iterations. On the second one,
                 * drop the outer tag presence enforcement bit because inner
                 * tag presence automatically implies outer tag presence.
                 */
                enforce_tag_presence[0] = B_FALSE;

                if (ethertypes[ethertype_idx].mask == RTE_BE16(0)) {
                        if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
                                enforce_tag_presence[ethertype_idx] = B_TRUE;

                        /* No match on this field, and no value check. */
                        nb_supported_tpids = 1;
                        continue;
                }

                /* Only an exact match is supported. */
                if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (ethertypes[ethertype_idx].value ==
                            supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value =
                            pdata->innermost_ethertype_restriction.value;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value !=
                           pdata->innermost_ethertype_restriction.value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now that the number of VLAN tags is known, set the fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is a valid L3 EtherType (or 0x0000/0x0000) and the
         * last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                            pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_OVLAN],
                                                enforce_tag_presence[0] ||
                                                pdata->has_ovlan_value);
                if (rc != 0)
                        goto fail;
        }

        if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
                rc = efx_mae_match_spec_bit_set(ctx->match_spec,
                                                fremap[EFX_MAE_FIELD_HAS_IVLAN],
                                                enforce_tag_presence[1] ||
                                                pdata->has_ivlan_value);
                if (rc != 0)
                        goto fail;
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

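/*
 * Copy the fields described by the locator table from the item
 * spec / mask into the match specification, skipping the fields
 * marked for deferred handling.
 */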
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
                   struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[fl->field_id],
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

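/*
 * Parse item ETH. The MAC addresses are matched immediately, whilst
 * the EtherType and the "has_vlan" outer tag presence request are
 * stashed in the pattern data for deferred processing.
 */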
static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));
        supp_mask.has_vlan = 1;

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_eth *item_spec;
                const struct rte_flow_item_eth *item_mask;

                item_spec = (const struct rte_flow_item_eth *)spec;
                item_mask = (const struct rte_flow_item_eth *)mask;

                /*
                 * Remember various match criteria in the parsing context.
                 * sfc_mae_rule_process_pattern_data() will consider them
                 * altogether when the rest of the items have been parsed.
                 */
                ethertypes[0].value = item_spec->type;
                ethertypes[0].mask = item_mask->type;
                if (item_mask->has_vlan) {
                        pdata->has_ovlan_mask = B_TRUE;
                        if (item_spec->has_vlan)
                                pdata->has_ovlan_value = B_TRUE;
                }
        } else {
                /*
                 * The specification is empty. The overall pattern
                 * validity will be enforced at the end of parsing.
                 * See sfc_mae_rule_process_pattern_data().
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
        /* Outermost tag */
        {
                EFX_MAE_FIELD_VLAN0_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },

        /* Innermost tag */
        {
                EFX_MAE_FIELD_VLAN1_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },
};

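/*
 * Parse item VLAN (up to two tags). TCI is matched immediately,
 * whilst the TPID ("inner_type") and the "has_more_vlan" inner tag
 * presence request are stashed for deferred processing.
 */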
1270 static int
1271 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1272                              struct sfc_flow_parse_ctx *ctx,
1273                              struct rte_flow_error *error)
1274 {
1275         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1276         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1277         boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1278                 &pdata->has_ovlan_mask,
1279                 &pdata->has_ivlan_mask,
1280         };
1281         boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1282                 &pdata->has_ovlan_value,
1283                 &pdata->has_ivlan_value,
1284         };
1285         boolean_t *cur_tag_presence_bit_mp;
1286         boolean_t *cur_tag_presence_bit_vp;
1287         const struct sfc_mae_field_locator *flocs;
1288         struct rte_flow_item_vlan supp_mask;
1289         const uint8_t *spec = NULL;
1290         const uint8_t *mask = NULL;
1291         unsigned int nb_flocs;
1292         int rc;
1293
1294         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1295
1296         if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1297                 return rte_flow_error_set(error, ENOTSUP,
1298                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1299                                 "Can't match that many VLAN tags");
1300         }
1301
1302         cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
1303         cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
1304
1305         if (*cur_tag_presence_bit_mp == B_TRUE &&
1306             *cur_tag_presence_bit_vp == B_FALSE) {
1307                 return rte_flow_error_set(error, EINVAL,
1308                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
1309                                 "The previous item enforces no (more) VLAN, "
1310                                 "so the current item (VLAN) must not exist");
1311         }
1312
1313         nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
1314         flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
1315
1316         sfc_mae_item_build_supp_mask(flocs, nb_flocs,
1317                                      &supp_mask, sizeof(supp_mask));
1318         /*
1319          * This only means that the field is supported by the driver and libefx.
1320          * Support on NIC level will be checked when all items have been parsed.
1321          */
1322         supp_mask.has_more_vlan = 1;
1323
1324         rc = sfc_flow_parse_init(item,
1325                                  (const void **)&spec, (const void **)&mask,
1326                                  (const void *)&supp_mask,
1327                                  &rte_flow_item_vlan_mask,
1328                                  sizeof(struct rte_flow_item_vlan), error);
1329         if (rc != 0)
1330                 return rc;
1331
1332         if (spec != NULL) {
1333                 struct sfc_mae_ethertype *et = pdata->ethertypes;
1334                 const struct rte_flow_item_vlan *item_spec;
1335                 const struct rte_flow_item_vlan *item_mask;
1336
1337                 item_spec = (const struct rte_flow_item_vlan *)spec;
1338                 item_mask = (const struct rte_flow_item_vlan *)mask;
1339
1340                 /*
1341                  * Remember various match criteria in the parsing context.
1342                  * sfc_mae_rule_process_pattern_data() will consider them
1343                  * altogether when the rest of the items have been parsed.
1344                  */
1345                 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
1346                 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
1347                 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
1348                 if (item_mask->has_more_vlan) {
1349                         if (pdata->nb_vlan_tags ==
1350                             SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1351                                 return rte_flow_error_set(error, ENOTSUP,
1352                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1353                                         "Can't use 'has_more_vlan' in "
1354                                         "the second item VLAN");
1355                         }
1356                         pdata->has_ivlan_mask = B_TRUE;
1357                         if (item_spec->has_more_vlan)
1358                                 pdata->has_ivlan_value = B_TRUE;
1359                 }
1360
1361                 /* Convert TCI to MAE representation right now. */
1362                 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
1363                                         ctx_mae, error);
1364                 if (rc != 0)
1365                         return rc;
1366         }
1367
1368         ++(pdata->nb_vlan_tags);
1369
1370         return 0;
1371 }
1372
1373 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1374         {
1375                 EFX_MAE_FIELD_SRC_IP4_BE,
1376                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1377                 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1378         },
1379         {
1380                 EFX_MAE_FIELD_DST_IP4_BE,
1381                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1382                 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1383         },
1384         {
1385                 /*
1386                  * This locator is used only for building supported fields mask.
1387                  * The field is handled by sfc_mae_rule_process_pattern_data().
1388                  */
1389                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1390                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1391                 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1392         },
1393         {
1394                 EFX_MAE_FIELD_IP_TOS,
1395                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1396                                  hdr.type_of_service),
1397                 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1398         },
1399         {
1400                 EFX_MAE_FIELD_IP_TTL,
1401                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1402                 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1403         },
1404 };
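
/*
 * The IP protocol match deferred above is not set in the match specification
 * by the item parser itself: the IPv4/IPv6 parsers below record the value and
 * the mask in the pattern data, items TCP and UDP record a corresponding
 * restriction, and sfc_mae_rule_process_pattern_data() considers them
 * altogether once the whole pattern has been parsed.
 */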
1405
1406 static int
1407 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1408                              struct sfc_flow_parse_ctx *ctx,
1409                              struct rte_flow_error *error)
1410 {
1411         rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1412         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1413         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1414         struct rte_flow_item_ipv4 supp_mask;
1415         const uint8_t *spec = NULL;
1416         const uint8_t *mask = NULL;
1417         int rc;
1418
1419         sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1420                                      &supp_mask, sizeof(supp_mask));
1421
1422         rc = sfc_flow_parse_init(item,
1423                                  (const void **)&spec, (const void **)&mask,
1424                                  (const void *)&supp_mask,
1425                                  &rte_flow_item_ipv4_mask,
1426                                  sizeof(struct rte_flow_item_ipv4), error);
1427         if (rc != 0)
1428                 return rc;
1429
1430         pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1431         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1432
1433         if (spec != NULL) {
1434                 const struct rte_flow_item_ipv4 *item_spec;
1435                 const struct rte_flow_item_ipv4 *item_mask;
1436
1437                 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1438                 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1439
1440                 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1441                 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1442         } else {
1443                 return 0;
1444         }
1445
1446         return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1447                                   ctx_mae, error);
1448 }
1449
1450 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1451         {
1452                 EFX_MAE_FIELD_SRC_IP6_BE,
1453                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1454                 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1455         },
1456         {
1457                 EFX_MAE_FIELD_DST_IP6_BE,
1458                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1459                 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1460         },
1461         {
1462                 /*
1463                  * This locator is used only for building the supported fields mask.
1464                  * The field is handled by sfc_mae_rule_process_pattern_data().
1465                  */
1466                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1467                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1468                 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1469         },
1470         {
1471                 EFX_MAE_FIELD_IP_TTL,
1472                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1473                 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1474         },
1475 };
1476
1477 static int
1478 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1479                              struct sfc_flow_parse_ctx *ctx,
1480                              struct rte_flow_error *error)
1481 {
1482         rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1483         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1484         const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1485         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1486         struct rte_flow_item_ipv6 supp_mask;
1487         const uint8_t *spec = NULL;
1488         const uint8_t *mask = NULL;
1489         rte_be32_t vtc_flow_be;
1490         uint32_t vtc_flow;
1491         uint8_t tc_value;
1492         uint8_t tc_mask;
1493         int rc;
1494
1495         sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1496                                      &supp_mask, sizeof(supp_mask));
1497
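             /*
              * Also permit matching on the TC bits of the vtc_flow word, the
              * first field of the IPv6 header; that match is handled below by
              * means of EFX_MAE_FIELD_IP_TOS rather than by a field locator.
              */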
1498         vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1499         memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1500
1501         rc = sfc_flow_parse_init(item,
1502                                  (const void **)&spec, (const void **)&mask,
1503                                  (const void *)&supp_mask,
1504                                  &rte_flow_item_ipv6_mask,
1505                                  sizeof(struct rte_flow_item_ipv6), error);
1506         if (rc != 0)
1507                 return rc;
1508
1509         pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1510         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1511
1512         if (spec != NULL) {
1513                 const struct rte_flow_item_ipv6 *item_spec;
1514                 const struct rte_flow_item_ipv6 *item_mask;
1515
1516                 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1517                 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1518
1519                 pdata->l3_next_proto_value = item_spec->hdr.proto;
1520                 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1521         } else {
1522                 return 0;
1523         }
1524
1525         rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1526                                 ctx_mae, error);
1527         if (rc != 0)
1528                 return rc;
1529
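             /*
              * vtc_flow is IP version (4 bits), traffic class (8 bits) and
              * flow label (20 bits); RTE_IPV6_HDR_TC_MASK and
              * RTE_IPV6_HDR_TC_SHIFT extract the traffic class octet.
              */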
1530         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1531         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1532         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1533
1534         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1535         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1536         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1537
1538         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1539                                           fremap[EFX_MAE_FIELD_IP_TOS],
1540                                           sizeof(tc_value), &tc_value,
1541                                           sizeof(tc_mask), &tc_mask);
1542         if (rc != 0) {
1543                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1544                                 NULL, "Failed to process item fields");
1545         }
1546
1547         return 0;
1548 }
1549
1550 static const struct sfc_mae_field_locator flocs_tcp[] = {
1551         {
1552                 EFX_MAE_FIELD_L4_SPORT_BE,
1553                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1554                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1555         },
1556         {
1557                 EFX_MAE_FIELD_L4_DPORT_BE,
1558                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1559                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1560         },
1561         {
1562                 EFX_MAE_FIELD_TCP_FLAGS_BE,
1563                 /*
1564                  * The size and offset are picked intentionally: the target
1565                  * MAE field is oversized (16 bits), and this mapping relies
1566                  * on the MAE field being big-endian. See the example below.
1567                  */
1568                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1569                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1570                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1571         },
1572 };
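
/*
 * For example, a match on SYN alone (hdr.tcp_flags spec 0x02, mask 0xff,
 * hdr.data_off fully wild) is programmed into EFX_MAE_FIELD_TCP_FLAGS_BE
 * as the 16-bit big-endian value 0x0002 under the mask 0x00ff.
 */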
1573
1574 static int
1575 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1576                             struct sfc_flow_parse_ctx *ctx,
1577                             struct rte_flow_error *error)
1578 {
1579         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1580         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1581         struct rte_flow_item_tcp supp_mask;
1582         const uint8_t *spec = NULL;
1583         const uint8_t *mask = NULL;
1584         int rc;
1585
1586         /*
1587          * When encountered among outermost items, item TCP is invalid.
1588          * Check which match specification is being constructed now.
1589          */
1590         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1591                 return rte_flow_error_set(error, EINVAL,
1592                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1593                                           "TCP in outer frame is invalid");
1594         }
1595
1596         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1597                                      &supp_mask, sizeof(supp_mask));
1598
1599         rc = sfc_flow_parse_init(item,
1600                                  (const void **)&spec, (const void **)&mask,
1601                                  (const void *)&supp_mask,
1602                                  &rte_flow_item_tcp_mask,
1603                                  sizeof(struct rte_flow_item_tcp), error);
1604         if (rc != 0)
1605                 return rc;
1606
1607         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1608         pdata->l3_next_proto_restriction_mask = 0xff;
1609
1610         if (spec == NULL)
1611                 return 0;
1612
1613         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1614                                   ctx_mae, error);
1615 }
1616
1617 static const struct sfc_mae_field_locator flocs_udp[] = {
1618         {
1619                 EFX_MAE_FIELD_L4_SPORT_BE,
1620                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1621                 offsetof(struct rte_flow_item_udp, hdr.src_port),
1622         },
1623         {
1624                 EFX_MAE_FIELD_L4_DPORT_BE,
1625                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1626                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1627         },
1628 };
1629
1630 static int
1631 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1632                             struct sfc_flow_parse_ctx *ctx,
1633                             struct rte_flow_error *error)
1634 {
1635         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1636         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1637         struct rte_flow_item_udp supp_mask;
1638         const uint8_t *spec = NULL;
1639         const uint8_t *mask = NULL;
1640         int rc;
1641
1642         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1643                                      &supp_mask, sizeof(supp_mask));
1644
1645         rc = sfc_flow_parse_init(item,
1646                                  (const void **)&spec, (const void **)&mask,
1647                                  (const void *)&supp_mask,
1648                                  &rte_flow_item_udp_mask,
1649                                  sizeof(struct rte_flow_item_udp), error);
1650         if (rc != 0)
1651                 return rc;
1652
1653         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1654         pdata->l3_next_proto_restriction_mask = 0xff;
1655
1656         if (spec == NULL)
1657                 return 0;
1658
1659         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1660                                   ctx_mae, error);
1661 }
1662
1663 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1664         {
1665                 /*
1666                  * The size and offset values are relevant
1667                  * for Geneve and NVGRE, too.
1668                  */
1669                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1670                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1671         },
1672 };
1673
1674 /*
1675  * An auxiliary registry which allows using non-encap. field IDs
1676  * directly when building a match specification of type ACTION.
1677  *
1678  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1679  */
1680 static const efx_mae_field_id_t field_ids_no_remap[] = {
1681 #define FIELD_ID_NO_REMAP(_field) \
1682         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1683
1684         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1685         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1686         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1687         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1688         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1689         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1690         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1691         FIELD_ID_NO_REMAP(SRC_IP4_BE),
1692         FIELD_ID_NO_REMAP(DST_IP4_BE),
1693         FIELD_ID_NO_REMAP(IP_PROTO),
1694         FIELD_ID_NO_REMAP(IP_TOS),
1695         FIELD_ID_NO_REMAP(IP_TTL),
1696         FIELD_ID_NO_REMAP(SRC_IP6_BE),
1697         FIELD_ID_NO_REMAP(DST_IP6_BE),
1698         FIELD_ID_NO_REMAP(L4_SPORT_BE),
1699         FIELD_ID_NO_REMAP(L4_DPORT_BE),
1700         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1701         FIELD_ID_NO_REMAP(HAS_OVLAN),
1702         FIELD_ID_NO_REMAP(HAS_IVLAN),
1703
1704 #undef FIELD_ID_NO_REMAP
1705 };
1706
1707 /*
1708  * An auxiliary registry which allows using "ENC" field IDs
1709  * when building a match specification of type OUTER.
1710  *
1711  * See sfc_mae_rule_encap_parse_init().
1712  */
1713 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1714 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1715         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1716
1717         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1718         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1719         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1720         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1721         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1722         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1723         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1724         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1725         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1726         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1727         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1728         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1729         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1730         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1731         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1732         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1733         FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
1734         FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
1735
1736 #undef FIELD_ID_REMAP_TO_ENCAP
1737 };
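
/*
 * For example, fremap[EFX_MAE_FIELD_IP_TOS] resolves to EFX_MAE_FIELD_IP_TOS
 * itself through field_ids_no_remap, but to EFX_MAE_FIELD_ENC_IP_TOS through
 * field_ids_remap_to_encap; this is what lets sfc_mae_rule_parse_item_ipv6()
 * set the TC match without knowing which specification it contributes to.
 */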
1738
1739 static int
1740 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1741                                struct sfc_flow_parse_ctx *ctx,
1742                                struct rte_flow_error *error)
1743 {
1744         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1745         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1746         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1747         const struct rte_flow_item_vxlan *vxp;
1748         uint8_t supp_mask[sizeof(uint64_t)];
1749         const uint8_t *spec = NULL;
1750         const uint8_t *mask = NULL;
1751         int rc;
1752
1753         /*
1754          * We're about to start processing inner frame items.
1755          * Process pattern data that has been deferred so far
1756          * and reset pattern data storage.
1757          */
1758         rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1759         if (rc != 0)
1760                 return rc;
1761
1762         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1763
1764         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1765                                      &supp_mask, sizeof(supp_mask));
1766
1767         /*
1768          * This tunnel item was preliminarily detected by
1769          * sfc_mae_rule_encap_parse_init(). Default mask
1770          * was also picked by that helper. Use it here.
1771          */
1772         rc = sfc_flow_parse_init(item,
1773                                  (const void **)&spec, (const void **)&mask,
1774                                  (const void *)&supp_mask,
1775                                  ctx_mae->tunnel_def_mask,
1776                                  ctx_mae->tunnel_def_mask_size, error);
1777         if (rc != 0)
1778                 return rc;
1779
1780         /*
1781          * This item and later ones comprise a
1782          * match specification of type ACTION.
1783          */
1784         ctx_mae->match_spec = ctx_mae->match_spec_action;
1785
1786         /* This item and later ones use non-encap. EFX MAE field IDs. */
1787         ctx_mae->field_ids_remap = field_ids_no_remap;
1788
1789         if (spec == NULL)
1790                 return 0;
1791
1792         /*
1793          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
1794          * Copy the 24-bit VNI, which is big-endian, at offset 1 in it.
1795          * The leading byte is 0 in both the mask and the value.
1796          */
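             /* For example, VNI 0x123456 yields the bytes { 0x00, 0x12, 0x34, 0x56 }. */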
1797         vxp = (const struct rte_flow_item_vxlan *)spec;
1798         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1799
1800         vxp = (const struct rte_flow_item_vxlan *)mask;
1801         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1802
1803         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1804                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
1805                                           sizeof(vnet_id_v), vnet_id_v,
1806                                           sizeof(vnet_id_m), vnet_id_m);
1807         if (rc != 0) {
1808                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1809                                         item, "Failed to set VXLAN VNI");
1810         }
1811
1812         return rc;
1813 }
1814
1815 static const struct sfc_flow_item sfc_flow_items[] = {
1816         {
1817                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1818                 /*
1819                  * In terms of RTE flow, this item is a META one,
1820                  * and its position in the pattern is a don't-care.
1821                  */
1822                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1823                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1824                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1825                 .parse = sfc_mae_rule_parse_item_port_id,
1826         },
1827         {
1828                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1829                 /*
1830                  * In terms of RTE flow, this item is a META one,
1831                  * and its position in the pattern is a don't-care.
1832                  */
1833                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1834                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1835                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1836                 .parse = sfc_mae_rule_parse_item_phy_port,
1837         },
1838         {
1839                 .type = RTE_FLOW_ITEM_TYPE_PF,
1840                 /*
1841                  * In terms of RTE flow, this item is a META one,
1842                  * and its position in the pattern is a don't-care.
1843                  */
1844                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1845                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1846                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1847                 .parse = sfc_mae_rule_parse_item_pf,
1848         },
1849         {
1850                 .type = RTE_FLOW_ITEM_TYPE_VF,
1851                 /*
1852                  * In terms of RTE flow, this item is a META one,
1853                  * and its position in the pattern is a don't-care.
1854                  */
1855                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1856                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1857                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1858                 .parse = sfc_mae_rule_parse_item_vf,
1859         },
1860         {
1861                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1862                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1863                 .layer = SFC_FLOW_ITEM_L2,
1864                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1865                 .parse = sfc_mae_rule_parse_item_eth,
1866         },
1867         {
1868                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1869                 .prev_layer = SFC_FLOW_ITEM_L2,
1870                 .layer = SFC_FLOW_ITEM_L2,
1871                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1872                 .parse = sfc_mae_rule_parse_item_vlan,
1873         },
1874         {
1875                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1876                 .prev_layer = SFC_FLOW_ITEM_L2,
1877                 .layer = SFC_FLOW_ITEM_L3,
1878                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1879                 .parse = sfc_mae_rule_parse_item_ipv4,
1880         },
1881         {
1882                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1883                 .prev_layer = SFC_FLOW_ITEM_L2,
1884                 .layer = SFC_FLOW_ITEM_L3,
1885                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1886                 .parse = sfc_mae_rule_parse_item_ipv6,
1887         },
1888         {
1889                 .type = RTE_FLOW_ITEM_TYPE_TCP,
1890                 .prev_layer = SFC_FLOW_ITEM_L3,
1891                 .layer = SFC_FLOW_ITEM_L4,
1892                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1893                 .parse = sfc_mae_rule_parse_item_tcp,
1894         },
1895         {
1896                 .type = RTE_FLOW_ITEM_TYPE_UDP,
1897                 .prev_layer = SFC_FLOW_ITEM_L3,
1898                 .layer = SFC_FLOW_ITEM_L4,
1899                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1900                 .parse = sfc_mae_rule_parse_item_udp,
1901         },
1902         {
1903                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1904                 .prev_layer = SFC_FLOW_ITEM_L4,
1905                 .layer = SFC_FLOW_ITEM_START_LAYER,
1906                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1907                 .parse = sfc_mae_rule_parse_item_tunnel,
1908         },
1909         {
1910                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1911                 .prev_layer = SFC_FLOW_ITEM_L4,
1912                 .layer = SFC_FLOW_ITEM_START_LAYER,
1913                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1914                 .parse = sfc_mae_rule_parse_item_tunnel,
1915         },
1916         {
1917                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1918                 .prev_layer = SFC_FLOW_ITEM_L3,
1919                 .layer = SFC_FLOW_ITEM_START_LAYER,
1920                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1921                 .parse = sfc_mae_rule_parse_item_tunnel,
1922         },
1923 };
1924
1925 static int
1926 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1927                            struct sfc_mae_parse_ctx *ctx,
1928                            struct sfc_mae_outer_rule **rulep,
1929                            struct rte_flow_error *error)
1930 {
1931         struct sfc_mae_outer_rule *rule;
1932         int rc;
1933
1934         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1935                 *rulep = NULL;
1936                 return 0;
1937         }
1938
1939         SFC_ASSERT(ctx->match_spec_outer != NULL);
1940
1941         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1944                                           "Inconsistent pattern (outer)");
1945         }
1946
1947         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1948                                            ctx->encap_type);
1949         if (*rulep != NULL) {
1950                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1951         } else {
1952                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1953                                             ctx->encap_type, rulep);
1954                 if (rc != 0) {
1955                         return rte_flow_error_set(error, rc,
1956                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1957                                         "Failed to process the pattern");
1958                 }
1959         }
1960
1961         /* The spec has now been tracked by the outer rule entry. */
1962         ctx->match_spec_outer = NULL;
1963
1964         /*
1965          * Depending on whether we reuse an existing outer rule or create a
1966          * new one (see above), outer rule ID is either a valid value or
1967          * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
1968          * specification (and the full mask, too) in order to have correct
1969          * class comparisons of the new rule with existing ones.
1970          * Also, action rule match specification will be validated shortly,
1971          * and having the full mask set for outer rule ID indicates that we
1972          * will use this field, and support for this field has to be checked.
1973          */
1974         rule = *rulep;
1975         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1976                                                   &rule->fw_rsrc.rule_id);
1977         if (rc != 0) {
1978                 sfc_mae_outer_rule_del(sa, *rulep);
1979                 *rulep = NULL;
1980
1981                 return rte_flow_error_set(error, rc,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1983                                           "Failed to process the pattern");
1984         }
1985
1986         return 0;
1987 }
1988
1989 static int
1990 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1991                               const struct rte_flow_item pattern[],
1992                               struct sfc_mae_parse_ctx *ctx,
1993                               struct rte_flow_error *error)
1994 {
1995         struct sfc_mae *mae = &sa->mae;
1996         int rc;
1997
1998         if (pattern == NULL) {
1999                 rte_flow_error_set(error, EINVAL,
2000                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2001                                    "NULL pattern");
2002                 return -rte_errno;
2003         }
2004
2005         for (;;) {
2006                 switch (pattern->type) {
2007                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2008                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2009                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2010                         ctx->tunnel_def_mask_size =
2011                                 sizeof(rte_flow_item_vxlan_mask);
2012                         break;
2013                 case RTE_FLOW_ITEM_TYPE_GENEVE:
2014                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2015                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2016                         ctx->tunnel_def_mask_size =
2017                                 sizeof(rte_flow_item_geneve_mask);
2018                         break;
2019                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2020                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2021                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2022                         ctx->tunnel_def_mask_size =
2023                                 sizeof(rte_flow_item_nvgre_mask);
2024                         break;
2025                 case RTE_FLOW_ITEM_TYPE_END:
2026                         break;
2027                 default:
2028                         ++pattern;
2029                         continue;
2030                 }
2031
2032                 break;
2033         }
2034
2035         if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2036                 return 0;
2037
2038         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2039                 return rte_flow_error_set(error, ENOTSUP,
2040                                           RTE_FLOW_ERROR_TYPE_ITEM,
2041                                           pattern, "Unsupported tunnel item");
2042         }
2043
2044         if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2045                 return rte_flow_error_set(error, ENOTSUP,
2046                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2047                                           NULL, "Unsupported priority level");
2048         }
2049
2050         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
2051                                      &ctx->match_spec_outer);
2052         if (rc != 0) {
2053                 return rte_flow_error_set(error, rc,
2054                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
2055                         "Failed to initialise outer rule match specification");
2056         }
2057
2058         /* Outermost items comprise a match specification of type OUTER. */
2059         ctx->match_spec = ctx->match_spec_outer;
2060
2061         /* Outermost items use "ENC" EFX MAE field IDs. */
2062         ctx->field_ids_remap = field_ids_remap_to_encap;
2063
2064         return 0;
2065 }
2066
2067 static void
2068 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2069                               struct sfc_mae_parse_ctx *ctx)
2070 {
2071         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2072                 return;
2073
2074         if (ctx->match_spec_outer != NULL)
2075                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2076 }
2077
2078 int
2079 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2080                            const struct rte_flow_item pattern[],
2081                            struct sfc_flow_spec_mae *spec,
2082                            struct rte_flow_error *error)
2083 {
2084         struct sfc_mae_parse_ctx ctx_mae;
2085         struct sfc_flow_parse_ctx ctx;
2086         int rc;
2087
2088         memset(&ctx_mae, 0, sizeof(ctx_mae));
2089         ctx_mae.priority = spec->priority;
2090         ctx_mae.sa = sa;
2091
2092         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2093                                      spec->priority,
2094                                      &ctx_mae.match_spec_action);
2095         if (rc != 0) {
2096                 rc = rte_flow_error_set(error, rc,
2097                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2098                         "Failed to initialise action rule match specification");
2099                 goto fail_init_match_spec_action;
2100         }
2101
2102         /*
2103          * As a preliminary setting, assume that there is no encapsulation
2104          * in the pattern. That is, pattern items are about to comprise a
2105          * match specification of type ACTION and use non-encap. field IDs.
2106          *
2107          * sfc_mae_rule_encap_parse_init() below may override this.
2108          */
2109         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2110         ctx_mae.match_spec = ctx_mae.match_spec_action;
2111         ctx_mae.field_ids_remap = field_ids_no_remap;
2112
2113         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2114         ctx.mae = &ctx_mae;
2115
2116         rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2117         if (rc != 0)
2118                 goto fail_encap_parse_init;
2119
2120         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2121                                     pattern, &ctx, error);
2122         if (rc != 0)
2123                 goto fail_parse_pattern;
2124
2125         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2126         if (rc != 0)
2127                 goto fail_process_pattern_data;
2128
2129         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2130         if (rc != 0)
2131                 goto fail_process_outer;
2132
2133         if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2134                 rc = rte_flow_error_set(error, ENOTSUP,
2135                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2136                                         "Inconsistent pattern");
2137                 goto fail_validate_match_spec_action;
2138         }
2139
2140         spec->match_spec = ctx_mae.match_spec_action;
2141
2142         return 0;
2143
2144 fail_validate_match_spec_action:
2145 fail_process_outer:
2146 fail_process_pattern_data:
2147 fail_parse_pattern:
2148         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2149
2150 fail_encap_parse_init:
2151         efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2152
2153 fail_init_match_spec_action:
2154         return rc;
2155 }
2156
2157 /*
2158  * An action supported by MAE may correspond to a bundle of RTE flow actions,
2159  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
2160  * That is, related RTE flow actions need to be tracked as parts of a whole
2161  * so that they can be combined into a single action and submitted to the
2162  * MAE representation of a given rule's action set.
2163  *
2164  * Each RTE flow action provided by an application gets classified as
2165  * one belonging to some bundle type. If an action is not supposed to
2166  * belong to any bundle, or if this action is END, it is described as
2167  * one belonging to a dummy bundle of type EMPTY.
2168  *
2169  * A currently tracked bundle will be submitted if a repeating
2170  * action or an action of a different bundle type follows.
2171  */
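
/*
 * For example, the sequence OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP /
 * PORT_ID submits a single VLAN_PUSH once PORT_ID (an EMPTY-bundle action) is
 * reached; a repeated OF_SET_VLAN_VID would force submission just as well.
 */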
2172
2173 enum sfc_mae_actions_bundle_type {
2174         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2175         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2176 };
2177
2178 struct sfc_mae_actions_bundle {
2179         enum sfc_mae_actions_bundle_type        type;
2180
2181         /* Indicates actions already tracked by the current bundle */
2182         uint64_t                                actions_mask;
2183
2184         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2185         rte_be16_t                              vlan_push_tpid;
2186         rte_be16_t                              vlan_push_tci;
2187 };
2188
2189 /*
2190  * Combine configuration of RTE flow actions tracked by the bundle into a
2191  * single action and submit the result to MAE action set specification.
2192  * Do nothing in the case of dummy action bundle.
2193  */
2194 static int
2195 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2196                               efx_mae_actions_t *spec)
2197 {
2198         int rc = 0;
2199
2200         switch (bundle->type) {
2201         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2202                 break;
2203         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2204                 rc = efx_mae_action_set_populate_vlan_push(
2205                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2206                 break;
2207         default:
2208                 SFC_ASSERT(B_FALSE);
2209                 break;
2210         }
2211
2212         return rc;
2213 }
2214
2215 /*
2216  * Given the type of the next RTE flow action in the line, decide
2217  * whether a new bundle is about to start, and, if this is the case,
2218  * submit and reset the current bundle.
2219  */
2220 static int
2221 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2222                             struct sfc_mae_actions_bundle *bundle,
2223                             efx_mae_actions_t *spec,
2224                             struct rte_flow_error *error)
2225 {
2226         enum sfc_mae_actions_bundle_type bundle_type_new;
2227         int rc;
2228
2229         switch (action->type) {
2230         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2231         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2232         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2233                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2234                 break;
2235         default:
2236                 /*
2237                  * Self-sufficient actions, including END, are handled in this
2238                  * case. No checks for unsupported actions are needed here
2239                  * because parsing doesn't occur at this point.
2240                  */
2241                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2242                 break;
2243         }
2244
2245         if (bundle_type_new != bundle->type ||
2246             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2247                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2248                 if (rc != 0)
2249                         goto fail_submit;
2250
2251                 memset(bundle, 0, sizeof(*bundle));
2252         }
2253
2254         bundle->type = bundle_type_new;
2255
2256         return 0;
2257
2258 fail_submit:
2259         return rte_flow_error_set(error, rc,
2260                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2261                         "Failed to request the (group of) action(s)");
2262 }
2263
2264 static void
2265 sfc_mae_rule_parse_action_of_push_vlan(
2266                             const struct rte_flow_action_of_push_vlan *conf,
2267                             struct sfc_mae_actions_bundle *bundle)
2268 {
2269         bundle->vlan_push_tpid = conf->ethertype;
2270 }
2271
2272 static void
2273 sfc_mae_rule_parse_action_of_set_vlan_vid(
2274                             const struct rte_flow_action_of_set_vlan_vid *conf,
2275                             struct sfc_mae_actions_bundle *bundle)
2276 {
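             /*
              * Keep only the 12 VID bits of the TCI;
              * RTE_LEN2MASK(12, uint16_t) is 0x0fff.
              */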
2277         bundle->vlan_push_tci |= (conf->vlan_vid &
2278                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2279 }
2280
2281 static void
2282 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2283                             const struct rte_flow_action_of_set_vlan_pcp *conf,
2284                             struct sfc_mae_actions_bundle *bundle)
2285 {
2286         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2287                                            RTE_LEN2MASK(3, uint8_t)) << 13;
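             /* For example, vlan_pcp of 5 yields vlan_tci_pcp of 0xa000 here. */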
2288
2289         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2290 }
2291
2292 struct sfc_mae_parsed_item {
2293         const struct rte_flow_item      *item;
2294         size_t                          proto_header_ofst;
2295         size_t                          proto_header_size;
2296 };
2297
2298 /*
2299  * For each 16-bit word of the given header, override
2300  * bits enforced by the corresponding 16-bit mask.
2301  */
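/*
 * For example, a fully masked EtherType in item ETH overrides the value which
 * sfc_mae_rule_parse_action_vxlan_encap() below derives automatically from the
 * neighbouring items (VLAN, IPv4 or IPv6).
 */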
2302 static void
2303 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2304                                 const struct sfc_mae_parsed_item *parsed_items,
2305                                 unsigned int nb_parsed_items)
2306 {
2307         unsigned int item_idx;
2308
2309         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2310                 const struct sfc_mae_parsed_item *parsed_item;
2311                 const struct rte_flow_item *item;
2312                 size_t proto_header_size;
2313                 size_t ofst;
2314
2315                 parsed_item = &parsed_items[item_idx];
2316                 proto_header_size = parsed_item->proto_header_size;
2317                 item = parsed_item->item;
2318
2319                 for (ofst = 0; ofst < proto_header_size;
2320                      ofst += sizeof(rte_be16_t)) {
2321                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2322                         const rte_be16_t *w_maskp;
2323                         const rte_be16_t *w_specp;
2324
2325                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
2326                         w_specp = RTE_PTR_ADD(item->spec, ofst);
2327
2328                         *wp &= ~(*w_maskp);
2329                         *wp |= (*w_specp & *w_maskp);
2330                 }
2331
2332                 header_buf += proto_header_size;
2333         }
2334 }
2335
2336 #define SFC_IPV4_TTL_DEF        0x40
2337 #define SFC_IPV6_VTC_FLOW_DEF   0x60000000
2338 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2339 #define SFC_VXLAN_FLAGS_DEF     0x08000000
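/*
 * These defaults mean: IPv4 TTL 64, IPv6 version 6 with zero traffic class
 * and flow label, IPv6 hop limit 255, and the VXLAN "I" (valid VNI) flag set.
 */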
2340
2341 static int
2342 sfc_mae_rule_parse_action_vxlan_encap(
2343                             struct sfc_mae *mae,
2344                             const struct rte_flow_action_vxlan_encap *conf,
2345                             efx_mae_actions_t *spec,
2346                             struct rte_flow_error *error)
2347 {
2348         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2349         struct rte_flow_item *pattern = conf->definition;
2350         uint8_t *buf = bounce_eh->buf;
2351
2352         /* This array will keep track of non-VOID pattern items. */
2353         struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2354                                                 2 /* VLAN tags */ +
2355                                                 1 /* IPv4 or IPv6 */ +
2356                                                 1 /* UDP */ +
2357                                                 1 /* VXLAN */];
2358         unsigned int nb_parsed_items = 0;
2359
2360         size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2361         uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2362                                   sizeof(struct rte_ipv6_hdr))];
2363         struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2364         struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2365         struct rte_vxlan_hdr *vxlan = NULL;
2366         struct rte_udp_hdr *udp = NULL;
2367         unsigned int nb_vlan_tags = 0;
2368         size_t next_proto_ofst = 0;
2369         size_t ethertype_ofst = 0;
2370         uint64_t exp_items;
2371
2372         if (pattern == NULL) {
2373                 return rte_flow_error_set(error, EINVAL,
2374                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2375                                 "The encap. header definition is NULL");
2376         }
2377
2378         bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2379         bounce_eh->size = 0;
2380
2381         /*
2382          * Process pattern items and remember non-VOID ones.
2383          * Defer applying masks until after the complete header
2384          * has been built from the pattern items.
2385          */
2386         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2387
2388         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2389                 struct sfc_mae_parsed_item *parsed_item;
2390                 const uint64_t exp_items_extra_vlan[] = {
2391                         RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2392                 };
2393                 size_t proto_header_size;
2394                 rte_be16_t *ethertypep;
2395                 uint8_t *next_protop;
2396                 uint8_t *buf_cur;
2397
2398                 if (pattern->spec == NULL) {
2399                         return rte_flow_error_set(error, EINVAL,
2400                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2401                                         "NULL item spec in the encap. header");
2402                 }
2403
2404                 if (pattern->mask == NULL) {
2405                         return rte_flow_error_set(error, EINVAL,
2406                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2407                                         "NULL item mask in the encap. header");
2408                 }
2409
2410                 if (pattern->last != NULL) {
2411                         /* This is not a match pattern, so disallow range. */
2412                         return rte_flow_error_set(error, EINVAL,
2413                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2414                                         "Range item in the encap. header");
2415                 }
2416
2417                 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2418                         /* Handle VOID separately, for clarity. */
2419                         continue;
2420                 }
2421
2422                 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2423                         return rte_flow_error_set(error, ENOTSUP,
2424                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2425                                         "Unexpected item in the encap. header");
2426                 }
2427
2428                 parsed_item = &parsed_items[nb_parsed_items];
2429                 buf_cur = buf + bounce_eh->size;
2430
2431                 switch (pattern->type) {
2432                 case RTE_FLOW_ITEM_TYPE_ETH:
2433                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2434                                                exp_items);
2435                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2436                                                   hdr) != 0);
2437
2438                         proto_header_size = sizeof(struct rte_ether_hdr);
2439
2440                         ethertype_ofst = eth_ethertype_ofst;
2441
2442                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2443                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2444                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2445                         break;
2446                 case RTE_FLOW_ITEM_TYPE_VLAN:
2447                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2448                                                exp_items);
2449                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2450                                                   hdr) != 0);
2451
2452                         proto_header_size = sizeof(struct rte_vlan_hdr);
2453
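                             /*
                              * Write QinQ at the outermost EtherType and plain
                              * VLAN at the previous level. For the first tag,
                              * both writes hit the Ethernet EtherType field,
                              * so a single-tag header keeps EtherType 0x8100.
                              */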
2454                         ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2455                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2456
2457                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2458                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2459
2460                         ethertype_ofst =
2461                             bounce_eh->size +
2462                             offsetof(struct rte_vlan_hdr, eth_proto);
2463
2464                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2465                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2466                         exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2467
2468                         ++nb_vlan_tags;
2469                         break;
2470                 case RTE_FLOW_ITEM_TYPE_IPV4:
2471                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2472                                                exp_items);
2473                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2474                                                   hdr) != 0);
2475
2476                         proto_header_size = sizeof(struct rte_ipv4_hdr);
2477
2478                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2479                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2480
2481                         next_proto_ofst =
2482                             bounce_eh->size +
2483                             offsetof(struct rte_ipv4_hdr, next_proto_id);
2484
2485                         ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2486
2487                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2488                         break;
2489                 case RTE_FLOW_ITEM_TYPE_IPV6:
2490                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2491                                                exp_items);
2492                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2493                                                   hdr) != 0);
2494
2495                         proto_header_size = sizeof(struct rte_ipv6_hdr);
2496
2497                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2498                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2499
2500                         next_proto_ofst = bounce_eh->size +
2501                                           offsetof(struct rte_ipv6_hdr, proto);
2502
2503                         ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2504
2505                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2506                         break;
2507                 case RTE_FLOW_ITEM_TYPE_UDP:
2508                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2509                                                exp_items);
2510                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2511                                                   hdr) != 0);
2512
2513                         proto_header_size = sizeof(struct rte_udp_hdr);
2514
2515                         next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2516                         *next_protop = IPPROTO_UDP;
2517
2518                         udp = (struct rte_udp_hdr *)buf_cur;
2519
2520                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2521                         break;
2522                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2523                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2524                                                exp_items);
2525                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2526                                                   hdr) != 0);
2527
2528                         proto_header_size = sizeof(struct rte_vxlan_hdr);
2529
2530                         vxlan = (struct rte_vxlan_hdr *)buf_cur;
2531
2532                         udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2533                         udp->dgram_len = RTE_BE16(sizeof(*udp) +
2534                                                   sizeof(*vxlan));
2535                         udp->dgram_cksum = 0;
2536
2537                         exp_items = 0;
2538                         break;
2539                 default:
2540                         return rte_flow_error_set(error, ENOTSUP,
2541                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2542                                         "Unknown item in the encap. header");
2543                 }
2544
2545                 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2546                         return rte_flow_error_set(error, E2BIG,
2547                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2548                                         "The encap. header is too big");
2549                 }
2550
2551                 if ((proto_header_size & 1) != 0) {
2552                         return rte_flow_error_set(error, EINVAL,
2553                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2554                                         "Odd layer size in the encap. header");
2555                 }
2556
2557                 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2558                 bounce_eh->size += proto_header_size;
2559
2560                 parsed_item->item = pattern;
2561                 parsed_item->proto_header_size = proto_header_size;
2562                 ++nb_parsed_items;
2563         }
2564
2565         if (exp_items != 0) {
2566                 /* Parsing item VXLAN would have reset exp_items to 0. */
2567                 return rte_flow_error_set(error, ENOTSUP,
2568                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2569                                         "No item VXLAN in the encap. header");
2570         }
2571
2572         /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2573         ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2574         ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2575         ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2576                                       sizeof(*vxlan));
2577         /* The HW cannot compute this checksum. */
2578         ipv4->hdr_checksum = 0;
2579         ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2580
2581         ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2582         ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2583         ipv6->payload_len = udp->dgram_len;
2584
2585         vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2586
2587         /* Take care of the masks. */
2588         sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2589
2590         return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
2591 }
2592
2593 static int
2594 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
2595                                efx_mae_actions_t *spec)
2596 {
2597         return efx_mae_action_set_populate_mark(spec, conf->id);
2598 }
2599
2600 static int
2601 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2602                                    const struct rte_flow_action_phy_port *conf,
2603                                    efx_mae_actions_t *spec)
2604 {
2605         efx_mport_sel_t mport;
2606         uint32_t phy_port;
2607         int rc;
2608
2609         if (conf->original != 0)
2610                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2611         else
2612                 phy_port = conf->index;
2613
2614         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2615         if (rc != 0)
2616                 return rc;
2617
2618         return efx_mae_action_set_populate_deliver(spec, &mport);
2619 }
2620
2621 static int
2622 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
2623                                 const struct rte_flow_action_vf *vf_conf,
2624                                 efx_mae_actions_t *spec)
2625 {
2626         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
2627         efx_mport_sel_t mport;
2628         uint32_t vf;
2629         int rc;
2630
2631         if (vf_conf == NULL)
2632                 vf = EFX_PCI_VF_INVALID;
2633         else if (vf_conf->original != 0)
2634                 vf = encp->enc_vf;
2635         else
2636                 vf = vf_conf->id;
2637
2638         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
2639         if (rc != 0)
2640                 return rc;
2641
2642         return efx_mae_action_set_populate_deliver(spec, &mport);
2643 }
2644
2645 static int
2646 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
2647                                   const struct rte_flow_action_port_id *conf,
2648                                   efx_mae_actions_t *spec)
2649 {
2650         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2651         struct sfc_mae *mae = &sa->mae;
2652         efx_mport_sel_t mport;
2653         uint16_t port_id;
2654         int rc;
2655
2656         port_id = (conf->original != 0) ? sas->port_id : conf->id;
2657
2658         rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
2659                                            port_id, &mport);
2660         if (rc != 0)
2661                 return rc;
2662
2663         return efx_mae_action_set_populate_deliver(spec, &mport);
2664 }
2665
2666 static int
2667 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2668                           const struct rte_flow_action *action,
2669                           const struct sfc_mae_outer_rule *outer_rule,
2670                           struct sfc_mae_actions_bundle *bundle,
2671                           efx_mae_actions_t *spec,
2672                           struct rte_flow_error *error)
2673 {
2674         bool custom_error = B_FALSE;
2675         int rc = 0;
2676
2677         switch (action->type) {
2678         case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2679                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
2680                                        bundle->actions_mask);
2681                 if (outer_rule == NULL ||
2682                     outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
2683                         rc = EINVAL;
2684                 else
2685                         rc = efx_mae_action_set_populate_decap(spec);
2686                 break;
2687         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2688                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2689                                        bundle->actions_mask);
2690                 rc = efx_mae_action_set_populate_vlan_pop(spec);
2691                 break;
2692         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2693                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2694                                        bundle->actions_mask);
2695                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
2696                 break;
2697         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2698                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2699                                        bundle->actions_mask);
2700                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
2701                 break;
2702         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2703                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2704                                        bundle->actions_mask);
2705                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
2706                 break;
2707         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2708                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
2709                                        bundle->actions_mask);
2710                 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
2711                                                            action->conf,
2712                                                            spec, error);
2713                 custom_error = B_TRUE;
2714                 break;
2715         case RTE_FLOW_ACTION_TYPE_FLAG:
2716                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2717                                        bundle->actions_mask);
2718                 rc = efx_mae_action_set_populate_flag(spec);
2719                 break;
2720         case RTE_FLOW_ACTION_TYPE_MARK:
2721                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2722                                        bundle->actions_mask);
2723                 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
2724                 break;
2725         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2726                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2727                                        bundle->actions_mask);
2728                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
2729                 break;
2730         case RTE_FLOW_ACTION_TYPE_PF:
2731                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2732                                        bundle->actions_mask);
2733                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
2734                 break;
2735         case RTE_FLOW_ACTION_TYPE_VF:
2736                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2737                                        bundle->actions_mask);
2738                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
2739                 break;
2740         case RTE_FLOW_ACTION_TYPE_PORT_ID:
2741                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2742                                        bundle->actions_mask);
2743                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
2744                 break;
2745         case RTE_FLOW_ACTION_TYPE_DROP:
2746                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2747                                        bundle->actions_mask);
2748                 rc = efx_mae_action_set_populate_drop(spec);
2749                 break;
2750         default:
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2753                                 "Unsupported action");
2754         }
2755
2756         if (rc == 0) {
2757                 bundle->actions_mask |= (1ULL << action->type);
2758         } else if (!custom_error) {
2759                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2760                                 NULL, "Failed to request the action");
2761         }
2762
2763         return rc;
2764 }
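
/*
 * A minimal sketch of adding one more case to the dispatcher above.
 * This is hypothetical: it assumes an efx_mae_action_set_populate_count()
 * primitive, which this version of libefx may not provide:
 *
 *      case RTE_FLOW_ACTION_TYPE_COUNT:
 *              SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
 *                                     bundle->actions_mask);
 *              rc = efx_mae_action_set_populate_count(spec);
 *              break;
 *
 * Handlers that set a custom rte_flow error themselves must also set
 * custom_error to B_TRUE so that the generic rte_flow_error_set()
 * invocation at the end of the function is skipped.
 */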
2765
2766 static void
2767 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
2768 {
2769         bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
2770 }
2771
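/*
 * Map the encap. header bounce buffer to a driver-level encap. header
 * entry: reuse an existing entry on an exact match, add a new one
 * otherwise. If the bounce buffer has not been touched by action
 * parsing, no entry is needed at all.
 */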
2772 static int
2773 sfc_mae_process_encap_header(struct sfc_adapter *sa,
2774                              const struct sfc_mae_bounce_eh *bounce_eh,
2775                              struct sfc_mae_encap_header **encap_headerp)
2776 {
2777         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
2778                 *encap_headerp = NULL;
2779                 return 0;
2780         }
2781
2782         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
2783         if (*encap_headerp != NULL)
2784                 return 0;
2785
2786         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
2787 }
2788
2789 int
2790 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2791                            const struct rte_flow_action actions[],
2792                            struct sfc_flow_spec_mae *spec_mae,
2793                            struct rte_flow_error *error)
2794 {
2795         struct sfc_mae_encap_header *encap_header = NULL;
2796         struct sfc_mae_actions_bundle bundle = {0};
2797         const struct rte_flow_action *action;
2798         struct sfc_mae *mae = &sa->mae;
2799         efx_mae_actions_t *spec;
2800         int rc;
2801
2802         rte_errno = 0;
2803
2804         if (actions == NULL) {
2805                 return rte_flow_error_set(error, EINVAL,
2806                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
2807                                 "NULL actions");
2808         }
2809
2810         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
2811         if (rc != 0)
2812                 goto fail_action_set_spec_init;
2813
2814         /* Cleanup after previous encap. header bounce buffer usage. */
2815         sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
2816
2817         for (action = actions;
2818              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
2819                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2820                 if (rc != 0)
2821                         goto fail_rule_parse_action;
2822
2823                 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
2824                                                &bundle, spec, error);
2825                 if (rc != 0)
2826                         goto fail_rule_parse_action;
2827         }
2828
2829         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2830         if (rc != 0)
2831                 goto fail_rule_parse_action;
2832
2833         rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
2834         if (rc != 0)
2835                 goto fail_process_encap_header;
2836
2837         spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
2838                                                          spec);
2839         if (spec_mae->action_set != NULL) {
2840                 sfc_mae_encap_header_del(sa, encap_header);
2841                 efx_mae_action_set_spec_fini(sa->nic, spec);
2842                 return 0;
2843         }
2844
2845         rc = sfc_mae_action_set_add(sa, spec, encap_header,
2846                                     &spec_mae->action_set);
2847         if (rc != 0)
2848                 goto fail_action_set_add;
2849
2850         return 0;
2851
2852 fail_action_set_add:
2853         sfc_mae_encap_header_del(sa, encap_header);
2854
2855 fail_process_encap_header:
2856 fail_rule_parse_action:
2857         efx_mae_action_set_spec_fini(sa->nic, spec);
2858
2859 fail_action_set_spec_init:
2860         if (rc > 0 && rte_errno == 0) {
2861                 rc = rte_flow_error_set(error, rc,
2862                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2863                         NULL, "Failed to process the action");
2864         }
2865         return rc;
2866 }
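
/*
 * Illustrative caller sketch (hypothetical variables; error handling
 * omitted). An action array digestible by the parser above could be:
 *
 *      struct rte_flow_action_port_id port_id_conf = { .id = 1 };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *              {
 *                      .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *                      .conf = &port_id_conf,
 *              },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 *      rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
 */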
2867
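/*
 * Compare the classes of two match specifications by means of libefx.
 * If the comparison itself fails, report a class mismatch to stay on
 * the safe side.
 */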
2868 static bool
2869 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2870                         const efx_mae_match_spec_t *left,
2871                         const efx_mae_match_spec_t *right)
2872 {
2873         bool have_same_class;
2874         int rc;
2875
2876         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
2877                                            &have_same_class);
2878
2879         return (rc == 0) ? have_same_class : false;
2880 }
2881
2882 static int
2883 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
2884                                 struct sfc_mae_outer_rule *rule)
2885 {
2886         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2887         struct sfc_mae_outer_rule *entry;
2888         struct sfc_mae *mae = &sa->mae;
2889
2890         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
2891                 /* An active rule is reused. Its class is known to be valid. */
2892                 return 0;
2893         }
2894
2895         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
2896                               sfc_mae_outer_rules, entries) {
2897                 const efx_mae_match_spec_t *left = entry->match_spec;
2898                 const efx_mae_match_spec_t *right = rule->match_spec;
2899
2900                 if (entry == rule)
2901                         continue;
2902
2903                 if (sfc_mae_rules_class_cmp(sa, left, right))
2904                         return 0;
2905         }
2906
2907         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2908                  "support for outer frame pattern items is not guaranteed; "
2909                  "other than that, the items are valid from the SW standpoint");
2910         return 0;
2911 }
2912
2913 static int
2914 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
2915                                  struct sfc_flow_spec_mae *spec)
2916 {
2917         const struct rte_flow *entry;
2918
2919         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2920                 const struct sfc_flow_spec *entry_spec = &entry->spec;
2921                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2922                 const efx_mae_match_spec_t *left = es_mae->match_spec;
2923                 const efx_mae_match_spec_t *right = spec->match_spec;
2924
2925                 switch (entry_spec->type) {
2926                 case SFC_FLOW_SPEC_FILTER:
2927                         /* Ignore VNIC-level flows */
2928                         break;
2929                 case SFC_FLOW_SPEC_MAE:
2930                         if (sfc_mae_rules_class_cmp(sa, left, right))
2931                                 return 0;
2932                         break;
2933                 default:
2934                         SFC_ASSERT(false);
2935                 }
2936         }
2937
2938         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2939                  "support for inner frame pattern items is not guaranteed; "
2940                  "other than that, the items are valid from the SW standpoint");
2941         return 0;
2942 }
2943
2944 /**
2945  * Confirm that a given flow can be accepted by the FW.
2946  *
2947  * @param sa
2948  *   Software adapter context
2949  * @param flow
2950  *   Flow to be verified
2951  * @return
2952  *   Zero on success and non-zero in the case of error.
2953  *   A special value of EAGAIN indicates that the adapter is
2954  *   not in the started state. That state is required because
2955  *   the class of the flow being validated can only be compared
2956  *   with the classes of the active rules, and those classes
2957  *   are known to be accepted by the FW.
2958  */
2959 int
2960 sfc_mae_flow_verify(struct sfc_adapter *sa,
2961                     struct rte_flow *flow)
2962 {
2963         struct sfc_flow_spec *spec = &flow->spec;
2964         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2965         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2966         int rc;
2967
2968         SFC_ASSERT(sfc_adapter_is_locked(sa));
2969
2970         if (sa->state != SFC_ADAPTER_STARTED)
2971                 return EAGAIN;
2972
2973         if (outer_rule != NULL) {
2974                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
2975                 if (rc != 0)
2976                         return rc;
2977         }
2978
2979         return sfc_mae_action_rule_class_verify(sa, spec_mae);
2980 }
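
/*
 * Note for callers: EAGAIN from sfc_mae_flow_verify() is a deferral,
 * not a validation failure. A minimal handling sketch (hypothetical
 * caller context):
 *
 *      rc = sfc_mae_flow_verify(sa, flow);
 *      if (rc == EAGAIN) {
 *              // Adapter not started: class comparison is impossible
 *              // now; retry verification once the adapter is started.
 *      }
 */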
2981
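/*
 * Enable the FW resources the flow depends on (the outer rule, if any,
 * and the action set) and insert the action rule itself. On failure,
 * undo the steps already taken, in reverse order.
 */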
2982 int
2983 sfc_mae_flow_insert(struct sfc_adapter *sa,
2984                     struct rte_flow *flow)
2985 {
2986         struct sfc_flow_spec *spec = &flow->spec;
2987         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2988         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2989         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2991         int rc;
2992
2993         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2994         SFC_ASSERT(action_set != NULL);
2995
2996         if (outer_rule != NULL) {
2997                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
2998                                                spec_mae->match_spec);
2999                 if (rc != 0)
3000                         goto fail_outer_rule_enable;
3001         }
3002
3003         rc = sfc_mae_action_set_enable(sa, action_set);
3004         if (rc != 0)
3005                 goto fail_action_set_enable;
3006
3007         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
3008                                         NULL, &action_set->fw_rsrc.aset_id,
3009                                         &spec_mae->rule_id);
3010         if (rc != 0)
3011                 goto fail_action_rule_insert;
3012
3013         sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
3014                 flow, spec_mae->rule_id.id);
3015
3016         return 0;
3017
3018 fail_action_rule_insert:
3019         sfc_mae_action_set_disable(sa, action_set);
3020
3021 fail_action_set_enable:
3022         if (outer_rule != NULL)
3023                 sfc_mae_outer_rule_disable(sa, outer_rule);
3024
3025 fail_outer_rule_enable:
3026         return rc;
3027 }
3028
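/*
 * The counterpart of sfc_mae_flow_insert(): remove the action rule
 * first, then release the action set and outer rule references.
 */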
3029 int
3030 sfc_mae_flow_remove(struct sfc_adapter *sa,
3031                     struct rte_flow *flow)
3032 {
3033         struct sfc_flow_spec *spec = &flow->spec;
3034         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
3035         struct sfc_mae_action_set *action_set = spec_mae->action_set;
3036         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3037         int rc;
3038
3039         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
3040         SFC_ASSERT(action_set != NULL);
3041
3042         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
3043         if (rc != 0)
3044                 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
3045                         flow, spec_mae->rule_id.id, strerror(rc));
3046         else
3047                 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
3048                         flow, spec_mae->rule_id.id);
3049         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
3050
3051         sfc_mae_action_set_disable(sa, action_set);
3052
3053         if (outer_rule != NULL)
3054                 sfc_mae_outer_rule_disable(sa, outer_rule);
3055
3056         return 0;
3057 }