/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

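/*
 * Derive the m-port for the port's own "entity" from the PF / VF
 * numbers of the underlying PCIe function.
 */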
static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

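/*
 * Set up MAE support on the adapter: initialise the MAE subsystem,
 * query its limits, register the port with the driver's switch
 * infrastructure and allocate a bounce buffer for encap. headers.
 */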
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /*
         * As of now, the driver does not support representors, so
         * RTE ethdev MPORT simply matches that of the entity.
         */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        sfc_log_init(sa, "allocate encap. header bounce buffer");
        bounce_eh->buf_size = limits.eml_encap_header_size_limit;
        bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                    bounce_eh->buf_size, 0);
        if (bounce_eh->buf == NULL) {
                rc = ENOMEM;
                goto fail_mae_alloc_bounce_eh;
        }

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        mae->encap_types_supported = limits.eml_encap_types_supported;
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        rte_free(mae->bounce_eh.buf);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

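/*
 * Outer rules are refcounted driver-level objects keyed by match
 * specification and encapsulation type. Attach reuses an existing
 * rule; add creates a new one. The corresponding FW resource is
 * allocated lazily on the first enable.
 */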
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        sfc_dbg(sa, "attaching to outer_rule=%p", rule);
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        sfc_dbg(sa, "added outer_rule=%p", rule);

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
            rule->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
                        rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
        }

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);

        sfc_dbg(sa, "deleted outer_rule=%p", rule);
}

static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable outer_rule=%p: %s",
                                rule, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
                        rule, fw_rsrc->rule_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
                        rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
                                rule, fw_rsrc->rule_id.id);
                } else {
                        sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
                                rule, fw_rsrc->rule_id.id, strerror(rc));
                }
                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

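/*
 * Encap. headers are refcounted and deduplicated by their binary
 * contents: attach reuses an existing header whose type and bytes
 * match the bounce buffer; add copies the bounce buffer into a new
 * driver-level object. The FW resource is allocated on first enable.
 */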
static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        sfc_dbg(sa, "attaching to encap_header=%p",
                                encap_header);
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        sfc_dbg(sa, "added encap_header=%p", encap_header);

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
            encap_header->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
                        encap_header, encap_header->fw_rsrc.eh_id.id,
                        encap_header->fw_rsrc.refcnt);
        }

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);

        sfc_dbg(sa, "deleted encap_header=%p", encap_header);
}

static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0) {
                        sfc_err(sa, "failed to enable encap_header=%p: %s",
                                encap_header, strerror(rc));
                        return rc;
                }
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }

                sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));

                return rc;
        }

        if (fw_rsrc->refcnt == 0) {
                sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
                        encap_header, fw_rsrc->eh_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
                        encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
                                encap_header, fw_rsrc->eh_id.id);
                } else {
                        sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
                                encap_header, fw_rsrc->eh_id.id, strerror(rc));
                }
                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);
}

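/*
 * Action sets are refcounted as well: two flows share an action set
 * if their action specs and encap. headers are identical. Enabling
 * an action set first enables its encap. header (if any) and then
 * allocates the FW action set resource.
 */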
static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                if (action_set->encap_header == encap_header &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        sfc_dbg(sa, "attaching to action_set=%p", action_set);
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL)
                return ENOMEM;

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        sfc_dbg(sa, "added action_set=%p", action_set);

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
            action_set->fw_rsrc.refcnt != 0) {
                sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
                        action_set, action_set->fw_rsrc.aset_id.id,
                        action_set->fw_rsrc.refcnt);
        }

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);

        sfc_dbg(sa, "deleted action_set=%p", action_set);
}

static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        sfc_mae_encap_header_disable(sa, encap_header);

                        sfc_err(sa, "failed to enable action_set=%p: %s",
                                action_set, strerror(rc));

                        return rc;
                }

                sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
                        action_set, fw_rsrc->aset_id.id);
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static void
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
            fw_rsrc->refcnt == 0) {
                sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
                        action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
                return;
        }

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc == 0) {
                        sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
                                action_set, fw_rsrc->aset_id.id);
                } else {
                        sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
                                action_set, fw_rsrc->aset_id.id, strerror(rc));
                }
                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                sfc_mae_encap_header_disable(sa, action_set->encap_header);
        }

        --(fw_rsrc->refcnt);
}

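/*
 * Drop all driver-level objects referenced by the flow. The FW action
 * rule must have been removed by this point (its ID is asserted to be
 * invalid).
 */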
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

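/*
 * Write the stashed EtherType values to the match specification:
 * field ETHER_TYPE gets the innermost L2 item's "type", and fields
 * VLAN0_PROTO / VLAN1_PROTO get the TPIDs of the matched VLAN tags.
 */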
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is an L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

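/*
 * Post-process the pattern data stashed by the item handlers: verify
 * that the collected TPIDs / EtherTypes form a valid (QinQ) sequence,
 * apply the innermost EtherType and IP protocol restrictions imposed
 * by L3 / L4 items, and set the corresponding match fields.
 */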
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single VLAN item is followed by an L3 item, the
                 * value of "type" in item ETH can't be a double-tagging
                 * TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                unsigned int tpid_idx;

                /* Only an exact match is supported. */
                if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (ethertypes[ethertype_idx].value ==
                            supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value =
                            pdata->innermost_ethertype_restriction.value;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value !=
                           pdata->innermost_ethertype_restriction.value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now that the number of VLAN tags is known, set fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is a valid L3 EtherType (or 0x0000/0x0000), and the
         * last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                            pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

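/*
 * Handlers for the traffic source items (PORT_ID, PHY_PORT, PF, VF).
 * Each of them resolves to a single m-port match; the match_mport_set
 * flag makes these items mutually exclusive within one pattern.
 */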
static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

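/*
 * Build the supported fields mask for an item type by writing an
 * all-ones pattern at the offset / size of every listed field.
 */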
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

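/*
 * Copy the fields described by the locators from the item spec / mask
 * into the match specification, skipping deferred fields, which are
 * handled by sfc_mae_rule_process_pattern_data() later on.
 */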
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
                   struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[fl->field_id],
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_eth *item_spec;
                const struct rte_flow_item_eth *item_mask;

                item_spec = (const struct rte_flow_item_eth *)spec;
                item_mask = (const struct rte_flow_item_eth *)mask;

                ethertypes[0].value = item_spec->type;
                ethertypes[0].mask = item_mask->type;
        } else {
                /*
                 * The specification is empty. This is wrong if there
                 * are further network pattern items in line. Other
                 * than that, any Ethernet frame can match. All of
                 * that is checked at the end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae, error);
}

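/*
 * The VLAN locators come in two groups of two: one group per tag.
 * sfc_mae_rule_parse_item_vlan() picks the group that corresponds
 * to the tag currently being parsed.
 */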
static const struct sfc_mae_field_locator flocs_vlan[] = {
        /* Outermost tag */
        {
                EFX_MAE_FIELD_VLAN0_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },

        /* Innermost tag */
        {
                EFX_MAE_FIELD_VLAN1_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        const struct sfc_mae_field_locator *flocs;
        struct rte_flow_item_vlan supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        unsigned int nb_flocs;
        int rc;

        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't match that many VLAN tags");
        }

        nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
        flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

        /* If parsing fails, this can remain incremented. */
        ++pdata->nb_vlan_tags;

        sfc_mae_item_build_supp_mask(flocs, nb_flocs,
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_vlan_mask,
                                 sizeof(struct rte_flow_item_vlan), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_vlan *item_spec;
                const struct rte_flow_item_vlan *item_mask;

                item_spec = (const struct rte_flow_item_vlan *)spec;
                item_mask = (const struct rte_flow_item_vlan *)mask;

                ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
                ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
        } else {
                /*
                 * The specification is empty. This is wrong if there
                 * are further network pattern items in line. Other
                 * than that, any VLAN tag can match. All of that is
                 * checked at the end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
        {
                EFX_MAE_FIELD_SRC_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
                offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
        },
        {
                EFX_MAE_FIELD_IP_TOS,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
                                 hdr.type_of_service),
                offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
                offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
        },
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv4 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv4 *item_spec;
                const struct rte_flow_item_ipv4 *item_mask;

                item_spec = (const struct rte_flow_item_ipv4 *)spec;
                item_mask = (const struct rte_flow_item_ipv4 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
                pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
        } else {
                return 0;
        }

        return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
        {
                EFX_MAE_FIELD_SRC_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
                offsetof(struct rte_flow_item_ipv6, hdr.proto),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
                offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
        },
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv6 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        rte_be32_t vtc_flow_be;
        uint32_t vtc_flow;
        uint8_t tc_value;
        uint8_t tc_mask;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
                                     &supp_mask, sizeof(supp_mask));

        vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
        memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv6 *item_spec;
                const struct rte_flow_item_ipv6 *item_mask;

                item_spec = (const struct rte_flow_item_ipv6 *)spec;
                item_mask = (const struct rte_flow_item_ipv6 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.proto;
                pdata->l3_next_proto_mask = item_mask->hdr.proto;
        } else {
                return 0;
        }

        rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
                                ctx_mae, error);
        if (rc != 0)
                return rc;

1444         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1445         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1446         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1447
1448         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1449         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1450         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1451
1452         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1453                                           fremap[EFX_MAE_FIELD_IP_TOS],
1454                                           sizeof(tc_value), &tc_value,
1455                                           sizeof(tc_mask), &tc_mask);
1456         if (rc != 0) {
1457                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1458                                 NULL, "Failed to process item fields");
1459         }
1460
1461         return 0;
1462 }
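
/*
 * Worked example (illustrative only, not driver code): for an IPv6 item
 * with hdr.vtc_flow = RTE_BE32(0x6b800000), the code above yields
 *
 *     tc_value = (0x6b800000 & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT
 *              = (0x6b800000 & 0x0ff00000) >> 20 = 0xb8;
 *
 * that is, the Traffic Class octet (here, DSCP EF) extracted from the
 * version/TC/flow label word. The same shift is applied to the mask so
 * that value and mask stay consistent for efx_mae_match_spec_field_set().
 */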
1463
1464 static const struct sfc_mae_field_locator flocs_tcp[] = {
1465         {
1466                 EFX_MAE_FIELD_L4_SPORT_BE,
1467                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1468                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1469         },
1470         {
1471                 EFX_MAE_FIELD_L4_DPORT_BE,
1472                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1473                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1474         },
1475         {
1476                 EFX_MAE_FIELD_TCP_FLAGS_BE,
1477                 /*
1478                  * The values have been picked intentionally since the
1479                  * target MAE field is oversize (16 bit). This mapping
1480                  * relies on the fact that the MAE field is big-endian.
1481                  */
1482                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1483                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1484                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1485         },
1486 };
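
/*
 * Layout note (illustrative): in struct rte_tcp_hdr, data_off and
 * tcp_flags are adjacent octets, so the last locator above feeds the
 * 16-bit big-endian MAE field with both bytes at once:
 *
 *     bits 15..8 = data_off
 *     bits  7..0 = tcp_flags (FIN = 0x01, SYN = 0x02, ACK = 0x10, ...)
 *
 * A mask of RTE_BE16(0x0002), for instance, would match on SYN alone.
 */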
1487
1488 static int
1489 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1490                             struct sfc_flow_parse_ctx *ctx,
1491                             struct rte_flow_error *error)
1492 {
1493         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1494         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1495         struct rte_flow_item_tcp supp_mask;
1496         const uint8_t *spec = NULL;
1497         const uint8_t *mask = NULL;
1498         int rc;
1499
1500         /*
1501          * When encountered among outermost items, item TCP is invalid.
1502          * Check which match specification is being constructed now.
1503          */
1504         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1505                 return rte_flow_error_set(error, EINVAL,
1506                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1507                                           "TCP in outer frame is invalid");
1508         }
1509
1510         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1511                                      &supp_mask, sizeof(supp_mask));
1512
1513         rc = sfc_flow_parse_init(item,
1514                                  (const void **)&spec, (const void **)&mask,
1515                                  (const void *)&supp_mask,
1516                                  &rte_flow_item_tcp_mask,
1517                                  sizeof(struct rte_flow_item_tcp), error);
1518         if (rc != 0)
1519                 return rc;
1520
1521         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1522         pdata->l3_next_proto_restriction_mask = 0xff;
1523
1524         if (spec == NULL)
1525                 return 0;
1526
1527         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1528                                   ctx_mae, error);
1529 }
1530
1531 static const struct sfc_mae_field_locator flocs_udp[] = {
1532         {
1533                 EFX_MAE_FIELD_L4_SPORT_BE,
1534                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1535                 offsetof(struct rte_flow_item_udp, hdr.src_port),
1536         },
1537         {
1538                 EFX_MAE_FIELD_L4_DPORT_BE,
1539                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1540                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1541         },
1542 };
1543
1544 static int
1545 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1546                             struct sfc_flow_parse_ctx *ctx,
1547                             struct rte_flow_error *error)
1548 {
1549         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1550         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1551         struct rte_flow_item_udp supp_mask;
1552         const uint8_t *spec = NULL;
1553         const uint8_t *mask = NULL;
1554         int rc;
1555
1556         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1557                                      &supp_mask, sizeof(supp_mask));
1558
1559         rc = sfc_flow_parse_init(item,
1560                                  (const void **)&spec, (const void **)&mask,
1561                                  (const void *)&supp_mask,
1562                                  &rte_flow_item_udp_mask,
1563                                  sizeof(struct rte_flow_item_udp), error);
1564         if (rc != 0)
1565                 return rc;
1566
1567         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1568         pdata->l3_next_proto_restriction_mask = 0xff;
1569
1570         if (spec == NULL)
1571                 return 0;
1572
1573         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1574                                   ctx_mae, error);
1575 }
1576
1577 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1578         {
1579                 /*
1580                  * The size and offset values are relevant
1581                  * for Geneve and NVGRE, too.
1582                  */
1583                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1584                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1585         },
1586 };
1587
1588 /*
1589  * An auxiliary registry which allows using non-encap. field IDs
1590  * directly when building a match specification of type ACTION.
1591  *
1592  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1593  */
1594 static const efx_mae_field_id_t field_ids_no_remap[] = {
1595 #define FIELD_ID_NO_REMAP(_field) \
1596         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1597
1598         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1599         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1600         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1601         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1602         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1603         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1604         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1605         FIELD_ID_NO_REMAP(SRC_IP4_BE),
1606         FIELD_ID_NO_REMAP(DST_IP4_BE),
1607         FIELD_ID_NO_REMAP(IP_PROTO),
1608         FIELD_ID_NO_REMAP(IP_TOS),
1609         FIELD_ID_NO_REMAP(IP_TTL),
1610         FIELD_ID_NO_REMAP(SRC_IP6_BE),
1611         FIELD_ID_NO_REMAP(DST_IP6_BE),
1612         FIELD_ID_NO_REMAP(L4_SPORT_BE),
1613         FIELD_ID_NO_REMAP(L4_DPORT_BE),
1614         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1615
1616 #undef FIELD_ID_NO_REMAP
1617 };
1618
1619 /*
1620  * An auxiliary registry which allows using "ENC" field IDs
1621  * when building a match specification of type OUTER.
1622  *
1623  * See sfc_mae_rule_encap_parse_init().
1624  */
1625 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1626 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1627         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1628
1629         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1630         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1631         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1632         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1633         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1634         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1635         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1636         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1637         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1638         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1639         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1640         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1641         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1642         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1643         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1644         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1645
1646 #undef FIELD_ID_REMAP_TO_ENCAP
1647 };
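
/*
 * Usage sketch (illustrative): item parsers do not choose between the two
 * registries themselves; they go through the remap table installed in the
 * parse context, e.g.
 *
 *     const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
 *
 *     efx_mae_match_spec_field_set(ctx_mae->match_spec,
 *                                  fremap[EFX_MAE_FIELD_IP_TOS], ...);
 *
 * With field_ids_no_remap the lookup is an identity mapping; with
 * field_ids_remap_to_encap it yields EFX_MAE_FIELD_ENC_IP_TOS. This way,
 * one and the same parser serves both OUTER and ACTION specifications.
 */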
1648
1649 static int
1650 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1651                                struct sfc_flow_parse_ctx *ctx,
1652                                struct rte_flow_error *error)
1653 {
1654         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1655         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1656         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1657         const struct rte_flow_item_vxlan *vxp;
1658         uint8_t supp_mask[sizeof(uint64_t)];
1659         const uint8_t *spec = NULL;
1660         const uint8_t *mask = NULL;
1661         int rc;
1662
1663         /*
1664          * We're about to start processing inner frame items.
1665          * Process pattern data that has been deferred so far
1666          * and reset pattern data storage.
1667          */
1668         rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1669         if (rc != 0)
1670                 return rc;
1671
1672         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1673
1674         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1675                                      &supp_mask, sizeof(supp_mask));
1676
1677         /*
1678          * This tunnel item was preliminarily detected by
1679          * sfc_mae_rule_encap_parse_init(). Default mask
1680          * was also picked by that helper. Use it here.
1681          */
1682         rc = sfc_flow_parse_init(item,
1683                                  (const void **)&spec, (const void **)&mask,
1684                                  (const void *)&supp_mask,
1685                                  ctx_mae->tunnel_def_mask,
1686                                  ctx_mae->tunnel_def_mask_size, error);
1687         if (rc != 0)
1688                 return rc;
1689
1690         /*
1691          * This item and later ones comprise a
1692          * match specification of type ACTION.
1693          */
1694         ctx_mae->match_spec = ctx_mae->match_spec_action;
1695
1696         /* This item and later ones use non-encap. EFX MAE field IDs. */
1697         ctx_mae->field_ids_remap = field_ids_no_remap;
1698
1699         if (spec == NULL)
1700                 return 0;
1701
1702         /*
1703          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is 32 bits wide.
1704          * Copy the 24-bit big-endian VNI into it at offset 1.
1705          * The leading byte is 0 both in the mask and in the value.
1706          */
1707         vxp = (const struct rte_flow_item_vxlan *)spec;
1708         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1709
1710         vxp = (const struct rte_flow_item_vxlan *)mask;
1711         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1712
1713         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1714                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
1715                                           sizeof(vnet_id_v), vnet_id_v,
1716                                           sizeof(vnet_id_m), vnet_id_m);
1717         if (rc != 0) {
1718                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1719                                         item, "Failed to set VXLAN VNI");
1720         }
1721
1722         return rc;
1723 }
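
/*
 * Worked example (illustrative only): for VNI 0x123456, the copy at
 * offset 1 above produces
 *
 *     vnet_id_v[] = { 0x00, 0x12, 0x34, 0x56 };
 *
 * i.e. the 24-bit big-endian VNI padded with a leading zero byte to fill
 * the 32-bit EFX_MAE_FIELD_ENC_VNET_ID_BE field. The mask is padded the
 * same way, so the extra byte never participates in matching.
 */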
1724
1725 static const struct sfc_flow_item sfc_flow_items[] = {
1726         {
1727                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1728                 /*
1729                  * In terms of RTE flow, this item is a META one,
1730                  * and its position in the pattern does not matter.
1731                  */
1732                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1733                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1734                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1735                 .parse = sfc_mae_rule_parse_item_port_id,
1736         },
1737         {
1738                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1739                 /*
1740                  * In terms of RTE flow, this item is a META one,
1741                  * and its position in the pattern does not matter.
1742                  */
1743                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1744                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1745                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1746                 .parse = sfc_mae_rule_parse_item_phy_port,
1747         },
1748         {
1749                 .type = RTE_FLOW_ITEM_TYPE_PF,
1750                 /*
1751                  * In terms of RTE flow, this item is a META one,
1752                  * and its position in the pattern does not matter.
1753                  */
1754                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1755                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1756                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1757                 .parse = sfc_mae_rule_parse_item_pf,
1758         },
1759         {
1760                 .type = RTE_FLOW_ITEM_TYPE_VF,
1761                 /*
1762                  * In terms of RTE flow, this item is a META one,
1763                  * and its position in the pattern does not matter.
1764                  */
1765                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1766                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1767                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1768                 .parse = sfc_mae_rule_parse_item_vf,
1769         },
1770         {
1771                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1772                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1773                 .layer = SFC_FLOW_ITEM_L2,
1774                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1775                 .parse = sfc_mae_rule_parse_item_eth,
1776         },
1777         {
1778                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1779                 .prev_layer = SFC_FLOW_ITEM_L2,
1780                 .layer = SFC_FLOW_ITEM_L2,
1781                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1782                 .parse = sfc_mae_rule_parse_item_vlan,
1783         },
1784         {
1785                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1786                 .prev_layer = SFC_FLOW_ITEM_L2,
1787                 .layer = SFC_FLOW_ITEM_L3,
1788                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1789                 .parse = sfc_mae_rule_parse_item_ipv4,
1790         },
1791         {
1792                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1793                 .prev_layer = SFC_FLOW_ITEM_L2,
1794                 .layer = SFC_FLOW_ITEM_L3,
1795                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1796                 .parse = sfc_mae_rule_parse_item_ipv6,
1797         },
1798         {
1799                 .type = RTE_FLOW_ITEM_TYPE_TCP,
1800                 .prev_layer = SFC_FLOW_ITEM_L3,
1801                 .layer = SFC_FLOW_ITEM_L4,
1802                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1803                 .parse = sfc_mae_rule_parse_item_tcp,
1804         },
1805         {
1806                 .type = RTE_FLOW_ITEM_TYPE_UDP,
1807                 .prev_layer = SFC_FLOW_ITEM_L3,
1808                 .layer = SFC_FLOW_ITEM_L4,
1809                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1810                 .parse = sfc_mae_rule_parse_item_udp,
1811         },
1812         {
1813                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1814                 .prev_layer = SFC_FLOW_ITEM_L4,
1815                 .layer = SFC_FLOW_ITEM_START_LAYER,
1816                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1817                 .parse = sfc_mae_rule_parse_item_tunnel,
1818         },
1819         {
1820                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1821                 .prev_layer = SFC_FLOW_ITEM_L4,
1822                 .layer = SFC_FLOW_ITEM_START_LAYER,
1823                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1824                 .parse = sfc_mae_rule_parse_item_tunnel,
1825         },
1826         {
1827                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1828                 .prev_layer = SFC_FLOW_ITEM_L3,
1829                 .layer = SFC_FLOW_ITEM_START_LAYER,
1830                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1831                 .parse = sfc_mae_rule_parse_item_tunnel,
1832         },
1833 };
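
/*
 * Walk-through (illustrative): the prev_layer / layer values above form a
 * small state machine which sfc_flow_parse_pattern() advances item by
 * item. A typical decap pattern
 *
 *     ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP / END
 *
 * goes START_LAYER -> L2 -> L3 -> L4, after which item VXLAN resets the
 * state to START_LAYER so that the inner frame is validated against the
 * same table. META items (PORT_ID, PHY_PORT, PF, VF) carry ANY_LAYER and
 * leave the state untouched.
 */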
1834
1835 static int
1836 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1837                            struct sfc_mae_parse_ctx *ctx,
1838                            struct sfc_mae_outer_rule **rulep,
1839                            struct rte_flow_error *error)
1840 {
1841         struct sfc_mae_outer_rule *rule;
1842         int rc;
1843
1844         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1845                 *rulep = NULL;
1846                 return 0;
1847         }
1848
1849         SFC_ASSERT(ctx->match_spec_outer != NULL);
1850
1851         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1852                 return rte_flow_error_set(error, ENOTSUP,
1853                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1854                                           "Inconsistent pattern (outer)");
1855         }
1856
1857         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1858                                            ctx->encap_type);
1859         if (*rulep != NULL) {
1860                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1861         } else {
1862                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1863                                             ctx->encap_type, rulep);
1864                 if (rc != 0) {
1865                         return rte_flow_error_set(error, rc,
1866                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1867                                         "Failed to process the pattern");
1868                 }
1869         }
1870
1871         /* The spec has now been tracked by the outer rule entry. */
1872         ctx->match_spec_outer = NULL;
1873
1874         /*
1875          * Depending on whether we reuse an existing outer rule or create a
1876          * new one (see above), outer rule ID is either a valid value or
1877          * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
1878          * specification (and the full mask, too) in order to have correct
1879          * class comparisons of the new rule with existing ones.
1880          * Also, action rule match specification will be validated shortly,
1881          * and having the full mask set for outer rule ID indicates that we
1882          * will use this field, and support for this field has to be checked.
1883          */
1884         rule = *rulep;
1885         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1886                                                   &rule->fw_rsrc.rule_id);
1887         if (rc != 0) {
1888                 sfc_mae_outer_rule_del(sa, *rulep);
1889                 *rulep = NULL;
1890
1891                 return rte_flow_error_set(error, rc,
1892                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1893                                           "Failed to process the pattern");
1894         }
1895
1896         return 0;
1897 }
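
/*
 * Reuse sketch (an assumption about the helpers, see their definitions):
 * the attach-or-add sequence above follows the driver's generic resource
 * sharing pattern, roughly
 *
 *     rule = sfc_mae_outer_rule_attach(sa, spec, type);
 *     if (rule == NULL)
 *             rc = sfc_mae_outer_rule_add(sa, spec, type, &rule);
 *
 * so two flows with equal outer matches reference a single outer rule
 * entry, and the match specification is finalised here only when an
 * existing entry (with its own copy of the spec) has been found.
 */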
1898
1899 static int
1900 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1901                               const struct rte_flow_item pattern[],
1902                               struct sfc_mae_parse_ctx *ctx,
1903                               struct rte_flow_error *error)
1904 {
1905         struct sfc_mae *mae = &sa->mae;
1906         int rc;
1907
1908         if (pattern == NULL) {
1909                 rte_flow_error_set(error, EINVAL,
1910                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1911                                    "NULL pattern");
1912                 return -rte_errno;
1913         }
1914
1915         for (;;) {
1916                 switch (pattern->type) {
1917                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1918                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
1919                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
1920                         ctx->tunnel_def_mask_size =
1921                                 sizeof(rte_flow_item_vxlan_mask);
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_GENEVE:
1924                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
1925                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
1926                         ctx->tunnel_def_mask_size =
1927                                 sizeof(rte_flow_item_geneve_mask);
1928                         break;
1929                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1930                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1931                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
1932                         ctx->tunnel_def_mask_size =
1933                                 sizeof(rte_flow_item_nvgre_mask);
1934                         break;
1935                 case RTE_FLOW_ITEM_TYPE_END:
1936                         break;
1937                 default:
1938                         ++pattern;
1939                         continue;
1940                 }
1941
1942                 break;
1943         }
1944
1945         if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
1946                 return 0;
1947
1948         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
1949                 return rte_flow_error_set(error, ENOTSUP,
1950                                           RTE_FLOW_ERROR_TYPE_ITEM,
1951                                           pattern, "Unsupported tunnel item");
1952         }
1953
1954         if (ctx->priority >= mae->nb_outer_rule_prios_max) {
1955                 return rte_flow_error_set(error, ENOTSUP,
1956                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1957                                           NULL, "Unsupported priority level");
1958         }
1959
1960         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
1961                                      &ctx->match_spec_outer);
1962         if (rc != 0) {
1963                 return rte_flow_error_set(error, rc,
1964                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1965                         "Failed to initialise outer rule match specification");
1966         }
1967
1968         /* Outermost items comprise a match specification of type OUTER. */
1969         ctx->match_spec = ctx->match_spec_outer;
1970
1971         /* Outermost items use "ENC" EFX MAE field IDs. */
1972         ctx->field_ids_remap = field_ids_remap_to_encap;
1973
1974         return 0;
1975 }
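
/*
 * Behaviour sketch (illustrative): for a pattern like
 *
 *     ETH / IPV4 / UDP / VXLAN / ETH / ... / END
 *
 * the scan above stops at the first tunnel item, records
 * EFX_TUNNEL_PROTOCOL_VXLAN together with rte_flow_item_vxlan_mask as
 * the default mask and initialises an OUTER match specification, so
 * every item preceding VXLAN is parsed with "ENC" field IDs. Patterns
 * with no tunnel item return early and keep the preset encap_type
 * EFX_TUNNEL_PROTOCOL_NONE.
 */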
1976
1977 static void
1978 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
1979                               struct sfc_mae_parse_ctx *ctx)
1980 {
1981         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1982                 return;
1983
1984         if (ctx->match_spec_outer != NULL)
1985                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1986 }
1987
1988 int
1989 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1990                            const struct rte_flow_item pattern[],
1991                            struct sfc_flow_spec_mae *spec,
1992                            struct rte_flow_error *error)
1993 {
1994         struct sfc_mae_parse_ctx ctx_mae;
1995         struct sfc_flow_parse_ctx ctx;
1996         int rc;
1997
1998         memset(&ctx_mae, 0, sizeof(ctx_mae));
1999         ctx_mae.priority = spec->priority;
2000         ctx_mae.sa = sa;
2001
2002         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
2003                                      spec->priority,
2004                                      &ctx_mae.match_spec_action);
2005         if (rc != 0) {
2006                 rc = rte_flow_error_set(error, rc,
2007                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2008                         "Failed to initialise action rule match specification");
2009                 goto fail_init_match_spec_action;
2010         }
2011
2012         /*
2013          * As a preliminary setting, assume that there is no encapsulation
2014          * in the pattern. That is, pattern items are about to comprise a
2015          * match specification of type ACTION and use non-encap. field IDs.
2016          *
2017          * sfc_mae_rule_encap_parse_init() below may override this.
2018          */
2019         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2020         ctx_mae.match_spec = ctx_mae.match_spec_action;
2021         ctx_mae.field_ids_remap = field_ids_no_remap;
2022
2023         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
2024         ctx.mae = &ctx_mae;
2025
2026         rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
2027         if (rc != 0)
2028                 goto fail_encap_parse_init;
2029
2030         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2031                                     pattern, &ctx, error);
2032         if (rc != 0)
2033                 goto fail_parse_pattern;
2034
2035         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
2036         if (rc != 0)
2037                 goto fail_process_pattern_data;
2038
2039         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
2040         if (rc != 0)
2041                 goto fail_process_outer;
2042
2043         if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
2044                 rc = rte_flow_error_set(error, ENOTSUP,
2045                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2046                                         "Inconsistent pattern");
2047                 goto fail_validate_match_spec_action;
2048         }
2049
2050         spec->match_spec = ctx_mae.match_spec_action;
2051
2052         return 0;
2053
2054 fail_validate_match_spec_action:
2055 fail_process_outer:
2056 fail_process_pattern_data:
2057 fail_parse_pattern:
2058         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
2059
2060 fail_encap_parse_init:
2061         efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
2062
2063 fail_init_match_spec_action:
2064         return rc;
2065 }
2066
2067 /*
2068  * An action supported by MAE may correspond to a bundle of RTE flow actions,
2069  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
2070  * That is, related RTE flow actions need to be tracked as parts of a whole
2071  * so that they can be combined into a single action and submitted to the
2072  * MAE representation of a given rule's action set.
2073  *
2074  * Each RTE flow action provided by an application gets classified as
2075  * one belonging to some bundle type. If an action is not supposed to
2076  * belong to any bundle, or if this action is END, it is described as
2077  * one belonging to a dummy bundle of type EMPTY.
2078  *
2079  * A currently tracked bundle will be submitted if a repeating
2080  * action or an action of different bundle type follows.
2081  */
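
/*
 * Illustrative sequence (assumed input): for the actions
 *
 *     OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / DROP / END
 *
 * the first three accumulate into one VLAN_PUSH bundle; DROP maps to a
 * different bundle type (EMPTY), so the sync step submits the gathered
 * TPID/TCI as a single MAE VLAN push action before DROP itself is
 * parsed. The final sync at END would likewise flush a bundle that is
 * still open.
 */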
2082
2083 enum sfc_mae_actions_bundle_type {
2084         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2085         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2086 };
2087
2088 struct sfc_mae_actions_bundle {
2089         enum sfc_mae_actions_bundle_type        type;
2090
2091         /* Indicates actions already tracked by the current bundle */
2092         uint64_t                                actions_mask;
2093
2094         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2095         rte_be16_t                              vlan_push_tpid;
2096         rte_be16_t                              vlan_push_tci;
2097 };
2098
2099 /*
2100  * Combine configuration of RTE flow actions tracked by the bundle into a
2101  * single action and submit the result to MAE action set specification.
2102  * Do nothing in the case of a dummy action bundle.
2103  */
2104 static int
2105 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2106                               efx_mae_actions_t *spec)
2107 {
2108         int rc = 0;
2109
2110         switch (bundle->type) {
2111         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2112                 break;
2113         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2114                 rc = efx_mae_action_set_populate_vlan_push(
2115                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2116                 break;
2117         default:
2118                 SFC_ASSERT(B_FALSE);
2119                 break;
2120         }
2121
2122         return rc;
2123 }
2124
2125 /*
2126  * Given the type of the next RTE flow action in the line, decide
2127  * whether a new bundle is about to start, and, if this is the case,
2128  * submit and reset the current bundle.
2129  */
2130 static int
2131 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2132                             struct sfc_mae_actions_bundle *bundle,
2133                             efx_mae_actions_t *spec,
2134                             struct rte_flow_error *error)
2135 {
2136         enum sfc_mae_actions_bundle_type bundle_type_new;
2137         int rc;
2138
2139         switch (action->type) {
2140         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2141         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2142         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2143                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2144                 break;
2145         default:
2146                 /*
2147                  * Self-sufficient actions, including END, are handled in this
2148                  * case. No checks for unsupported actions are needed here
2149                  * because parsing doesn't occur at this point.
2150                  */
2151                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2152                 break;
2153         }
2154
2155         if (bundle_type_new != bundle->type ||
2156             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2157                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2158                 if (rc != 0)
2159                         goto fail_submit;
2160
2161                 memset(bundle, 0, sizeof(*bundle));
2162         }
2163
2164         bundle->type = bundle_type_new;
2165
2166         return 0;
2167
2168 fail_submit:
2169         return rte_flow_error_set(error, rc,
2170                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2171                         "Failed to request the (group of) action(s)");
2172 }
2173
2174 static void
2175 sfc_mae_rule_parse_action_of_push_vlan(
2176                             const struct rte_flow_action_of_push_vlan *conf,
2177                             struct sfc_mae_actions_bundle *bundle)
2178 {
2179         bundle->vlan_push_tpid = conf->ethertype;
2180 }
2181
2182 static void
2183 sfc_mae_rule_parse_action_of_set_vlan_vid(
2184                             const struct rte_flow_action_of_set_vlan_vid *conf,
2185                             struct sfc_mae_actions_bundle *bundle)
2186 {
2187         bundle->vlan_push_tci |= (conf->vlan_vid &
2188                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2189 }
2190
2191 static void
2192 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2193                             const struct rte_flow_action_of_set_vlan_pcp *conf,
2194                             struct sfc_mae_actions_bundle *bundle)
2195 {
2196         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2197                                            RTE_LEN2MASK(3, uint8_t)) << 13;
2198
2199         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2200 }
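
/*
 * Worked example (illustrative only): OF_SET_VLAN_VID with vlan_vid 100
 * and OF_SET_VLAN_PCP with vlan_pcp 5 accumulate
 *
 *     vlan_push_tci = RTE_BE16((5 << 13) | 100) = RTE_BE16(0xa064)
 *
 * in the bundle; the DEI bit (bit 12) stays 0. The TCI is submitted
 * together with the TPID by sfc_mae_actions_bundle_submit().
 */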
2201
2202 struct sfc_mae_parsed_item {
2203         const struct rte_flow_item      *item;
2204         size_t                          proto_header_ofst;
2205         size_t                          proto_header_size;
2206 };
2207
2208 /*
2209  * For each 16-bit word of the given header, override the
2210  * bits enforced by the corresponding 16-bit mask.
2211  */
2212 static void
2213 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2214                                 const struct sfc_mae_parsed_item *parsed_items,
2215                                 unsigned int nb_parsed_items)
2216 {
2217         unsigned int item_idx;
2218
2219         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2220                 const struct sfc_mae_parsed_item *parsed_item;
2221                 const struct rte_flow_item *item;
2222                 size_t proto_header_size;
2223                 size_t ofst;
2224
2225                 parsed_item = &parsed_items[item_idx];
2226                 proto_header_size = parsed_item->proto_header_size;
2227                 item = parsed_item->item;
2228
2229                 for (ofst = 0; ofst < proto_header_size;
2230                      ofst += sizeof(rte_be16_t)) {
2231                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2232                         const rte_be16_t *w_maskp;
2233                         const rte_be16_t *w_specp;
2234
2235                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
2236                         w_specp = RTE_PTR_ADD(item->spec, ofst);
2237
2238                         *wp &= ~(*w_maskp);
2239                         *wp |= (*w_specp & *w_maskp);
2240                 }
2241
2242                 header_buf += proto_header_size;
2243         }
2244 }
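
/*
 * Worked example (illustrative only): for a single 16-bit word with
 *
 *     built header word : 0x12b5 (defaults generated by the parser)
 *     item spec word    : 0x00ff
 *     item mask word    : 0x00f0
 *
 * the loop above computes (0x12b5 & ~0x00f0) | (0x00ff & 0x00f0), which
 * is 0x12f5: only the bits covered by the mask are forced to the spec
 * value, while the generated defaults survive in the unmasked bits.
 */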
2245
2246 #define SFC_IPV4_TTL_DEF        0x40
2247 #define SFC_IPV6_VTC_FLOW_DEF   0x60000000
2248 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2249 #define SFC_VXLAN_FLAGS_DEF     0x08000000
2250
2251 static int
2252 sfc_mae_rule_parse_action_vxlan_encap(
2253                             struct sfc_mae *mae,
2254                             const struct rte_flow_action_vxlan_encap *conf,
2255                             efx_mae_actions_t *spec,
2256                             struct rte_flow_error *error)
2257 {
2258         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2259         struct rte_flow_item *pattern = conf->definition;
2260         uint8_t *buf = bounce_eh->buf;
2261
2262         /* This array will keep track of non-VOID pattern items. */
2263         struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2264                                                 2 /* VLAN tags */ +
2265                                                 1 /* IPv4 or IPv6 */ +
2266                                                 1 /* UDP */ +
2267                                                 1 /* VXLAN */];
2268         unsigned int nb_parsed_items = 0;
2269
2270         size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2271         uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2272                                   sizeof(struct rte_ipv6_hdr))];
2273         struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2274         struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2275         struct rte_vxlan_hdr *vxlan = NULL;
2276         struct rte_udp_hdr *udp = NULL;
2277         unsigned int nb_vlan_tags = 0;
2278         size_t next_proto_ofst = 0;
2279         size_t ethertype_ofst = 0;
2280         uint64_t exp_items;
2281
2282         if (pattern == NULL) {
2283                 return rte_flow_error_set(error, EINVAL,
2284                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2285                                 "The encap. header definition is NULL");
2286         }
2287
2288         bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2289         bounce_eh->size = 0;
2290
2291         /*
2292          * Process pattern items and remember non-VOID ones.
2293          * Defer applying masks until after the complete header
2294          * has been built from the pattern items.
2295          */
2296         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2297
2298         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2299                 struct sfc_mae_parsed_item *parsed_item;
2300                 const uint64_t exp_items_extra_vlan[] = {
2301                         RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2302                 };
2303                 size_t proto_header_size;
2304                 rte_be16_t *ethertypep;
2305                 uint8_t *next_protop;
2306                 uint8_t *buf_cur;
2307
2308                 if (pattern->spec == NULL) {
2309                         return rte_flow_error_set(error, EINVAL,
2310                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2311                                         "NULL item spec in the encap. header");
2312                 }
2313
2314                 if (pattern->mask == NULL) {
2315                         return rte_flow_error_set(error, EINVAL,
2316                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2317                                         "NULL item mask in the encap. header");
2318                 }
2319
2320                 if (pattern->last != NULL) {
2321                         /* This is not a match pattern, so disallow range. */
2322                         return rte_flow_error_set(error, EINVAL,
2323                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2324                                         "Range item in the encap. header");
2325                 }
2326
2327                 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2328                         /* Handle VOID separately, for clarity. */
2329                         continue;
2330                 }
2331
2332                 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2333                         return rte_flow_error_set(error, ENOTSUP,
2334                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2335                                         "Unexpected item in the encap. header");
2336                 }
2337
2338                 parsed_item = &parsed_items[nb_parsed_items];
2339                 buf_cur = buf + bounce_eh->size;
2340
2341                 switch (pattern->type) {
2342                 case RTE_FLOW_ITEM_TYPE_ETH:
2343                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2344                                                exp_items);
2345                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2346                                                   hdr) != 0);
2347
2348                         proto_header_size = sizeof(struct rte_ether_hdr);
2349
2350                         ethertype_ofst = eth_ethertype_ofst;
2351
2352                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2353                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2354                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2355                         break;
2356                 case RTE_FLOW_ITEM_TYPE_VLAN:
2357                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2358                                                exp_items);
2359                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2360                                                   hdr) != 0);
2361
2362                         proto_header_size = sizeof(struct rte_vlan_hdr);
2363
2364                         ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2365                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2366
2367                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2368                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2369
2370                         ethertype_ofst =
2371                             bounce_eh->size +
2372                             offsetof(struct rte_vlan_hdr, eth_proto);
2373
2374                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2375                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2376                         exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2377
2378                         ++nb_vlan_tags;
2379                         break;
2380                 case RTE_FLOW_ITEM_TYPE_IPV4:
2381                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2382                                                exp_items);
2383                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2384                                                   hdr) != 0);
2385
2386                         proto_header_size = sizeof(struct rte_ipv4_hdr);
2387
2388                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2389                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2390
2391                         next_proto_ofst =
2392                             bounce_eh->size +
2393                             offsetof(struct rte_ipv4_hdr, next_proto_id);
2394
2395                         ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2396
2397                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2398                         break;
2399                 case RTE_FLOW_ITEM_TYPE_IPV6:
2400                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2401                                                exp_items);
2402                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2403                                                   hdr) != 0);
2404
2405                         proto_header_size = sizeof(struct rte_ipv6_hdr);
2406
2407                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2408                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2409
2410                         next_proto_ofst = bounce_eh->size +
2411                                           offsetof(struct rte_ipv6_hdr, proto);
2412
2413                         ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2414
2415                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2416                         break;
2417                 case RTE_FLOW_ITEM_TYPE_UDP:
2418                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2419                                                exp_items);
2420                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2421                                                   hdr) != 0);
2422
2423                         proto_header_size = sizeof(struct rte_udp_hdr);
2424
2425                         next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2426                         *next_protop = IPPROTO_UDP;
2427
2428                         udp = (struct rte_udp_hdr *)buf_cur;
2429
2430                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2431                         break;
2432                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2433                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2434                                                exp_items);
2435                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2436                                                   hdr) != 0);
2437
2438                         proto_header_size = sizeof(struct rte_vxlan_hdr);
2439
2440                         vxlan = (struct rte_vxlan_hdr *)buf_cur;
2441
2442                         udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2443                         udp->dgram_len = RTE_BE16(sizeof(*udp) +
2444                                                   sizeof(*vxlan));
2445                         udp->dgram_cksum = 0;
2446
2447                         exp_items = 0;
2448                         break;
2449                 default:
2450                         return rte_flow_error_set(error, ENOTSUP,
2451                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2452                                         "Unknown item in the encap. header");
2453                 }
2454
2455                 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2456                         return rte_flow_error_set(error, E2BIG,
2457                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2458                                         "The encap. header is too big");
2459                 }
2460
2461                 if ((proto_header_size & 1) != 0) {
2462                         return rte_flow_error_set(error, EINVAL,
2463                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2464                                         "Odd layer size in the encap. header");
2465                 }
2466
2467                 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2468                 bounce_eh->size += proto_header_size;
2469
2470                 parsed_item->item = pattern;
2471                 parsed_item->proto_header_size = proto_header_size;
2472                 ++nb_parsed_items;
2473         }
2474
2475         if (exp_items != 0) {
2476                 /* Parsing item VXLAN would have reset exp_items to 0. */
2477                 return rte_flow_error_set(error, ENOTSUP,
2478                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2479                                         "No item VXLAN in the encap. header");
2480         }
2481
2482         /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2483         ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2484         ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2485         ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2486                                       sizeof(*vxlan));
2487         /* The HW cannot compute this checksum. */
2488         ipv4->hdr_checksum = 0;
2489         ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2490
2491         ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2492         ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2493         ipv6->payload_len = udp->dgram_len;
2494
2495         vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2496
2497         /* Force the item masks gathered above onto the built header. */
2498         sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2499
2500         return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
2501 }
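
/*
 * Sizing sketch (illustrative): a minimal header definition of
 *
 *     ETH / IPV4 / UDP / VXLAN / END
 *
 * yields bounce_eh->size = 14 + 20 + 8 + 8 = 50 bytes, which has to fit
 * into the bounce buffer sized after the MAE encap. header limit. The
 * UDP datagram length, the IPv4 total length and checksum and the VXLAN
 * flags are filled in by the parser itself rather than taken verbatim
 * from the items.
 */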
2502
2503 static int
2504 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
2505                                efx_mae_actions_t *spec)
2506 {
2507         return efx_mae_action_set_populate_mark(spec, conf->id);
2508 }
2509
2510 static int
2511 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2512                                    const struct rte_flow_action_phy_port *conf,
2513                                    efx_mae_actions_t *spec)
2514 {
2515         efx_mport_sel_t mport;
2516         uint32_t phy_port;
2517         int rc;
2518
2519         if (conf->original != 0)
2520                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2521         else
2522                 phy_port = conf->index;
2523
2524         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2525         if (rc != 0)
2526                 return rc;
2527
2528         return efx_mae_action_set_populate_deliver(spec, &mport);
2529 }
2530
2531 static int
2532 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
2533                                 const struct rte_flow_action_vf *vf_conf,
2534                                 efx_mae_actions_t *spec)
2535 {
2536         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
2537         efx_mport_sel_t mport;
2538         uint32_t vf;
2539         int rc;
2540
2541         if (vf_conf == NULL)
2542                 vf = EFX_PCI_VF_INVALID;
2543         else if (vf_conf->original != 0)
2544                 vf = encp->enc_vf;
2545         else
2546                 vf = vf_conf->id;
2547
2548         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
2549         if (rc != 0)
2550                 return rc;
2551
2552         return efx_mae_action_set_populate_deliver(spec, &mport);
2553 }
2554
2555 static int
2556 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
2557                                   const struct rte_flow_action_port_id *conf,
2558                                   efx_mae_actions_t *spec)
2559 {
2560         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2561         struct sfc_mae *mae = &sa->mae;
2562         efx_mport_sel_t mport;
2563         uint16_t port_id;
2564         int rc;
2565
2566         port_id = (conf->original != 0) ? sas->port_id : conf->id;
2567
2568         rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
2569                                            port_id, &mport);
2570         if (rc != 0)
2571                 return rc;
2572
2573         return efx_mae_action_set_populate_deliver(spec, &mport);
2574 }
2575
2576 static int
2577 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2578                           const struct rte_flow_action *action,
2579                           const struct sfc_mae_outer_rule *outer_rule,
2580                           struct sfc_mae_actions_bundle *bundle,
2581                           efx_mae_actions_t *spec,
2582                           struct rte_flow_error *error)
2583 {
2584         bool custom_error = B_FALSE;
2585         int rc = 0;
2586
2587         switch (action->type) {
2588         case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2589                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
2590                                        bundle->actions_mask);
2591                 if (outer_rule == NULL ||
2592                     outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
2593                         rc = EINVAL;
2594                 else
2595                         rc = efx_mae_action_set_populate_decap(spec);
2596                 break;
2597         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2598                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2599                                        bundle->actions_mask);
2600                 rc = efx_mae_action_set_populate_vlan_pop(spec);
2601                 break;
2602         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2603                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2604                                        bundle->actions_mask);
2605                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
2606                 break;
2607         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2608                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2609                                        bundle->actions_mask);
2610                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
2611                 break;
2612         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2613                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2614                                        bundle->actions_mask);
2615                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
2616                 break;
2617         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2618                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
2619                                        bundle->actions_mask);
2620                 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
2621                                                            action->conf,
2622                                                            spec, error);
2623                 custom_error = B_TRUE;
2624                 break;
2625         case RTE_FLOW_ACTION_TYPE_FLAG:
2626                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2627                                        bundle->actions_mask);
2628                 rc = efx_mae_action_set_populate_flag(spec);
2629                 break;
2630         case RTE_FLOW_ACTION_TYPE_MARK:
2631                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2632                                        bundle->actions_mask);
2633                 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
2634                 break;
2635         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2636                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2637                                        bundle->actions_mask);
2638                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
2639                 break;
2640         case RTE_FLOW_ACTION_TYPE_PF:
2641                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2642                                        bundle->actions_mask);
2643                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
2644                 break;
2645         case RTE_FLOW_ACTION_TYPE_VF:
2646                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2647                                        bundle->actions_mask);
2648                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
2649                 break;
2650         case RTE_FLOW_ACTION_TYPE_PORT_ID:
2651                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2652                                        bundle->actions_mask);
2653                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
2654                 break;
2655         case RTE_FLOW_ACTION_TYPE_DROP:
2656                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2657                                        bundle->actions_mask);
2658                 rc = efx_mae_action_set_populate_drop(spec);
2659                 break;
2660         default:
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                 "Unsupported action");
2664         }
2665
2666         if (rc == 0) {
2667                 bundle->actions_mask |= (1ULL << action->type);
2668         } else if (!custom_error) {
2669                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2670                                 NULL, "Failed to request the action");
2671         }
2672
2673         return rc;
2674 }
2675
2676 static void
2677 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
2678 {
2679         bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
2680 }
2681
2682 static int
2683 sfc_mae_process_encap_header(struct sfc_adapter *sa,
2684                              const struct sfc_mae_bounce_eh *bounce_eh,
2685                              struct sfc_mae_encap_header **encap_headerp)
2686 {
2687         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
2688                 *encap_headerp = NULL;
2689                 return 0;
2690         }
2691
2692         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
2693         if (*encap_headerp != NULL)
2694                 return 0;
2695
2696         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
2697 }
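
/*
 * The helper above follows the attach-or-add idiom used for shared MAE
 * resources: try to reuse an existing encap. header entry first and only
 * create a new one on a lookup miss. A hedged sketch of the pattern,
 * with hypothetical names:
 *
 *	obj = lookup_and_ref(registry, key);
 *	if (obj == NULL)
 *		rc = create_and_register(registry, key, &obj);
 *
 * Deletion is symmetric: sfc_mae_encap_header_del() is expected to drop
 * one reference and free the entry only when the count reaches zero.
 */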
2698
2699 int
2700 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2701                            const struct rte_flow_action actions[],
2702                            struct sfc_flow_spec_mae *spec_mae,
2703                            struct rte_flow_error *error)
2704 {
2705         struct sfc_mae_encap_header *encap_header = NULL;
2706         struct sfc_mae_actions_bundle bundle = {0};
2707         const struct rte_flow_action *action;
2708         struct sfc_mae *mae = &sa->mae;
2709         efx_mae_actions_t *spec;
2710         int rc;
2711
2712         rte_errno = 0;
2713
2714         if (actions == NULL) {
2715                 return rte_flow_error_set(error, EINVAL,
2716                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
2717                                 "NULL actions");
2718         }
2719
2720         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
2721         if (rc != 0)
2722                 goto fail_action_set_spec_init;
2723
2724         /* Cleanup after previous encap. header bounce buffer usage. */
2725         sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
2726
2727         for (action = actions;
2728              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
2729                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2730                 if (rc != 0)
2731                         goto fail_rule_parse_action;
2732
2733                 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
2734                                                &bundle, spec, error);
2735                 if (rc != 0)
2736                         goto fail_rule_parse_action;
2737         }
2738
2739         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2740         if (rc != 0)
2741                 goto fail_rule_parse_action;
2742
2743         rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
2744         if (rc != 0)
2745                 goto fail_process_encap_header;
2746
2747         spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
2748                                                          spec);
2749         if (spec_mae->action_set != NULL) {
2750                 sfc_mae_encap_header_del(sa, encap_header);
2751                 efx_mae_action_set_spec_fini(sa->nic, spec);
2752                 return 0;
2753         }
2754
2755         rc = sfc_mae_action_set_add(sa, spec, encap_header,
2756                                     &spec_mae->action_set);
2757         if (rc != 0)
2758                 goto fail_action_set_add;
2759
2760         return 0;
2761
2762 fail_action_set_add:
2763         sfc_mae_encap_header_del(sa, encap_header);
2764
2765 fail_process_encap_header:
2766 fail_rule_parse_action:
2767         efx_mae_action_set_spec_fini(sa->nic, spec);
2768
2769 fail_action_set_spec_init:
2770         if (rc > 0 && rte_errno == 0) {
2771                 rc = rte_flow_error_set(error, rc,
2772                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2773                         NULL, "Failed to process the action");
2774         }
2775         return rc;
2776 }
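
/*
 * A note on the error convention in the parser above (a reading aid;
 * the helper name below is hypothetical): parse helpers return positive
 * errno values, while rte_flow_error_set() stores the message, sets
 * rte_errno and returns a negative value. The catch-all at the end
 * therefore fires only when rc is still a bare positive errno and no
 * specific message has been recorded yet.
 *
 *	rc = parse_helper(...);			// positive errno on failure
 *	if (rc > 0 && rte_errno == 0)		// no specific message yet
 *		rc = rte_flow_error_set(error, rc, ...);
 */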
2777
2778 static bool
2779 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2780                         const efx_mae_match_spec_t *left,
2781                         const efx_mae_match_spec_t *right)
2782 {
2783         bool have_same_class;
2784         int rc;
2785
2786         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
2787                                            &have_same_class);
2788
2789         return (rc == 0) ? have_same_class : false;
2790 }
2791
2792 static int
2793 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
2794                                 struct sfc_mae_outer_rule *rule)
2795 {
2796         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2797         struct sfc_mae_outer_rule *entry;
2798         struct sfc_mae *mae = &sa->mae;
2799
2800         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
2801                 /* An active rule is reused. Its class is known to be valid. */
2802                 return 0;
2803         }
2804
2805         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
2806                               sfc_mae_outer_rules, entries) {
2807                 const efx_mae_match_spec_t *left = entry->match_spec;
2808                 const efx_mae_match_spec_t *right = rule->match_spec;
2809
2810                 if (entry == rule)
2811                         continue;
2812
2813                 if (sfc_mae_rules_class_cmp(sa, left, right))
2814                         return 0;
2815         }
2816
2817         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2818                  "support for outer frame pattern items is not guaranteed; "
2819                  "other than that, the items are valid from the SW standpoint");
2820         return 0;
2821 }
2822
2823 static int
2824 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
2825                                  struct sfc_flow_spec_mae *spec)
2826 {
2827         const struct rte_flow *entry;
2828
2829         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2830                 const struct sfc_flow_spec *entry_spec = &entry->spec;
2831                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2832                 const efx_mae_match_spec_t *left = es_mae->match_spec;
2833                 const efx_mae_match_spec_t *right = spec->match_spec;
2834
2835                 switch (entry_spec->type) {
2836                 case SFC_FLOW_SPEC_FILTER:
2837                         /* Ignore VNIC-level flows */
2838                         break;
2839                 case SFC_FLOW_SPEC_MAE:
2840                         if (sfc_mae_rules_class_cmp(sa, left, right))
2841                                 return 0;
2842                         break;
2843                 default:
2844                         SFC_ASSERT(false);
2845                 }
2846         }
2847
2848         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2849                  "support for inner frame pattern items is not guaranteed; "
2850                  "other than that, the items are valid from the SW standpoint");
2851         return 0;
2852 }
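
/*
 * Hedged illustration of the class check shared by both verifiers
 * above: two match specs are considered class-equal when they test the
 * same fields with the same masks, differing only in the matched
 * values, so class equality with an active rule implies FW support.
 * The spec names below are placeholders.
 *
 *	bool same_class = false;
 *
 *	rc = efx_mae_match_specs_class_cmp(sa->nic, active_spec,
 *					   candidate_spec, &same_class);
 *	if (rc == 0 && same_class)
 *		... the candidate is known to be FW-supported ...
 */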
2853
2854 /**
2855  * Confirm that a given flow can be accepted by the FW.
2856  *
2857  * @param sa
2858  *   Software adapter context
2859  * @param flow
2860  *   Flow to be verified
2861  * @return
2862  *   Zero on success, non-zero on error.
2863  *   A special value of EAGAIN indicates that the adapter is
2864  *   not in the started state. The started state is required
2865  *   because the only way to verify a flow is to compare its
2866  *   rule class with the classes of the active rules, and such
2867  *   classes are known to be supported by the FW.
2868  */
2869 int
2870 sfc_mae_flow_verify(struct sfc_adapter *sa,
2871                     struct rte_flow *flow)
2872 {
2873         struct sfc_flow_spec *spec = &flow->spec;
2874         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2875         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2876         int rc;
2877
2878         SFC_ASSERT(sfc_adapter_is_locked(sa));
2879
2880         if (sa->state != SFC_ADAPTER_STARTED)
2881                 return EAGAIN;
2882
2883         if (outer_rule != NULL) {
2884                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
2885                 if (rc != 0)
2886                         return rc;
2887         }
2888
2889         return sfc_mae_action_rule_class_verify(sa, spec_mae);
2890 }
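
/*
 * Illustrative caller-side handling (a sketch, not the real entry
 * point): EAGAIN from sfc_mae_flow_verify() means "cannot verify now"
 * rather than "invalid", so a caller might report it as a retryable
 * condition.
 *
 *	sfc_adapter_lock(sa);
 *	rc = sfc_mae_flow_verify(sa, flow);
 *	sfc_adapter_unlock(sa);
 *	if (rc == EAGAIN)
 *		... ask the user to start the adapter and retry ...
 *	else if (rc != 0)
 *		... report the flow as unsupported ...
 */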
2891
2892 int
2893 sfc_mae_flow_insert(struct sfc_adapter *sa,
2894                     struct rte_flow *flow)
2895 {
2896         struct sfc_flow_spec *spec = &flow->spec;
2897         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2898         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2899         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2900         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
2901         int rc;
2902
2903         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2904         SFC_ASSERT(action_set != NULL);
2905
2906         if (outer_rule != NULL) {
2907                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
2908                                                spec_mae->match_spec);
2909                 if (rc != 0)
2910                         goto fail_outer_rule_enable;
2911         }
2912
2913         rc = sfc_mae_action_set_enable(sa, action_set);
2914         if (rc != 0)
2915                 goto fail_action_set_enable;
2916
2917         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
2918                                         NULL, &fw_rsrc->aset_id,
2919                                         &spec_mae->rule_id);
2920         if (rc != 0)
2921                 goto fail_action_rule_insert;
2922
2923         sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
2924                 flow, spec_mae->rule_id.id);
2925
2926         return 0;
2927
2928 fail_action_rule_insert:
2929         sfc_mae_action_set_disable(sa, action_set);
2930
2931 fail_action_set_enable:
2932         if (outer_rule != NULL)
2933                 sfc_mae_outer_rule_disable(sa, outer_rule);
2934
2935 fail_outer_rule_enable:
2936         return rc;
2937 }
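
/*
 * Note on ordering above: resources are enabled bottom-up (outer rule,
 * then action set, then the action rule itself), and the failure path
 * releases them in exact reverse order. A minimal lifecycle sketch,
 * assuming the adapter is started and the flow has been parsed:
 *
 *	rc = sfc_mae_flow_insert(sa, flow);
 *	if (rc == 0) {
 *		... traffic is steered by the rule ...
 *		(void)sfc_mae_flow_remove(sa, flow);
 *	}
 */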
2938
2939 int
2940 sfc_mae_flow_remove(struct sfc_adapter *sa,
2941                     struct rte_flow *flow)
2942 {
2943         struct sfc_flow_spec *spec = &flow->spec;
2944         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2945         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2946         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2947         int rc;
2948
2949         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
2950         SFC_ASSERT(action_set != NULL);
2951
2952         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
2953         if (rc != 0) {
2954                 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
2955                         flow, spec_mae->rule_id.id, strerror(rc));
2956         }
2957         sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
2958                 flow, spec_mae->rule_id.id);
2959         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
2960
2961         sfc_mae_action_set_disable(sa, action_set);
2962
2963         if (outer_rule != NULL)
2964                 sfc_mae_outer_rule_disable(sa, outer_rule);
2965
2966         return 0;
2967 }