net/sfc: fix outer rule and encap rollback on error
drivers/net/sfc/sfc_mae.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_vxlan.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
                                              mportp);
}

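/*
 * Initialise MAE support for the adapter: bring up the MAE in the NIC
 * firmware, learn its limits, register the entity m-port with the
 * switch infrastructure and allocate a bounce buffer that is used to
 * assemble encap. headers during flow parsing.
 */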
int
sfc_mae_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
        efx_mae_limits_t limits;
        int rc;

        sfc_log_init(sa, "entry");

        if (!encp->enc_mae_supported) {
                mae->status = SFC_MAE_STATUS_UNSUPPORTED;
                return 0;
        }

        sfc_log_init(sa, "init MAE");
        rc = efx_mae_init(sa->nic);
        if (rc != 0)
                goto fail_mae_init;

        sfc_log_init(sa, "get MAE limits");
        rc = efx_mae_get_limits(sa->nic, &limits);
        if (rc != 0)
                goto fail_mae_get_limits;

        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
                goto fail_mae_assign_entity_mport;

        sfc_log_init(sa, "assign RTE switch domain");
        rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
        if (rc != 0)
                goto fail_mae_assign_switch_domain;

        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
        switch_port_request.entity_mportp = &entity_mport;
        /*
         * As of now, the driver does not support representors, so
         * RTE ethdev MPORT simply matches that of the entity.
         */
        switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;

        sfc_log_init(sa, "allocate encap. header bounce buffer");
        bounce_eh->buf_size = limits.eml_encap_header_size_limit;
        bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
                                    bounce_eh->buf_size, 0);
        if (bounce_eh->buf == NULL) {
                rc = ENOMEM;
                goto fail_mae_alloc_bounce_eh;
        }

        mae->status = SFC_MAE_STATUS_SUPPORTED;
        mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
        mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
        mae->encap_types_supported = limits.eml_encap_types_supported;
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);

        sfc_log_init(sa, "done");

        return 0;

fail_mae_alloc_bounce_eh:
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
        efx_mae_fini(sa->nic);

fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);

        return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        enum sfc_mae_status status_prev = mae->status;

        sfc_log_init(sa, "entry");

        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;

        if (status_prev != SFC_MAE_STATUS_SUPPORTED)
                return;

        rte_free(mae->bounce_eh.buf);

        efx_mae_fini(sa->nic);

        sfc_log_init(sa, "done");
}

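/*
 * The helpers below manage driver-level MAE objects (outer rules,
 * encap. headers, action sets). Each object is reference-counted at
 * two levels: attach()/add()/del() track users of the software object,
 * while enable()/disable() track users of the corresponding FW
 * resource, which is allocated lazily on first enable and freed on
 * last disable.
 */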
static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
                          const efx_mae_match_spec_t *match_spec,
                          efx_tunnel_protocol_t encap_type)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
                if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
                    rule->encap_type == encap_type) {
                        ++(rule->refcnt);
                        return rule;
                }
        }

        return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
                       efx_mae_match_spec_t *match_spec,
                       efx_tunnel_protocol_t encap_type,
                       struct sfc_mae_outer_rule **rulep)
{
        struct sfc_mae_outer_rule *rule;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
        if (rule == NULL)
                return ENOMEM;

        rule->refcnt = 1;
        rule->match_spec = match_spec;
        rule->encap_type = encap_type;

        rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

        *rulep = rule;

        return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
                       struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(rule->refcnt != 0);

        --(rule->refcnt);

        if (rule->refcnt != 0)
                return;

        SFC_ASSERT(rule->fw_rsrc.rule_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(rule->fw_rsrc.refcnt == 0);

        efx_mae_match_spec_fini(sa->nic, rule->match_spec);

        TAILQ_REMOVE(&mae->outer_rules, rule, entries);
        rte_free(rule);
}

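/*
 * Insert the outer rule in the FW (on first use) and reference it from
 * the given action rule match specification. Should setting the rule
 * ID in the specification fail, roll back: remove the rule which has
 * just been inserted so that the FW resource is not leaked.
 */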
static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
                          struct sfc_mae_outer_rule *rule,
                          efx_mae_match_spec_t *match_spec_action)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(rule->match_spec != NULL);

                rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
                                               rule->encap_type,
                                               &fw_rsrc->rule_id);
                if (rc != 0)
                        return rc;
        }

        rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
                                                  &fw_rsrc->rule_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_outer_rule_remove(sa->nic,
                                                        &fw_rsrc->rule_id);
                        fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
                }
                return rc;
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static int
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
                           struct sfc_mae_outer_rule *rule)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(fw_rsrc->refcnt != 0);

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
                if (rc != 0)
                        return rc;

                fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);

        return 0;
}

static struct sfc_mae_encap_header *
sfc_mae_encap_header_attach(struct sfc_adapter *sa,
                            const struct sfc_mae_bounce_eh *bounce_eh)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
                if (encap_header->size == bounce_eh->size &&
                    memcmp(encap_header->buf, bounce_eh->buf,
                           bounce_eh->size) == 0) {
                        ++(encap_header->refcnt);
                        return encap_header;
                }
        }

        return NULL;
}

static int
sfc_mae_encap_header_add(struct sfc_adapter *sa,
                         const struct sfc_mae_bounce_eh *bounce_eh,
                         struct sfc_mae_encap_header **encap_headerp)
{
        struct sfc_mae_encap_header *encap_header;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        encap_header = rte_zmalloc("sfc_mae_encap_header",
                                   sizeof(*encap_header), 0);
        if (encap_header == NULL)
                return ENOMEM;

        encap_header->size = bounce_eh->size;

        encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
                                       encap_header->size, 0);
        if (encap_header->buf == NULL) {
                rte_free(encap_header);
                return ENOMEM;
        }

        rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);

        encap_header->refcnt = 1;
        encap_header->type = bounce_eh->type;
        encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);

        *encap_headerp = encap_header;

        return 0;
}

static void
sfc_mae_encap_header_del(struct sfc_adapter *sa,
                         struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae *mae = &sa->mae;

        if (encap_header == NULL)
                return;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(encap_header->refcnt != 0);

        --(encap_header->refcnt);

        if (encap_header->refcnt != 0)
                return;

        SFC_ASSERT(encap_header->fw_rsrc.eh_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(encap_header->fw_rsrc.refcnt == 0);

        TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
        rte_free(encap_header->buf);
        rte_free(encap_header);
}

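/*
 * Allocate the encap. header in the FW (on first use) and fill its ID
 * in the action set specification. Mirrors the outer rule case: should
 * filling in the ID fail, a header allocated by this call is freed
 * again so that the FW resource is not leaked.
 */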
static int
sfc_mae_encap_header_enable(struct sfc_adapter *sa,
                            struct sfc_mae_encap_header *encap_header,
                            efx_mae_actions_t *action_set_spec)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(encap_header->buf != NULL);
                SFC_ASSERT(encap_header->size != 0);

                rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
                                                encap_header->buf,
                                                encap_header->size,
                                                &fw_rsrc->eh_id);
                if (rc != 0)
                        return rc;
        }

        rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
                                              &fw_rsrc->eh_id);
        if (rc != 0) {
                if (fw_rsrc->refcnt == 0) {
                        (void)efx_mae_encap_header_free(sa->nic,
                                                        &fw_rsrc->eh_id);
                        fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
                }
                return rc;
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static int
sfc_mae_encap_header_disable(struct sfc_adapter *sa,
                             struct sfc_mae_encap_header *encap_header)
{
        struct sfc_mae_fw_rsrc *fw_rsrc;
        int rc;

        if (encap_header == NULL)
                return 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        fw_rsrc = &encap_header->fw_rsrc;

        SFC_ASSERT(fw_rsrc->eh_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(fw_rsrc->refcnt != 0);

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
                if (rc != 0)
                        return rc;

                fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        --(fw_rsrc->refcnt);

        return 0;
}

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
                          const struct sfc_mae_encap_header *encap_header,
                          const efx_mae_actions_t *spec)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
                if (action_set->encap_header == encap_header &&
                    efx_mae_action_set_specs_equal(action_set->spec, spec)) {
                        ++(action_set->refcnt);
                        return action_set;
                }
        }

        return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
                       struct sfc_mae_action_set **action_setp)
{
        struct sfc_mae_action_set *action_set;
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
        if (action_set == NULL)
                return ENOMEM;

        action_set->refcnt = 1;
        action_set->spec = spec;
        action_set->encap_header = encap_header;

        action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

        TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

        *action_setp = action_set;

        return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
                       struct sfc_mae_action_set *action_set)
{
        struct sfc_mae *mae = &sa->mae;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(action_set->refcnt != 0);

        --(action_set->refcnt);

        if (action_set->refcnt != 0)
                return;

        SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);

        efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
        sfc_mae_encap_header_del(sa, action_set->encap_header);
        TAILQ_REMOVE(&mae->action_sets, action_set, entries);
        rte_free(action_set);
}

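/*
 * Enabling an action set first enables its dependencies (the encap.
 * header, if any) and only then allocates the action set itself; if
 * the allocation fails, the encap. header is disabled again.
 */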
static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
                          struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_encap_header *encap_header = action_set->encap_header;
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (fw_rsrc->refcnt == 0) {
                SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
                SFC_ASSERT(action_set->spec != NULL);

                rc = sfc_mae_encap_header_enable(sa, encap_header,
                                                 action_set->spec);
                if (rc != 0)
                        return rc;

                rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
                                              &fw_rsrc->aset_id);
                if (rc != 0) {
                        (void)sfc_mae_encap_header_disable(sa, encap_header);

                        return rc;
                }
        }

        ++(fw_rsrc->refcnt);

        return 0;
}

static int
sfc_mae_action_set_disable(struct sfc_adapter *sa,
                           struct sfc_mae_action_set *action_set)
{
        struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
        int rc;

        SFC_ASSERT(sfc_adapter_is_locked(sa));
        SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
        SFC_ASSERT(fw_rsrc->refcnt != 0);

        if (fw_rsrc->refcnt == 1) {
                rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
                if (rc != 0)
                        return rc;

                fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;

                rc = sfc_mae_encap_header_disable(sa, action_set->encap_header);
                if (rc != 0)
                        return rc;
        }

        --(fw_rsrc->refcnt);

        return 0;
}

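/*
 * Release all driver-level objects referenced by the flow. The flow
 * must not be enabled at this point: its FW rule ID must be invalid.
 */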
void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec;
        struct sfc_flow_spec_mae *spec_mae;

        if (flow == NULL)
                return;

        spec = &flow->spec;

        if (spec == NULL)
                return;

        spec_mae = &spec->mae;

        SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

        if (spec_mae->outer_rule != NULL)
                sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

        if (spec_mae->action_set != NULL)
                sfc_mae_action_set_del(sa, spec_mae->action_set);

        if (spec_mae->match_spec != NULL)
                efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

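/*
 * Write the deferred EtherType values to the match specification: the
 * innermost one goes to field ETHER_TYPE, while the preceding ones (if
 * VLAN tags are matched on) go to VLAN0_PROTO / VLAN1_PROTO.
 */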
static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        const efx_mae_field_id_t field_ids[] = {
                EFX_MAE_FIELD_VLAN0_PROTO_BE,
                EFX_MAE_FIELD_VLAN1_PROTO_BE,
        };
        const struct sfc_mae_ethertype *et;
        unsigned int i;
        int rc;

        /*
         * In accordance with RTE flow API convention, the innermost L2
         * item's "type" ("inner_type") is an L3 EtherType. If there is
         * no L3 item, it's 0x0000/0x0000.
         */
        et = &pdata->ethertypes[pdata->nb_vlan_tags];
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
                                          sizeof(et->value),
                                          (const uint8_t *)&et->value,
                                          sizeof(et->mask),
                                          (const uint8_t *)&et->mask);
        if (rc != 0)
                return rc;

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (i = 0; i < pdata->nb_vlan_tags; ++i) {
                et = &pdata->ethertypes[i];

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[field_ids[i]],
                                                  sizeof(et->value),
                                                  (const uint8_t *)&et->value,
                                                  sizeof(et->mask),
                                                  (const uint8_t *)&et->mask);
                if (rc != 0)
                        return rc;
        }

        return 0;
}

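/*
 * Post-process the EtherType / IP protocol values stashed during
 * pattern parsing: validate that the outer "type" fields carry
 * supported TPIDs, reconcile them with the restrictions imposed by
 * L3/L4 items and, finally, set the corresponding fields in the
 * match specification.
 */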
static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
                                  struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
        struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
        const rte_be16_t supported_tpids[] = {
                /* VLAN standard TPID (always the first element) */
                RTE_BE16(RTE_ETHER_TYPE_VLAN),

                /* Double-tagging TPIDs */
                RTE_BE16(RTE_ETHER_TYPE_QINQ),
                RTE_BE16(RTE_ETHER_TYPE_QINQ1),
                RTE_BE16(RTE_ETHER_TYPE_QINQ2),
                RTE_BE16(RTE_ETHER_TYPE_QINQ3),
        };
        unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
        unsigned int ethertype_idx;
        const uint8_t *valuep;
        const uint8_t *maskp;
        int rc;

        if (pdata->innermost_ethertype_restriction.mask != 0 &&
            pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                /*
                 * If a single VLAN item is followed by an L3 item, the
                 * value of "type" in item ETH can't be a double-tagging
                 * TPID.
                 */
                nb_supported_tpids = 1;
        }

        /*
         * sfc_mae_rule_parse_item_vlan() has already made sure
         * that pdata->nb_vlan_tags does not exceed this figure.
         */
        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        for (ethertype_idx = 0;
             ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
                unsigned int tpid_idx;

                /* Only an exact match is supported. */
                if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
                        rc = EINVAL;
                        goto fail;
                }

                for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
                     tpid_idx < nb_supported_tpids; ++tpid_idx) {
                        if (ethertypes[ethertype_idx].value ==
                            supported_tpids[tpid_idx])
                                break;
                }

                if (tpid_idx == nb_supported_tpids) {
                        rc = EINVAL;
                        goto fail;
                }

                nb_supported_tpids = 1;
        }

        if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
                struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

                if (et->mask == 0) {
                        et->mask = RTE_BE16(0xffff);
                        et->value =
                            pdata->innermost_ethertype_restriction.value;
                } else if (et->mask != RTE_BE16(0xffff) ||
                           et->value !=
                           pdata->innermost_ethertype_restriction.value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        /*
         * Now that the number of VLAN tags is known, set fields
         * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
         * one is a valid L3 EtherType (or 0x0000/0x0000), and the
         * last two are valid TPIDs (or 0x0000/0x0000).
         */
        rc = sfc_mae_set_ethertypes(ctx);
        if (rc != 0)
                goto fail;

        if (pdata->l3_next_proto_restriction_mask == 0xff) {
                if (pdata->l3_next_proto_mask == 0) {
                        pdata->l3_next_proto_mask = 0xff;
                        pdata->l3_next_proto_value =
                            pdata->l3_next_proto_restriction_value;
                } else if (pdata->l3_next_proto_mask != 0xff ||
                           pdata->l3_next_proto_value !=
                           pdata->l3_next_proto_restriction_value) {
                        rc = EINVAL;
                        goto fail;
                }
        }

        valuep = (const uint8_t *)&pdata->l3_next_proto_value;
        maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
        rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                          fremap[EFX_MAE_FIELD_IP_PROTO],
                                          sizeof(pdata->l3_next_proto_value),
                                          valuep,
                                          sizeof(pdata->l3_next_proto_mask),
                                          maskp);
        if (rc != 0)
                goto fail;

        return 0;

fail:
        return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                struct sfc_flow_parse_ctx *ctx,
                                struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_port_id supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_port_id_mask;
        const struct rte_flow_item_port_id *spec = NULL;
        const struct rte_flow_item_port_id *mask = NULL;
        efx_mport_sel_t mport_sel;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_port_id), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PORT_ID pattern item");
        }

        /* If "spec" is not set, could be any port ID */
        if (spec == NULL)
                return 0;

        if (spec->id > UINT16_MAX) {
                return rte_flow_error_set(error, EOVERFLOW,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "The port ID is too large");
        }

        rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
                                           spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't find RTE ethdev by the port ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
                                          &mport_sel, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the port ID");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
                                 struct sfc_flow_parse_ctx *ctx,
                                 struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const struct rte_flow_item_phy_port supp_mask = {
                .index = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_phy_port_mask;
        const struct rte_flow_item_phy_port *spec = NULL;
        const struct rte_flow_item_phy_port *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_phy_port), error);
        if (rc != 0)
                return rc;

        if (mask->index != supp_mask.index) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the PHY_PORT pattern item");
        }

        /* If "spec" is not set, could be any physical port */
        if (spec == NULL)
                return 0;

        rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PHY_PORT index");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PHY_PORT");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
                                            &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF ID");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
                           struct sfc_flow_parse_ctx *ctx,
                           struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
        const struct rte_flow_item_vf supp_mask = {
                .id = 0xffffffff,
        };
        const void *def_mask = &rte_flow_item_vf_mask;
        const struct rte_flow_item_vf *spec = NULL;
        const struct rte_flow_item_vf *mask = NULL;
        efx_mport_sel_t mport_v;
        int rc;

        if (ctx_mae->match_mport_set) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't handle multiple traffic source items");
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask, def_mask,
                                 sizeof(struct rte_flow_item_vf), error);
        if (rc != 0)
                return rc;

        if (mask->id != supp_mask.id) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad mask in the VF pattern item");
        }

        /*
         * If "spec" is not set, the item requests any VF related to the
         * PF of the current DPDK port (but not the PF itself).
         * Reject this match criterion as unsupported.
         */
        if (spec == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Bad spec in the VF pattern item");
        }

        rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to convert the PF + VF IDs");
        }

        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Failed to set MPORT for the PF + VF");
        }

        ctx_mae->match_mport_set = B_TRUE;

        return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
        efx_mae_field_id_t              field_id;
        size_t                          size;
        /* Field offset in the corresponding rte_flow_item_ struct */
        size_t                          ofst;
};

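/*
 * Build the mask of supported fields for an item by setting all-ones
 * over the span of every field locator of the item.
 */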
static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
                             unsigned int nb_field_locators, void *mask_ptr,
                             size_t mask_size)
{
        unsigned int i;

        memset(mask_ptr, 0, mask_size);

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                SFC_ASSERT(fl->ofst + fl->size <= mask_size);
                memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
        }
}

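/*
 * Copy the fields described by the locators from the item spec/mask
 * into the match specification, skipping fields whose handling is
 * deferred to sfc_mae_rule_process_pattern_data().
 */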
static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
                   unsigned int nb_field_locators, const uint8_t *spec,
                   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
                   struct rte_flow_error *error)
{
        const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < nb_field_locators; ++i) {
                const struct sfc_mae_field_locator *fl = &field_locators[i];

                if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
                        continue;

                rc = efx_mae_match_spec_field_set(ctx->match_spec,
                                                  fremap[fl->field_id],
                                                  fl->size, spec + fl->ofst,
                                                  fl->size, mask + fl->ofst);
                if (rc != 0)
                        break;
        }

        if (rc != 0) {
                rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Failed to process item fields");
        }

        return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
                offsetof(struct rte_flow_item_eth, type),
        },
        {
                EFX_MAE_FIELD_ETH_DADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
                offsetof(struct rte_flow_item_eth, dst),
        },
        {
                EFX_MAE_FIELD_ETH_SADDR_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
                offsetof(struct rte_flow_item_eth, src),
        },
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct sfc_flow_parse_ctx *ctx,
                            struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_eth *item_spec;
                const struct rte_flow_item_eth *item_mask;

                item_spec = (const struct rte_flow_item_eth *)spec;
                item_mask = (const struct rte_flow_item_eth *)mask;

                ethertypes[0].value = item_spec->type;
                ethertypes[0].mask = item_mask->type;
        } else {
                /*
                 * The specification is empty. This is wrong when more
                 * network pattern items follow. Other than that, any
                 * Ethernet frame can match. All of that is checked at
                 * the end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
        /* Outermost tag */
        {
                EFX_MAE_FIELD_VLAN0_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },

        /* Innermost tag */
        {
                EFX_MAE_FIELD_VLAN1_TCI_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
                offsetof(struct rte_flow_item_vlan, tci),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
                offsetof(struct rte_flow_item_vlan, inner_type),
        },
};

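/*
 * Each VLAN item consumes one slice of flocs_vlan: the first item
 * uses the outermost tag locators, the second one the innermost.
 */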
static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        const struct sfc_mae_field_locator *flocs;
        struct rte_flow_item_vlan supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        unsigned int nb_flocs;
        int rc;

        RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

        if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Can't match that many VLAN tags");
        }

        nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
        flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

        /* If parsing fails, this can remain incremented. */
        ++pdata->nb_vlan_tags;

        sfc_mae_item_build_supp_mask(flocs, nb_flocs,
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_vlan_mask,
                                 sizeof(struct rte_flow_item_vlan), error);
        if (rc != 0)
                return rc;

        if (spec != NULL) {
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
                const struct rte_flow_item_vlan *item_spec;
                const struct rte_flow_item_vlan *item_mask;

                item_spec = (const struct rte_flow_item_vlan *)spec;
                item_mask = (const struct rte_flow_item_vlan *)mask;

                ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
                ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
        } else {
                /*
                 * The specification is empty. This is wrong when more
                 * network pattern items follow. Other than that, any
                 * VLAN tag can match. All of that is checked at the
                 * end of parsing.
                 */
                return 0;
        }

        return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
        {
                EFX_MAE_FIELD_SRC_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP4_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
                offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
        },
        {
                EFX_MAE_FIELD_IP_TOS,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
                                 hdr.type_of_service),
                offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
                offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
        },
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
                             struct sfc_flow_parse_ctx *ctx,
                             struct rte_flow_error *error)
{
        rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
        struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
        struct rte_flow_item_ipv4 supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
        int rc;

        sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
                                     &supp_mask, sizeof(supp_mask));

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec, (const void **)&mask,
                                 (const void *)&supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4), error);
        if (rc != 0)
                return rc;

        pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
        pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

        if (spec != NULL) {
                const struct rte_flow_item_ipv4 *item_spec;
                const struct rte_flow_item_ipv4 *item_mask;

                item_spec = (const struct rte_flow_item_ipv4 *)spec;
                item_mask = (const struct rte_flow_item_ipv4 *)mask;

                pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
                pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
        } else {
                return 0;
        }

        return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
                                  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
        {
                EFX_MAE_FIELD_SRC_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
        },
        {
                EFX_MAE_FIELD_DST_IP6_BE,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
                offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
        },
        {
                /*
                 * This locator is used only for building supported fields mask.
                 * The field is handled by sfc_mae_rule_process_pattern_data().
                 */
                SFC_MAE_FIELD_HANDLING_DEFERRED,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
                offsetof(struct rte_flow_item_ipv6, hdr.proto),
        },
        {
                EFX_MAE_FIELD_IP_TTL,
                RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
                offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
        },
};

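/*
 * Unlike IPv4, the IPv6 Traffic Class is packed in the "vtc_flow"
 * word, so it cannot be described by a plain field locator and is
 * extracted and matched separately below.
 */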
1318 static int
1319 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1320                              struct sfc_flow_parse_ctx *ctx,
1321                              struct rte_flow_error *error)
1322 {
1323         rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1324         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1325         const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1326         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1327         struct rte_flow_item_ipv6 supp_mask;
1328         const uint8_t *spec = NULL;
1329         const uint8_t *mask = NULL;
1330         rte_be32_t vtc_flow_be;
1331         uint32_t vtc_flow;
1332         uint8_t tc_value;
1333         uint8_t tc_mask;
1334         int rc;
1335
1336         sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1337                                      &supp_mask, sizeof(supp_mask));
1338
1339         vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1340         memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1341
1342         rc = sfc_flow_parse_init(item,
1343                                  (const void **)&spec, (const void **)&mask,
1344                                  (const void *)&supp_mask,
1345                                  &rte_flow_item_ipv6_mask,
1346                                  sizeof(struct rte_flow_item_ipv6), error);
1347         if (rc != 0)
1348                 return rc;
1349
1350         pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1351         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1352
1353         if (spec != NULL) {
1354                 const struct rte_flow_item_ipv6 *item_spec;
1355                 const struct rte_flow_item_ipv6 *item_mask;
1356
1357                 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1358                 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1359
1360                 pdata->l3_next_proto_value = item_spec->hdr.proto;
1361                 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1362         } else {
1363                 return 0;
1364         }
1365
1366         rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1367                                 ctx_mae, error);
1368         if (rc != 0)
1369                 return rc;
1370
1371         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1372         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1373         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1374
1375         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1376         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1377         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1378
1379         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1380                                           fremap[EFX_MAE_FIELD_IP_TOS],
1381                                           sizeof(tc_value), &tc_value,
1382                                           sizeof(tc_mask), &tc_mask);
1383         if (rc != 0) {
1384                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1385                                 NULL, "Failed to process item fields");
1386         }
1387
1388         return 0;
1389 }
1390
1391 static const struct sfc_mae_field_locator flocs_tcp[] = {
1392         {
1393                 EFX_MAE_FIELD_L4_SPORT_BE,
1394                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1395                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1396         },
1397         {
1398                 EFX_MAE_FIELD_L4_DPORT_BE,
1399                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1400                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1401         },
1402         {
1403                 EFX_MAE_FIELD_TCP_FLAGS_BE,
1404                 /*
1405                  * The values have been picked intentionally since the
1406                  * target MAE field is oversize (16 bit). This mapping
1407                  * relies on the fact that the MAE field is big-endian.
1408                  */
1409                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1410                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1411                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1412         },
1413 };
1414
1415 static int
1416 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1417                             struct sfc_flow_parse_ctx *ctx,
1418                             struct rte_flow_error *error)
1419 {
1420         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1421         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1422         struct rte_flow_item_tcp supp_mask;
1423         const uint8_t *spec = NULL;
1424         const uint8_t *mask = NULL;
1425         int rc;
1426
1427         /*
1428          * When encountered among outermost items, item TCP is invalid.
1429          * Check which match specification is being constructed now.
1430          */
1431         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1432                 return rte_flow_error_set(error, EINVAL,
1433                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1434                                           "TCP in outer frame is invalid");
1435         }
1436
1437         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1438                                      &supp_mask, sizeof(supp_mask));
1439
1440         rc = sfc_flow_parse_init(item,
1441                                  (const void **)&spec, (const void **)&mask,
1442                                  (const void *)&supp_mask,
1443                                  &rte_flow_item_tcp_mask,
1444                                  sizeof(struct rte_flow_item_tcp), error);
1445         if (rc != 0)
1446                 return rc;
1447
1448         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1449         pdata->l3_next_proto_restriction_mask = 0xff;
1450
1451         if (spec == NULL)
1452                 return 0;
1453
1454         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1455                                   ctx_mae, error);
1456 }
1457
1458 static const struct sfc_mae_field_locator flocs_udp[] = {
1459         {
1460                 EFX_MAE_FIELD_L4_SPORT_BE,
1461                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1462                 offsetof(struct rte_flow_item_udp, hdr.src_port),
1463         },
1464         {
1465                 EFX_MAE_FIELD_L4_DPORT_BE,
1466                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1467                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1468         },
1469 };
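
/*
 * For reference, the support mask built from the locators above by
 * sfc_mae_item_build_supp_mask() is all-ones exactly in the covered
 * fields and zero elsewhere; it is equivalent to this sketch:
 *
 *	struct rte_flow_item_udp supp_mask = {
 *		.hdr.src_port = RTE_BE16(0xffff),
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 */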
1470
1471 static int
1472 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1473                             struct sfc_flow_parse_ctx *ctx,
1474                             struct rte_flow_error *error)
1475 {
1476         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1477         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1478         struct rte_flow_item_udp supp_mask;
1479         const uint8_t *spec = NULL;
1480         const uint8_t *mask = NULL;
1481         int rc;
1482
1483         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1484                                      &supp_mask, sizeof(supp_mask));
1485
1486         rc = sfc_flow_parse_init(item,
1487                                  (const void **)&spec, (const void **)&mask,
1488                                  (const void *)&supp_mask,
1489                                  &rte_flow_item_udp_mask,
1490                                  sizeof(struct rte_flow_item_udp), error);
1491         if (rc != 0)
1492                 return rc;
1493
1494         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1495         pdata->l3_next_proto_restriction_mask = 0xff;
1496
1497         if (spec == NULL)
1498                 return 0;
1499
1500         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1501                                   ctx_mae, error);
1502 }
1503
1504 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1505         {
1506                 /*
1507                  * The size and offset values are relevant
1508                  * for Geneve and NVGRE, too.
1509                  */
1510                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1511                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1512         },
1513 };
1514
1515 /*
1516  * An auxiliary registry which allows using non-encap. field IDs
1517  * directly when building a match specification of type ACTION.
1518  *
1519  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1520  */
1521 static const efx_mae_field_id_t field_ids_no_remap[] = {
1522 #define FIELD_ID_NO_REMAP(_field) \
1523         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1524
1525         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1526         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1527         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1528         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1529         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1530         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1531         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1532         FIELD_ID_NO_REMAP(SRC_IP4_BE),
1533         FIELD_ID_NO_REMAP(DST_IP4_BE),
1534         FIELD_ID_NO_REMAP(IP_PROTO),
1535         FIELD_ID_NO_REMAP(IP_TOS),
1536         FIELD_ID_NO_REMAP(IP_TTL),
1537         FIELD_ID_NO_REMAP(SRC_IP6_BE),
1538         FIELD_ID_NO_REMAP(DST_IP6_BE),
1539         FIELD_ID_NO_REMAP(L4_SPORT_BE),
1540         FIELD_ID_NO_REMAP(L4_DPORT_BE),
1541         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1542
1543 #undef FIELD_ID_NO_REMAP
1544 };
1545
1546 /*
1547  * An auxiliary registry which allows using "ENC" field IDs
1548  * when building a match specification of type OUTER.
1549  *
1550  * See sfc_mae_rule_encap_parse_init().
1551  */
1552 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1553 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1554         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1555
1556         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1557         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1558         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1559         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1560         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1561         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1562         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1563         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1564         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1565         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1566         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1567         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1568         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1569         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1570         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1571         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1572
1573 #undef FIELD_ID_REMAP_TO_ENCAP
1574 };
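
/*
 * How the two registries above are consumed (a sketch): item parsers
 * index the active remap table by the generic field ID (via the local
 * "fremap" alias used elsewhere in this file), so one parser serves
 * both rule types:
 *
 *	fid = fremap[EFX_MAE_FIELD_IP_TOS];
 *
 * This resolves to EFX_MAE_FIELD_ENC_IP_TOS while an OUTER match
 * specification is being built and stays EFX_MAE_FIELD_IP_TOS for
 * an ACTION one.
 */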
1575
1576 static int
1577 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1578                                struct sfc_flow_parse_ctx *ctx,
1579                                struct rte_flow_error *error)
1580 {
1581         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1582         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1583         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1584         const struct rte_flow_item_vxlan *vxp;
1585         uint8_t supp_mask[sizeof(uint64_t)];
1586         const uint8_t *spec = NULL;
1587         const uint8_t *mask = NULL;
1588         int rc;
1589
1590         /*
1591          * We're about to start processing inner frame items.
1592          * Process pattern data that has been deferred so far
1593          * and reset pattern data storage.
1594          */
1595         rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1596         if (rc != 0)
1597                 return rc;
1598
1599         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1600
1601         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1602                                      &supp_mask, sizeof(supp_mask));
1603
1604         /*
1605          * This tunnel item was preliminarily detected by
1606          * sfc_mae_rule_encap_parse_init(). The default mask
1607          * was also picked by that helper; use it here.
1608          */
1609         rc = sfc_flow_parse_init(item,
1610                                  (const void **)&spec, (const void **)&mask,
1611                                  (const void *)&supp_mask,
1612                                  ctx_mae->tunnel_def_mask,
1613                                  ctx_mae->tunnel_def_mask_size, error);
1614         if (rc != 0)
1615                 return rc;
1616
1617         /*
1618          * This item and later ones comprise a
1619          * match specification of type ACTION.
1620          */
1621         ctx_mae->match_spec = ctx_mae->match_spec_action;
1622
1623         /* This item and later ones use non-encap. EFX MAE field IDs. */
1624         ctx_mae->field_ids_remap = field_ids_no_remap;
1625
1626         if (spec == NULL)
1627                 return 0;
1628
1629         /*
1630          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is 32 bits wide.
1631          * Copy the 24-bit big-endian VNI to offset 1 within it;
1632          * the extra byte is 0 both in the mask and in the value.
1633          */
1634         vxp = (const struct rte_flow_item_vxlan *)spec;
1635         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1636
1637         vxp = (const struct rte_flow_item_vxlan *)mask;
1638         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1639
1640         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1641                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
1642                                           sizeof(vnet_id_v), vnet_id_v,
1643                                           sizeof(vnet_id_m), vnet_id_m);
1644         if (rc != 0) {
1645                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1646                                         item, "Failed to set VXLAN VNI");
1647         }
1648
1649         return rc;
1650 }
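
/*
 * Byte layout behind the VNI copy above, for illustration; the MAE
 * field is a 32-bit big-endian value whose low 24 bits carry the VNI:
 *
 *	vnet_id_v[0] = 0;            [zero in both value and mask]
 *	vnet_id_v[1] = vxp->vni[0];  [VNI bits 23..16]
 *	vnet_id_v[2] = vxp->vni[1];  [VNI bits 15..8]
 *	vnet_id_v[3] = vxp->vni[2];  [VNI bits 7..0]
 */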
1651
1652 static const struct sfc_flow_item sfc_flow_items[] = {
1653         {
1654                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1655                 /*
1656                  * In terms of RTE flow, this item is a META one,
1657                  * and its position in the pattern is a don't-care.
1658                  */
1659                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1660                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1661                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1662                 .parse = sfc_mae_rule_parse_item_port_id,
1663         },
1664         {
1665                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1666                 /*
1667                  * In terms of RTE flow, this item is a META one,
1668                  * and its position in the pattern is a don't-care.
1669                  */
1670                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1671                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1672                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1673                 .parse = sfc_mae_rule_parse_item_phy_port,
1674         },
1675         {
1676                 .type = RTE_FLOW_ITEM_TYPE_PF,
1677                 /*
1678                  * In terms of RTE flow, this item is a META one,
1679                  * and its position in the pattern is a don't-care.
1680                  */
1681                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1682                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1683                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1684                 .parse = sfc_mae_rule_parse_item_pf,
1685         },
1686         {
1687                 .type = RTE_FLOW_ITEM_TYPE_VF,
1688                 /*
1689                  * In terms of RTE flow, this item is a META one,
1690                  * and its position in the pattern is a don't-care.
1691                  */
1692                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1693                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1694                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1695                 .parse = sfc_mae_rule_parse_item_vf,
1696         },
1697         {
1698                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1699                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1700                 .layer = SFC_FLOW_ITEM_L2,
1701                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1702                 .parse = sfc_mae_rule_parse_item_eth,
1703         },
1704         {
1705                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1706                 .prev_layer = SFC_FLOW_ITEM_L2,
1707                 .layer = SFC_FLOW_ITEM_L2,
1708                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1709                 .parse = sfc_mae_rule_parse_item_vlan,
1710         },
1711         {
1712                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1713                 .prev_layer = SFC_FLOW_ITEM_L2,
1714                 .layer = SFC_FLOW_ITEM_L3,
1715                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1716                 .parse = sfc_mae_rule_parse_item_ipv4,
1717         },
1718         {
1719                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1720                 .prev_layer = SFC_FLOW_ITEM_L2,
1721                 .layer = SFC_FLOW_ITEM_L3,
1722                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1723                 .parse = sfc_mae_rule_parse_item_ipv6,
1724         },
1725         {
1726                 .type = RTE_FLOW_ITEM_TYPE_TCP,
1727                 .prev_layer = SFC_FLOW_ITEM_L3,
1728                 .layer = SFC_FLOW_ITEM_L4,
1729                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1730                 .parse = sfc_mae_rule_parse_item_tcp,
1731         },
1732         {
1733                 .type = RTE_FLOW_ITEM_TYPE_UDP,
1734                 .prev_layer = SFC_FLOW_ITEM_L3,
1735                 .layer = SFC_FLOW_ITEM_L4,
1736                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1737                 .parse = sfc_mae_rule_parse_item_udp,
1738         },
1739         {
1740                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1741                 .prev_layer = SFC_FLOW_ITEM_L4,
1742                 .layer = SFC_FLOW_ITEM_START_LAYER,
1743                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1744                 .parse = sfc_mae_rule_parse_item_tunnel,
1745         },
1746         {
1747                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1748                 .prev_layer = SFC_FLOW_ITEM_L4,
1749                 .layer = SFC_FLOW_ITEM_START_LAYER,
1750                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1751                 .parse = sfc_mae_rule_parse_item_tunnel,
1752         },
1753         {
1754                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1755                 .prev_layer = SFC_FLOW_ITEM_L3,
1756                 .layer = SFC_FLOW_ITEM_START_LAYER,
1757                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1758                 .parse = sfc_mae_rule_parse_item_tunnel,
1759         },
1760 };
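
/*
 * An example pattern this table accepts (a sketch): the outermost
 * ETH ... UDP items go to the OUTER match specification, the tunnel
 * item switches parsing over, and the remaining items describe the
 * inner frame in the ACTION match specification:
 *
 *	ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP / END
 */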
1761
1762 static int
1763 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1764                            struct sfc_mae_parse_ctx *ctx,
1765                            struct sfc_mae_outer_rule **rulep,
1766                            struct rte_flow_error *error)
1767 {
1768         struct sfc_mae_outer_rule *rule;
1769         int rc;
1770
1771         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1772                 *rulep = NULL;
1773                 return 0;
1774         }
1775
1776         SFC_ASSERT(ctx->match_spec_outer != NULL);
1777
1778         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1779                 return rte_flow_error_set(error, ENOTSUP,
1780                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1781                                           "Inconsistent pattern (outer)");
1782         }
1783
1784         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1785                                            ctx->encap_type);
1786         if (*rulep != NULL) {
1787                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1788         } else {
1789                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1790                                             ctx->encap_type, rulep);
1791                 if (rc != 0) {
1792                         return rte_flow_error_set(error, rc,
1793                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1794                                         "Failed to process the pattern");
1795                 }
1796         }
1797
1798         /* The spec is now tracked by the outer rule entry. */
1799         ctx->match_spec_outer = NULL;
1800
1801         /*
1802          * Depending on whether we reuse an existing outer rule or create a
1803          * new one (see above), outer rule ID is either a valid value or
1804          * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
1805          * specification (and the full mask, too) in order to have correct
1806          * class comparisons of the new rule with existing ones.
1807          * Also, the action rule match specification will be validated shortly,
1808          * and having the full mask set for outer rule ID indicates that we
1809          * will use this field, and support for this field has to be checked.
1810          */
1811         rule = *rulep;
1812         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1813                                                   &rule->fw_rsrc.rule_id);
1814         if (rc != 0) {
1815                 sfc_mae_outer_rule_del(sa, *rulep);
1816                 *rulep = NULL;
1817
1818                 return rte_flow_error_set(error, rc,
1819                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1820                                           "Failed to process the pattern");
1821         }
1822
1823         return 0;
1824 }
1825
1826 static int
1827 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1828                               const struct rte_flow_item pattern[],
1829                               struct sfc_mae_parse_ctx *ctx,
1830                               struct rte_flow_error *error)
1831 {
1832         struct sfc_mae *mae = &sa->mae;
1833         int rc;
1834
1835         if (pattern == NULL) {
1836                 rte_flow_error_set(error, EINVAL,
1837                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1838                                    "NULL pattern");
1839                 return -rte_errno;
1840         }
1841
1842         for (;;) {
1843                 switch (pattern->type) {
1844                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1845                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
1846                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
1847                         ctx->tunnel_def_mask_size =
1848                                 sizeof(rte_flow_item_vxlan_mask);
1849                         break;
1850                 case RTE_FLOW_ITEM_TYPE_GENEVE:
1851                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
1852                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
1853                         ctx->tunnel_def_mask_size =
1854                                 sizeof(rte_flow_item_geneve_mask);
1855                         break;
1856                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1857                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1858                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
1859                         ctx->tunnel_def_mask_size =
1860                                 sizeof(rte_flow_item_nvgre_mask);
1861                         break;
1862                 case RTE_FLOW_ITEM_TYPE_END:
1863                         break;
1864                 default:
1865                         ++pattern;
1866                         continue;
1867                 }
1868
1869                 break;
1870         }
1871
1872         if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
1873                 return 0;
1874
1875         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
1876                 return rte_flow_error_set(error, ENOTSUP,
1877                                           RTE_FLOW_ERROR_TYPE_ITEM,
1878                                           pattern, "Unsupported tunnel item");
1879         }
1880
1881         if (ctx->priority >= mae->nb_outer_rule_prios_max) {
1882                 return rte_flow_error_set(error, ENOTSUP,
1883                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1884                                           NULL, "Unsupported priority level");
1885         }
1886
1887         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
1888                                      &ctx->match_spec_outer);
1889         if (rc != 0) {
1890                 return rte_flow_error_set(error, rc,
1891                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1892                         "Failed to initialise outer rule match specification");
1893         }
1894
1895         /* Outermost items comprise a match specification of type OUTER. */
1896         ctx->match_spec = ctx->match_spec_outer;
1897
1898         /* Outermost items use "ENC" EFX MAE field IDs. */
1899         ctx->field_ids_remap = field_ids_remap_to_encap;
1900
1901         return 0;
1902 }
1903
1904 static void
1905 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
1906                               struct sfc_mae_parse_ctx *ctx)
1907 {
1908         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1909                 return;
1910
1911         if (ctx->match_spec_outer != NULL)
1912                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1913 }
1914
1915 int
1916 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1917                            const struct rte_flow_item pattern[],
1918                            struct sfc_flow_spec_mae *spec,
1919                            struct rte_flow_error *error)
1920 {
1921         struct sfc_mae_parse_ctx ctx_mae;
1922         struct sfc_flow_parse_ctx ctx;
1923         int rc;
1924
1925         memset(&ctx_mae, 0, sizeof(ctx_mae));
1926         ctx_mae.priority = spec->priority;
1927         ctx_mae.sa = sa;
1928
1929         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
1930                                      spec->priority,
1931                                      &ctx_mae.match_spec_action);
1932         if (rc != 0) {
1933                 rc = rte_flow_error_set(error, rc,
1934                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1935                         "Failed to initialise action rule match specification");
1936                 goto fail_init_match_spec_action;
1937         }
1938
1939         /*
1940          * As a preliminary setting, assume that there is no encapsulation
1941          * in the pattern. That is, pattern items are about to comprise a
1942          * match specification of type ACTION and use non-encap. field IDs.
1943          *
1944          * sfc_mae_rule_encap_parse_init() below may override this.
1945          */
1946         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
1947         ctx_mae.match_spec = ctx_mae.match_spec_action;
1948         ctx_mae.field_ids_remap = field_ids_no_remap;
1949
1950         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
1951         ctx.mae = &ctx_mae;
1952
1953         rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
1954         if (rc != 0)
1955                 goto fail_encap_parse_init;
1956
1957         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
1958                                     pattern, &ctx, error);
1959         if (rc != 0)
1960                 goto fail_parse_pattern;
1961
1962         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
1963         if (rc != 0)
1964                 goto fail_process_pattern_data;
1965
1966         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
1967         if (rc != 0)
1968                 goto fail_process_outer;
1969
1970         if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
1971                 rc = rte_flow_error_set(error, ENOTSUP,
1972                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1973                                         "Inconsistent pattern");
1974                 goto fail_validate_match_spec_action;
1975         }
1976
1977         spec->match_spec = ctx_mae.match_spec_action;
1978
1979         return 0;
1980
1981 fail_validate_match_spec_action:
1982 fail_process_outer:
1983 fail_process_pattern_data:
1984 fail_parse_pattern:
1985         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
1986
1987 fail_encap_parse_init:
1988         efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
1989
1990 fail_init_match_spec_action:
1991         return rc;
1992 }
1993
1994 /*
1995  * An action supported by MAE may correspond to a bundle of RTE flow actions,
1996  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
1997  * That is, related RTE flow actions need to be tracked as parts of a whole
1998  * so that they can be combined into a single action and submitted to the
1999  * MAE representation of a given rule's action set.
2000  *
2001  * Each RTE flow action provided by an application gets classified as
2002  * one belonging to some bundle type. If an action is not supposed to
2003  * belong to any bundle, or if this action is END, it is described as
2004  * one belonging to a dummy bundle of type EMPTY.
2005  *
2006  * A currently tracked bundle will be submitted if a repeating
2007  * action or an action of different bundle type follows.
2008  */
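
/*
 * For instance, the sequence (an illustrative sketch)
 *
 *	OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / PORT_ID
 *
 * accumulates the first three actions in one VLAN_PUSH bundle; the
 * PORT_ID action has a different bundle type, so the pending bundle
 * is submitted as a single MAE VLAN push action at that point.
 */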
2009
2010 enum sfc_mae_actions_bundle_type {
2011         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
2012         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
2013 };
2014
2015 struct sfc_mae_actions_bundle {
2016         enum sfc_mae_actions_bundle_type        type;
2017
2018         /* Indicates actions already tracked by the current bundle */
2019         uint64_t                                actions_mask;
2020
2021         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
2022         rte_be16_t                              vlan_push_tpid;
2023         rte_be16_t                              vlan_push_tci;
2024 };
2025
2026 /*
2027  * Combine configuration of RTE flow actions tracked by the bundle into a
2028  * single action and submit the result to the MAE action set specification.
2029  * Do nothing in the case of a dummy action bundle.
2030  */
2031 static int
2032 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
2033                               efx_mae_actions_t *spec)
2034 {
2035         int rc = 0;
2036
2037         switch (bundle->type) {
2038         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
2039                 break;
2040         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
2041                 rc = efx_mae_action_set_populate_vlan_push(
2042                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
2043                 break;
2044         default:
2045                 SFC_ASSERT(B_FALSE);
2046                 break;
2047         }
2048
2049         return rc;
2050 }
2051
2052 /*
2053  * Given the type of the next RTE flow action in the line, decide
2054  * whether a new bundle is about to start, and, if this is the case,
2055  * submit and reset the current bundle.
2056  */
2057 static int
2058 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
2059                             struct sfc_mae_actions_bundle *bundle,
2060                             efx_mae_actions_t *spec,
2061                             struct rte_flow_error *error)
2062 {
2063         enum sfc_mae_actions_bundle_type bundle_type_new;
2064         int rc;
2065
2066         switch (action->type) {
2067         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2068         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2069         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2070                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
2071                 break;
2072         default:
2073                 /*
2074                  * Self-sufficient actions, including END, are handled in this
2075                  * case. No checks for unsupported actions are needed here
2076                  * because parsing doesn't occur at this point.
2077                  */
2078                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
2079                 break;
2080         }
2081
2082         if (bundle_type_new != bundle->type ||
2083             (bundle->actions_mask & (1ULL << action->type)) != 0) {
2084                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
2085                 if (rc != 0)
2086                         goto fail_submit;
2087
2088                 memset(bundle, 0, sizeof(*bundle));
2089         }
2090
2091         bundle->type = bundle_type_new;
2092
2093         return 0;
2094
2095 fail_submit:
2096         return rte_flow_error_set(error, rc,
2097                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2098                         "Failed to request the (group of) action(s)");
2099 }
2100
2101 static void
2102 sfc_mae_rule_parse_action_of_push_vlan(
2103                             const struct rte_flow_action_of_push_vlan *conf,
2104                             struct sfc_mae_actions_bundle *bundle)
2105 {
2106         bundle->vlan_push_tpid = conf->ethertype;
2107 }
2108
2109 static void
2110 sfc_mae_rule_parse_action_of_set_vlan_vid(
2111                             const struct rte_flow_action_of_set_vlan_vid *conf,
2112                             struct sfc_mae_actions_bundle *bundle)
2113 {
2114         bundle->vlan_push_tci |= (conf->vlan_vid &
2115                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
2116 }
2117
2118 static void
2119 sfc_mae_rule_parse_action_of_set_vlan_pcp(
2120                             const struct rte_flow_action_of_set_vlan_pcp *conf,
2121                             struct sfc_mae_actions_bundle *bundle)
2122 {
2123         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
2124                                            RTE_LEN2MASK(3, uint8_t)) << 13;
2125
2126         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
2127 }
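
/*
 * A worked example for the two helpers above (illustrative values):
 * OF_SET_VLAN_VID with vlan_vid = RTE_BE16(100) and OF_SET_VLAN_PCP
 * with vlan_pcp = 3 accumulate
 *
 *	vlan_push_tci = RTE_BE16(100) | rte_cpu_to_be_16(3 << 13)
 *	              = RTE_BE16(0x6064)
 *
 * which is the complete TCI submitted by the VLAN_PUSH bundle.
 */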
2128
2129 struct sfc_mae_parsed_item {
2130         const struct rte_flow_item      *item;
2131         size_t                          proto_header_ofst;
2132         size_t                          proto_header_size;
2133 };
2134
2135 /*
2136  * For each 16-bit word of the given header, override
2137  * bits enforced by the corresponding 16-bit mask.
2138  */
2139 static void
2140 sfc_mae_header_force_item_masks(uint8_t *header_buf,
2141                                 const struct sfc_mae_parsed_item *parsed_items,
2142                                 unsigned int nb_parsed_items)
2143 {
2144         unsigned int item_idx;
2145
2146         for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
2147                 const struct sfc_mae_parsed_item *parsed_item;
2148                 const struct rte_flow_item *item;
2149                 size_t proto_header_size;
2150                 size_t ofst;
2151
2152                 parsed_item = &parsed_items[item_idx];
2153                 proto_header_size = parsed_item->proto_header_size;
2154                 item = parsed_item->item;
2155
2156                 for (ofst = 0; ofst < proto_header_size;
2157                      ofst += sizeof(rte_be16_t)) {
2158                         rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
2159                         const rte_be16_t *w_maskp;
2160                         const rte_be16_t *w_specp;
2161
2162                         w_maskp = RTE_PTR_ADD(item->mask, ofst);
2163                         w_specp = RTE_PTR_ADD(item->spec, ofst);
2164
2165                         *wp &= ~(*w_maskp);
2166                         *wp |= (*w_specp & *w_maskp);
2167                 }
2168
2169                 header_buf += proto_header_size;
2170         }
2171 }
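
/*
 * One iteration of the 16-bit word loop above, with illustrative
 * values: for a built header word of 0xffff, item spec 0x1234 and
 * item mask 0x0f0f, the result is
 *
 *	(0xffff & ~0x0f0f) | (0x1234 & 0x0f0f) = 0xf2f4
 *
 * i.e. only the bits enforced by the mask are overridden.
 */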
2172
2173 #define SFC_IPV4_TTL_DEF        0x40
2174 #define SFC_IPV6_VTC_FLOW_DEF   0x60000000
2175 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
2176 #define SFC_VXLAN_FLAGS_DEF     0x08000000
2177
2178 static int
2179 sfc_mae_rule_parse_action_vxlan_encap(
2180                             struct sfc_mae *mae,
2181                             const struct rte_flow_action_vxlan_encap *conf,
2182                             efx_mae_actions_t *spec,
2183                             struct rte_flow_error *error)
2184 {
2185         struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
2186         struct rte_flow_item *pattern = conf->definition;
2187         uint8_t *buf = bounce_eh->buf;
2188
2189         /* This array will keep track of non-VOID pattern items. */
2190         struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
2191                                                 2 /* VLAN tags */ +
2192                                                 1 /* IPv4 or IPv6 */ +
2193                                                 1 /* UDP */ +
2194                                                 1 /* VXLAN */];
2195         unsigned int nb_parsed_items = 0;
2196
2197         size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
2198         uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
2199                                   sizeof(struct rte_ipv6_hdr))];
2200         struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
2201         struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
2202         struct rte_vxlan_hdr *vxlan = NULL;
2203         struct rte_udp_hdr *udp = NULL;
2204         unsigned int nb_vlan_tags = 0;
2205         size_t next_proto_ofst = 0;
2206         size_t ethertype_ofst = 0;
2207         uint64_t exp_items;
2208
2209         if (pattern == NULL) {
2210                 return rte_flow_error_set(error, EINVAL,
2211                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2212                                 "The encap. header definition is NULL");
2213         }
2214
2215         bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
2216         bounce_eh->size = 0;
2217
2218         /*
2219          * Process pattern items and remember non-VOID ones.
2220          * Defer applying masks until after the complete header
2221          * has been built from the pattern items.
2222          */
2223         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
2224
2225         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
2226                 struct sfc_mae_parsed_item *parsed_item;
2227                 const uint64_t exp_items_extra_vlan[] = {
2228                         RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
2229                 };
2230                 size_t proto_header_size;
2231                 rte_be16_t *ethertypep;
2232                 uint8_t *next_protop;
2233                 uint8_t *buf_cur;
2234
2235                 if (pattern->spec == NULL) {
2236                         return rte_flow_error_set(error, EINVAL,
2237                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2238                                         "NULL item spec in the encap. header");
2239                 }
2240
2241                 if (pattern->mask == NULL) {
2242                         return rte_flow_error_set(error, EINVAL,
2243                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2244                                         "NULL item mask in the encap. header");
2245                 }
2246
2247                 if (pattern->last != NULL) {
2248                         /* This is not a match pattern, so disallow range. */
2249                         return rte_flow_error_set(error, EINVAL,
2250                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2251                                         "Range item in the encap. header");
2252                 }
2253
2254                 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
2255                         /* Handle VOID separately, for clarity. */
2256                         continue;
2257                 }
2258
2259                 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
2260                         return rte_flow_error_set(error, ENOTSUP,
2261                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2262                                         "Unexpected item in the encap. header");
2263                 }
2264
2265                 parsed_item = &parsed_items[nb_parsed_items];
2266                 buf_cur = buf + bounce_eh->size;
2267
2268                 switch (pattern->type) {
2269                 case RTE_FLOW_ITEM_TYPE_ETH:
2270                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
2271                                                exp_items);
2272                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
2273                                                   hdr) != 0);
2274
2275                         proto_header_size = sizeof(struct rte_ether_hdr);
2276
2277                         ethertype_ofst = eth_ethertype_ofst;
2278
2279                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
2280                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2281                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2282                         break;
2283                 case RTE_FLOW_ITEM_TYPE_VLAN:
2284                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
2285                                                exp_items);
2286                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
2287                                                   hdr) != 0);
2288
2289                         proto_header_size = sizeof(struct rte_vlan_hdr);
2290
2291                         ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
2292                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
2293
2294                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2295                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2296
2297                         ethertype_ofst =
2298                             bounce_eh->size +
2299                             offsetof(struct rte_vlan_hdr, eth_proto);
2300
2301                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
2302                                     RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
2303                         exp_items |= exp_items_extra_vlan[nb_vlan_tags];
2304
2305                         ++nb_vlan_tags;
2306                         break;
2307                 case RTE_FLOW_ITEM_TYPE_IPV4:
2308                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
2309                                                exp_items);
2310                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
2311                                                   hdr) != 0);
2312
2313                         proto_header_size = sizeof(struct rte_ipv4_hdr);
2314
2315                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2316                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2317
2318                         next_proto_ofst =
2319                             bounce_eh->size +
2320                             offsetof(struct rte_ipv4_hdr, next_proto_id);
2321
2322                         ipv4 = (struct rte_ipv4_hdr *)buf_cur;
2323
2324                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2325                         break;
2326                 case RTE_FLOW_ITEM_TYPE_IPV6:
2327                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
2328                                                exp_items);
2329                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
2330                                                   hdr) != 0);
2331
2332                         proto_header_size = sizeof(struct rte_ipv6_hdr);
2333
2334                         ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
2335                         *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2336
2337                         next_proto_ofst = bounce_eh->size +
2338                                           offsetof(struct rte_ipv6_hdr, proto);
2339
2340                         ipv6 = (struct rte_ipv6_hdr *)buf_cur;
2341
2342                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
2343                         break;
2344                 case RTE_FLOW_ITEM_TYPE_UDP:
2345                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
2346                                                exp_items);
2347                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
2348                                                   hdr) != 0);
2349
2350                         proto_header_size = sizeof(struct rte_udp_hdr);
2351
2352                         next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
2353                         *next_protop = IPPROTO_UDP;
2354
2355                         udp = (struct rte_udp_hdr *)buf_cur;
2356
2357                         exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
2358                         break;
2359                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2360                         SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
2361                                                exp_items);
2362                         RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
2363                                                   hdr) != 0);
2364
2365                         proto_header_size = sizeof(struct rte_vxlan_hdr);
2366
2367                         vxlan = (struct rte_vxlan_hdr *)buf_cur;
2368
2369                         udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
2370                         udp->dgram_len = RTE_BE16(sizeof(*udp) +
2371                                                   sizeof(*vxlan));
2372                         udp->dgram_cksum = 0;
2373
2374                         exp_items = 0;
2375                         break;
2376                 default:
2377                         return rte_flow_error_set(error, ENOTSUP,
2378                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2379                                         "Unknown item in the encap. header");
2380                 }
2381
2382                 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
2383                         return rte_flow_error_set(error, E2BIG,
2384                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2385                                         "The encap. header is too big");
2386                 }
2387
2388                 if ((proto_header_size & 1) != 0) {
2389                         return rte_flow_error_set(error, EINVAL,
2390                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2391                                         "Odd layer size in the encap. header");
2392                 }
2393
2394                 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
2395                 bounce_eh->size += proto_header_size;
2396
2397                 parsed_item->item = pattern;
2398                 parsed_item->proto_header_size = proto_header_size;
2399                 ++nb_parsed_items;
2400         }
2401
2402         if (exp_items != 0) {
2403                 /* Parsing item VXLAN would have reset exp_items to 0. */
2404                 return rte_flow_error_set(error, ENOTSUP,
2405                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2406                                         "No item VXLAN in the encap. header");
2407         }
2408
2409         /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
2410         ipv4->version_ihl = RTE_IPV4_VHL_DEF;
2411         ipv4->time_to_live = SFC_IPV4_TTL_DEF;
2412         ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
2413                                       sizeof(*vxlan));
2414         /* The HW cannot compute this checksum. */
2415         ipv4->hdr_checksum = 0;
2416         ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
2417
2418         ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
2419         ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
2420         ipv6->payload_len = udp->dgram_len;
2421
2422         vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
2423
2424         /* Take care of the masks. */
2425         sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
2426
2427         return (spec != NULL) ? efx_mae_action_set_populate_encap(spec) : 0;
2428 }
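
/*
 * A definition this parser accepts (a sketch; the variables stand for
 * application-provided specs and masks):
 *
 *	struct rte_flow_item definition[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth, .mask = &eth_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip, .mask = &ip_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp, .mask = &udp_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vx, .mask = &vx_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * Every non-END item must carry both spec and mask, ranges ("last")
 * are rejected, and the chain must end in VXLAN; defaults such as the
 * UDP destination port (4789) and the VXLAN flags are filled in above.
 */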
2429
2430 static int
2431 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
2432                                efx_mae_actions_t *spec)
2433 {
2434         return efx_mae_action_set_populate_mark(spec, conf->id);
2435 }
2436
2437 static int
2438 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
2439                                    const struct rte_flow_action_phy_port *conf,
2440                                    efx_mae_actions_t *spec)
2441 {
2442         efx_mport_sel_t mport;
2443         uint32_t phy_port;
2444         int rc;
2445
2446         if (conf->original != 0)
2447                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
2448         else
2449                 phy_port = conf->index;
2450
2451         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
2452         if (rc != 0)
2453                 return rc;
2454
2455         return efx_mae_action_set_populate_deliver(spec, &mport);
2456 }
2457
2458 static int
2459 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
2460                                 const struct rte_flow_action_vf *vf_conf,
2461                                 efx_mae_actions_t *spec)
2462 {
2463         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
2464         efx_mport_sel_t mport;
2465         uint32_t vf;
2466         int rc;
2467
2468         if (vf_conf == NULL)
2469                 vf = EFX_PCI_VF_INVALID;
2470         else if (vf_conf->original != 0)
2471                 vf = encp->enc_vf;
2472         else
2473                 vf = vf_conf->id;
2474
2475         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
2476         if (rc != 0)
2477                 return rc;
2478
2479         return efx_mae_action_set_populate_deliver(spec, &mport);
2480 }
2481
2482 static int
2483 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
2484                                   const struct rte_flow_action_port_id *conf,
2485                                   efx_mae_actions_t *spec)
2486 {
2487         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2488         struct sfc_mae *mae = &sa->mae;
2489         efx_mport_sel_t mport;
2490         uint16_t port_id;
2491         int rc;
2492
2493         port_id = (conf->original != 0) ? sas->port_id : conf->id;
2494
2495         rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
2496                                            port_id, &mport);
2497         if (rc != 0)
2498                 return rc;
2499
2500         return efx_mae_action_set_populate_deliver(spec, &mport);
2501 }
2502
2503 static int
2504 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2505                           const struct rte_flow_action *action,
2506                           const struct sfc_mae_outer_rule *outer_rule,
2507                           struct sfc_mae_actions_bundle *bundle,
2508                           efx_mae_actions_t *spec,
2509                           struct rte_flow_error *error)
2510 {
2511         bool custom_error = B_FALSE;
2512         int rc = 0;
2513
2514         switch (action->type) {
2515         case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2516                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
2517                                        bundle->actions_mask);
2518                 if (outer_rule == NULL ||
2519                     outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
2520                         rc = EINVAL;
2521                 else
2522                         rc = efx_mae_action_set_populate_decap(spec);
2523                 break;
2524         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2525                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2526                                        bundle->actions_mask);
2527                 rc = efx_mae_action_set_populate_vlan_pop(spec);
2528                 break;
2529         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2530                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2531                                        bundle->actions_mask);
2532                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
2533                 break;
2534         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2535                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2536                                        bundle->actions_mask);
2537                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
2538                 break;
2539         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2540                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2541                                        bundle->actions_mask);
2542                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
2543                 break;
2544         case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2545                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
2546                                        bundle->actions_mask);
2547                 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
2548                                                            action->conf,
2549                                                            spec, error);
2550                 custom_error = B_TRUE;
2551                 break;
2552         case RTE_FLOW_ACTION_TYPE_FLAG:
2553                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2554                                        bundle->actions_mask);
2555                 rc = efx_mae_action_set_populate_flag(spec);
2556                 break;
2557         case RTE_FLOW_ACTION_TYPE_MARK:
2558                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2559                                        bundle->actions_mask);
2560                 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
2561                 break;
2562         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2563                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2564                                        bundle->actions_mask);
2565                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
2566                 break;
2567         case RTE_FLOW_ACTION_TYPE_PF:
2568                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2569                                        bundle->actions_mask);
2570                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
2571                 break;
2572         case RTE_FLOW_ACTION_TYPE_VF:
2573                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2574                                        bundle->actions_mask);
2575                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
2576                 break;
2577         case RTE_FLOW_ACTION_TYPE_PORT_ID:
2578                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2579                                        bundle->actions_mask);
2580                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
2581                 break;
2582         case RTE_FLOW_ACTION_TYPE_DROP:
2583                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2584                                        bundle->actions_mask);
2585                 rc = efx_mae_action_set_populate_drop(spec);
2586                 break;
2587         default:
2588                 return rte_flow_error_set(error, ENOTSUP,
2589                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2590                                 "Unsupported action");
2591         }
2592
2593         if (rc == 0) {
2594                 bundle->actions_mask |= (1ULL << action->type);
2595         } else if (!custom_error) {
2596                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2597                                 NULL, "Failed to request the action");
2598         }
2599
2600         return rc;
2601 }
2602
2603 static void
2604 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
2605 {
2606         bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
2607 }
2608
2609 static int
2610 sfc_mae_process_encap_header(struct sfc_adapter *sa,
2611                              const struct sfc_mae_bounce_eh *bounce_eh,
2612                              struct sfc_mae_encap_header **encap_headerp)
2613 {
2614         if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
2615                 *encap_headerp = NULL;
2616                 return 0;
2617         }
2618
2619         *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
2620         if (*encap_headerp != NULL)
2621                 return 0;
2622
2623         return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
2624 }
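
/*
 * The caller's side of the helper above, sketched with a hypothetical
 * variable name:
 *
 *	struct sfc_mae_encap_header *eh = NULL;
 *
 *	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &eh);
 *
 * On success, eh is NULL when no encap. action was requested and
 * points to a reused or newly added entry otherwise.
 */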
2625
2626 int
2627 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2628                            const struct rte_flow_action actions[],
2629                            struct sfc_flow_spec_mae *spec_mae,
2630                            struct rte_flow_error *error)
2631 {
2632         struct sfc_mae_encap_header *encap_header = NULL;
2633         struct sfc_mae_actions_bundle bundle = {0};
2634         const struct rte_flow_action *action;
2635         struct sfc_mae *mae = &sa->mae;
2636         efx_mae_actions_t *spec;
2637         int rc;
2638
2639         rte_errno = 0;
2640
2641         if (actions == NULL) {
2642                 return rte_flow_error_set(error, EINVAL,
2643                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
2644                                 "NULL actions");
2645         }
2646
2647         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
2648         if (rc != 0)
2649                 goto fail_action_set_spec_init;
2650
2651         /* Cleanup after previous encap. header bounce buffer usage. */
2652         sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
2653
2654         for (action = actions;
2655              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
2656                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2657                 if (rc != 0)
2658                         goto fail_rule_parse_action;
2659
2660                 rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
2661                                                &bundle, spec, error);
2662                 if (rc != 0)
2663                         goto fail_rule_parse_action;
2664         }
2665
2666         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2667         if (rc != 0)
2668                 goto fail_rule_parse_action;
2669
2670         rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh, &encap_header);
2671         if (rc != 0)
2672                 goto fail_process_encap_header;
2673
2674         spec_mae->action_set = sfc_mae_action_set_attach(sa, encap_header,
2675                                                          spec);
2676         if (spec_mae->action_set != NULL) {
2677                 sfc_mae_encap_header_del(sa, encap_header);
2678                 efx_mae_action_set_spec_fini(sa->nic, spec);
2679                 return 0;
2680         }
2681
2682         rc = sfc_mae_action_set_add(sa, spec, encap_header,
2683                                     &spec_mae->action_set);
2684         if (rc != 0)
2685                 goto fail_action_set_add;
2686
2687         return 0;
2688
2689 fail_action_set_add:
2690         sfc_mae_encap_header_del(sa, encap_header);
2691
2692 fail_process_encap_header:
2693 fail_rule_parse_action:
2694         efx_mae_action_set_spec_fini(sa->nic, spec);
2695
2696 fail_action_set_spec_init:
2697         if (rc > 0 && rte_errno == 0) {
2698                 rc = rte_flow_error_set(error, rc,
2699                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2700                         NULL, "Failed to process the action");
2701         }
2702         return rc;
2703 }
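
/*
 * An action list this parser handles (a sketch; the confs are assumed
 * to be filled in by the application):
 *
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &ov },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Note that the trailing END still goes through
 * sfc_mae_actions_bundle_sync() so that a pending VLAN_PUSH bundle is
 * flushed before the action set is looked up or added.
 */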
2704
2705 static bool
2706 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2707                         const efx_mae_match_spec_t *left,
2708                         const efx_mae_match_spec_t *right)
2709 {
2710         bool have_same_class;
2711         int rc;
2712
2713         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
2714                                            &have_same_class);
2715
2716         return (rc == 0) ? have_same_class : false;
2717 }
2718
2719 static int
2720 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
2721                                 struct sfc_mae_outer_rule *rule)
2722 {
2723         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2724         struct sfc_mae_outer_rule *entry;
2725         struct sfc_mae *mae = &sa->mae;
2726
2727         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
2728                 /* An active rule is reused. Its class is known to be valid. */
2729                 return 0;
2730         }
2731
2732         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
2733                               sfc_mae_outer_rules, entries) {
2734                 const efx_mae_match_spec_t *left = entry->match_spec;
2735                 const efx_mae_match_spec_t *right = rule->match_spec;
2736
2737                 if (entry == rule)
2738                         continue;
2739
2740                 if (sfc_mae_rules_class_cmp(sa, left, right))
2741                         return 0;
2742         }
2743
2744         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2745                  "support for outer frame pattern items is not guaranteed; "
2746                  "other than that, the items are valid from a SW standpoint");
2747         return 0;
2748 }
2749
2750 static int
2751 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
2752                                  struct sfc_flow_spec_mae *spec)
2753 {
2754         const struct rte_flow *entry;
2755
2756         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2757                 const struct sfc_flow_spec *entry_spec = &entry->spec;
2758                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2759                 const efx_mae_match_spec_t *left = es_mae->match_spec;
2760                 const efx_mae_match_spec_t *right = spec->match_spec;
2761
2762                 switch (entry_spec->type) {
2763                 case SFC_FLOW_SPEC_FILTER:
2764                         /* Ignore VNIC-level flows */
2765                         break;
2766                 case SFC_FLOW_SPEC_MAE:
2767                         if (sfc_mae_rules_class_cmp(sa, left, right))
2768                                 return 0;
2769                         break;
2770                 default:
2771                         SFC_ASSERT(false);
2772                 }
2773         }
2774
2775         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2776                  "support for inner frame pattern items is not guaranteed; "
2777                  "other than that, the items are valid from a SW standpoint");
2778         return 0;
2779 }
2780
2781 /**
2782  * Confirm that a given flow can be accepted by the FW.
2783  *
2784  * @param sa
2785  *   Software adapter context
2786  * @param flow
2787  *   Flow to be verified
2788  * @return
2789  *   Zero on success, or a non-zero error code otherwise.
2790  *   A special value of EAGAIN indicates that the adapter is
2791  *   not in the started state. The started state is required
2792  *   because the rule class of the flow being validated can
2793  *   only be compared with the classes of active rules, and
2794  *   such classes are known to be supported by the FW.
2795  */
2796 int
2797 sfc_mae_flow_verify(struct sfc_adapter *sa,
2798                     struct rte_flow *flow)
2799 {
2800         struct sfc_flow_spec *spec = &flow->spec;
2801         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2802         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2803         int rc;
2804
2805         SFC_ASSERT(sfc_adapter_is_locked(sa));
2806
2807         if (sa->state != SFC_ADAPTER_STARTED)
2808                 return EAGAIN;
2809
2810         if (outer_rule != NULL) {
2811                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
2812                 if (rc != 0)
2813                         return rc;
2814         }
2815
2816         return sfc_mae_action_rule_class_verify(sa, spec_mae);
2817 }
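
/*
 * Illustrative caller sketch (not part of the driver; the retry
 * policy here is an assumption, shown for illustration only):
 *
 *	rc = sfc_mae_flow_verify(sa, flow);
 *	if (rc == EAGAIN) {
 *		// The adapter is not started: class comparison against
 *		// active rules is impossible; retry after start.
 *	}
 */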
2818
2819 int
2820 sfc_mae_flow_insert(struct sfc_adapter *sa,
2821                     struct rte_flow *flow)
2822 {
2823         struct sfc_flow_spec *spec = &flow->spec;
2824         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2825         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2826         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2827         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
2828         int rc;
2829
2830         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2831         SFC_ASSERT(action_set != NULL);
2832
2833         if (outer_rule != NULL) {
2834                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
2835                                                spec_mae->match_spec);
2836                 if (rc != 0)
2837                         goto fail_outer_rule_enable;
2838         }
2839
2840         rc = sfc_mae_action_set_enable(sa, action_set);
2841         if (rc != 0)
2842                 goto fail_action_set_enable;
2843
2844         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
2845                                         NULL, &fw_rsrc->aset_id,
2846                                         &spec_mae->rule_id);
2847         if (rc != 0)
2848                 goto fail_action_rule_insert;
2849
2850         return 0;
2851
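        /*
         * Roll back in reverse order of the setup steps above. The
         * (void) casts make it explicit that errors on this path
         * cannot be propagated any further.
         */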
2852 fail_action_rule_insert:
2853         (void)sfc_mae_action_set_disable(sa, action_set);
2854
2855 fail_action_set_enable:
2856         if (outer_rule != NULL)
2857                 (void)sfc_mae_outer_rule_disable(sa, outer_rule);
2858
2859 fail_outer_rule_enable:
2860         return rc;
2861 }
2862
2863 int
2864 sfc_mae_flow_remove(struct sfc_adapter *sa,
2865                     struct rte_flow *flow)
2866 {
2867         struct sfc_flow_spec *spec = &flow->spec;
2868         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2869         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2870         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2871         int rc;
2872
2873         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
2874         SFC_ASSERT(action_set != NULL);
2875
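        /*
         * Teardown mirrors sfc_mae_flow_insert() in reverse order:
         * the action rule goes first, then the action set and,
         * finally, the outer rule (if any).
         */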
2876         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
2877         if (rc != 0)
2878                 return rc;
2879
2880         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
2881
2882         rc = sfc_mae_action_set_disable(sa, action_set);
2883         if (rc != 0) {
2884                 sfc_err(sa, "failed to disable the action set (rc = %d)", rc);
2885                 /* Despite the error, proceed with outer rule removal. */
2886         }
2887
2888         if (outer_rule != NULL)
2889                 return sfc_mae_outer_rule_disable(sa, outer_rule);
2890
2891         return 0;
2892 }