net/sfc: support encap flow items in transfer rules
[dpdk.git] drivers/net/sfc/sfc_mae.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2020 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <stdbool.h>
11
12 #include <rte_common.h>
13
14 #include "efx.h"
15
16 #include "sfc.h"
17 #include "sfc_log.h"
18 #include "sfc_switch.h"
19
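/*
 * Look up the m-port of the PCIe function (PF/VF) backing this ethdev.
 * The result serves as the "entity" m-port when the RTE switch port is
 * registered in sfc_mae_attach().
 */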
20 static int
21 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
22                             efx_mport_sel_t *mportp)
23 {
24         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
25
26         return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
27                                               mportp);
28 }
29
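/*
 * Attach the MAE: initialise libefx MAE support, query the engine
 * limits, and register this ethdev as an independent port within an
 * RTE switch domain.
 */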
30 int
31 sfc_mae_attach(struct sfc_adapter *sa)
32 {
33         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
34         struct sfc_mae_switch_port_request switch_port_request = {0};
35         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
36         efx_mport_sel_t entity_mport;
37         struct sfc_mae *mae = &sa->mae;
38         efx_mae_limits_t limits;
39         int rc;
40
41         sfc_log_init(sa, "entry");
42
43         if (!encp->enc_mae_supported) {
44                 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
45                 return 0;
46         }
47
48         sfc_log_init(sa, "init MAE");
49         rc = efx_mae_init(sa->nic);
50         if (rc != 0)
51                 goto fail_mae_init;
52
53         sfc_log_init(sa, "get MAE limits");
54         rc = efx_mae_get_limits(sa->nic, &limits);
55         if (rc != 0)
56                 goto fail_mae_get_limits;
57
58         sfc_log_init(sa, "assign entity MPORT");
59         rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
60         if (rc != 0)
61                 goto fail_mae_assign_entity_mport;
62
63         sfc_log_init(sa, "assign RTE switch domain");
64         rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
65         if (rc != 0)
66                 goto fail_mae_assign_switch_domain;
67
68         sfc_log_init(sa, "assign RTE switch port");
69         switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
70         switch_port_request.entity_mportp = &entity_mport;
71         /*
72          * As of now, the driver does not support representors, so
73          * RTE ethdev MPORT simply matches that of the entity.
74          */
75         switch_port_request.ethdev_mportp = &entity_mport;
76         switch_port_request.ethdev_port_id = sas->port_id;
77         rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
78                                         &switch_port_request,
79                                         &mae->switch_port_id);
80         if (rc != 0)
81                 goto fail_mae_assign_switch_port;
82
83         mae->status = SFC_MAE_STATUS_SUPPORTED;
84         mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
85         mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
86         mae->encap_types_supported = limits.eml_encap_types_supported;
87         TAILQ_INIT(&mae->outer_rules);
88         TAILQ_INIT(&mae->action_sets);
89
90         sfc_log_init(sa, "done");
91
92         return 0;
93
94 fail_mae_assign_switch_port:
95 fail_mae_assign_switch_domain:
96 fail_mae_assign_entity_mport:
97 fail_mae_get_limits:
98         efx_mae_fini(sa->nic);
99
100 fail_mae_init:
101         sfc_log_init(sa, "failed %d", rc);
102
103         return rc;
104 }
105
106 void
107 sfc_mae_detach(struct sfc_adapter *sa)
108 {
109         struct sfc_mae *mae = &sa->mae;
110         enum sfc_mae_status status_prev = mae->status;
111
112         sfc_log_init(sa, "entry");
113
114         mae->nb_action_rule_prios_max = 0;
115         mae->status = SFC_MAE_STATUS_UNKNOWN;
116
117         if (status_prev != SFC_MAE_STATUS_SUPPORTED)
118                 return;
119
120         efx_mae_fini(sa->nic);
121
122         sfc_log_init(sa, "done");
123 }
124
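/*
 * Outer rules and action sets are reference-counted and kept on
 * adapter-wide lists so that identical flows can share them: _attach()
 * looks up an existing entry and bumps its refcount, _add() creates a
 * new entry, and _del() drops a reference, freeing the entry when the
 * last one is gone.
 */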
125 static struct sfc_mae_outer_rule *
126 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
127                           const efx_mae_match_spec_t *match_spec,
128                           efx_tunnel_protocol_t encap_type)
129 {
130         struct sfc_mae_outer_rule *rule;
131         struct sfc_mae *mae = &sa->mae;
132
133         SFC_ASSERT(sfc_adapter_is_locked(sa));
134
135         TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
136                 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
137                     rule->encap_type == encap_type) {
138                         ++(rule->refcnt);
139                         return rule;
140                 }
141         }
142
143         return NULL;
144 }
145
146 static int
147 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
148                        efx_mae_match_spec_t *match_spec,
149                        efx_tunnel_protocol_t encap_type,
150                        struct sfc_mae_outer_rule **rulep)
151 {
152         struct sfc_mae_outer_rule *rule;
153         struct sfc_mae *mae = &sa->mae;
154
155         SFC_ASSERT(sfc_adapter_is_locked(sa));
156
157         rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
158         if (rule == NULL)
159                 return ENOMEM;
160
161         rule->refcnt = 1;
162         rule->match_spec = match_spec;
163         rule->encap_type = encap_type;
164
165         rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
166
167         TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
168
169         *rulep = rule;
170
171         return 0;
172 }
173
174 static void
175 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
176                        struct sfc_mae_outer_rule *rule)
177 {
178         struct sfc_mae *mae = &sa->mae;
179
180         SFC_ASSERT(sfc_adapter_is_locked(sa));
181         SFC_ASSERT(rule->refcnt != 0);
182
183         --(rule->refcnt);
184
185         if (rule->refcnt != 0)
186                 return;
187
188         SFC_ASSERT(rule->fw_rsrc.rule_id.id == EFX_MAE_RSRC_ID_INVALID);
189         SFC_ASSERT(rule->fw_rsrc.refcnt == 0);
190
191         efx_mae_match_spec_fini(sa->nic, rule->match_spec);
192
193         TAILQ_REMOVE(&mae->outer_rules, rule, entries);
194         rte_free(rule);
195 }
196
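/*
 * The FW resource behind an outer rule is managed lazily: the rule is
 * inserted into the FW on the first _enable() and removed again when
 * the last user calls _disable(), as tracked by fw_rsrc.refcnt.
 */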
197 static int
198 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
199                           struct sfc_mae_outer_rule *rule,
200                           efx_mae_match_spec_t *match_spec_action)
201 {
202         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
203         int rc;
204
205         SFC_ASSERT(sfc_adapter_is_locked(sa));
206
207         if (fw_rsrc->refcnt == 0) {
208                 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
209                 SFC_ASSERT(rule->match_spec != NULL);
210
211                 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
212                                                rule->encap_type,
213                                                &fw_rsrc->rule_id);
214                 if (rc != 0)
215                         return rc;
216         }
217
218         rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
219                                                   &fw_rsrc->rule_id);
220         if (rc != 0) {
221                 if (fw_rsrc->refcnt == 0) {
222                         (void)efx_mae_outer_rule_remove(sa->nic,
223                                                         &fw_rsrc->rule_id);
224                 }
225                 return rc;
226         }
227
228         ++(fw_rsrc->refcnt);
229
230         return 0;
231 }
232
233 static int
234 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
235                            struct sfc_mae_outer_rule *rule)
236 {
237         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
238         int rc;
239
240         SFC_ASSERT(sfc_adapter_is_locked(sa));
241         SFC_ASSERT(fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
242         SFC_ASSERT(fw_rsrc->refcnt != 0);
243
244         if (fw_rsrc->refcnt == 1) {
245                 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
246                 if (rc != 0)
247                         return rc;
248
249                 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
250         }
251
252         --(fw_rsrc->refcnt);
253
254         return 0;
255 }
256
257 static struct sfc_mae_action_set *
258 sfc_mae_action_set_attach(struct sfc_adapter *sa,
259                           const efx_mae_actions_t *spec)
260 {
261         struct sfc_mae_action_set *action_set;
262         struct sfc_mae *mae = &sa->mae;
263
264         SFC_ASSERT(sfc_adapter_is_locked(sa));
265
266         TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
267                 if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
268                         ++(action_set->refcnt);
269                         return action_set;
270                 }
271         }
272
273         return NULL;
274 }
275
276 static int
277 sfc_mae_action_set_add(struct sfc_adapter *sa,
278                        efx_mae_actions_t *spec,
279                        struct sfc_mae_action_set **action_setp)
280 {
281         struct sfc_mae_action_set *action_set;
282         struct sfc_mae *mae = &sa->mae;
283
284         SFC_ASSERT(sfc_adapter_is_locked(sa));
285
286         action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
287         if (action_set == NULL)
288                 return ENOMEM;
289
290         action_set->refcnt = 1;
291         action_set->spec = spec;
292
293         action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
294
295         TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
296
297         *action_setp = action_set;
298
299         return 0;
300 }
301
302 static void
303 sfc_mae_action_set_del(struct sfc_adapter *sa,
304                        struct sfc_mae_action_set *action_set)
305 {
306         struct sfc_mae *mae = &sa->mae;
307
308         SFC_ASSERT(sfc_adapter_is_locked(sa));
309         SFC_ASSERT(action_set->refcnt != 0);
310
311         --(action_set->refcnt);
312
313         if (action_set->refcnt != 0)
314                 return;
315
316         SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
317         SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);
318
319         efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
320         TAILQ_REMOVE(&mae->action_sets, action_set, entries);
321         rte_free(action_set);
322 }
323
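/*
 * Like outer rules, action sets own their FW resource lazily: it is
 * allocated on the first _enable() and freed once the final _disable()
 * drops fw_rsrc.refcnt to zero.
 */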
324 static int
325 sfc_mae_action_set_enable(struct sfc_adapter *sa,
326                           struct sfc_mae_action_set *action_set)
327 {
328         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
329         int rc;
330
331         SFC_ASSERT(sfc_adapter_is_locked(sa));
332
333         if (fw_rsrc->refcnt == 0) {
334                 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
335                 SFC_ASSERT(action_set->spec != NULL);
336
337                 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
338                                               &fw_rsrc->aset_id);
339                 if (rc != 0)
340                         return rc;
341         }
342
343         ++(fw_rsrc->refcnt);
344
345         return 0;
346 }
347
348 static int
349 sfc_mae_action_set_disable(struct sfc_adapter *sa,
350                            struct sfc_mae_action_set *action_set)
351 {
352         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
353         int rc;
354
355         SFC_ASSERT(sfc_adapter_is_locked(sa));
356         SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
357         SFC_ASSERT(fw_rsrc->refcnt != 0);
358
359         if (fw_rsrc->refcnt == 1) {
360                 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
361                 if (rc != 0)
362                         return rc;
363
364                 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
365         }
366
367         --(fw_rsrc->refcnt);
368
369         return 0;
370 }
371
372 void
373 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
374                      struct rte_flow *flow)
375 {
376         struct sfc_flow_spec *spec;
377         struct sfc_flow_spec_mae *spec_mae;
378
379         if (flow == NULL)
380                 return;
381
382         spec = &flow->spec;
383
384         if (spec == NULL)
385                 return;
386
387         spec_mae = &spec->mae;
388
389         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
390
391         if (spec_mae->outer_rule != NULL)
392                 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
393
394         if (spec_mae->action_set != NULL)
395                 sfc_mae_action_set_del(sa, spec_mae->action_set);
396
397         if (spec_mae->match_spec != NULL)
398                 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
399 }
400
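/*
 * Write the EtherType values stashed by items ETH and VLAN into the
 * match specification: field ETHER_TYPE gets the innermost value,
 * whilst VLAN0_PROTO and VLAN1_PROTO get the TPIDs of the outermost
 * and innermost tags, respectively.
 */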
401 static int
402 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
403 {
404         struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
405         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
406         const efx_mae_field_id_t field_ids[] = {
407                 EFX_MAE_FIELD_VLAN0_PROTO_BE,
408                 EFX_MAE_FIELD_VLAN1_PROTO_BE,
409         };
410         const struct sfc_mae_ethertype *et;
411         unsigned int i;
412         int rc;
413
414         /*
415          * In accordance with RTE flow API convention, the innermost L2
416          * item's "type" ("inner_type") is an L3 EtherType. If there is
417          * no L3 item, it's 0x0000/0x0000.
418          */
419         et = &pdata->ethertypes[pdata->nb_vlan_tags];
420         rc = efx_mae_match_spec_field_set(ctx->match_spec,
421                                           fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
422                                           sizeof(et->value),
423                                           (const uint8_t *)&et->value,
424                                           sizeof(et->mask),
425                                           (const uint8_t *)&et->mask);
426         if (rc != 0)
427                 return rc;
428
429         /*
430          * sfc_mae_rule_parse_item_vlan() has already made sure
431          * that pdata->nb_vlan_tags does not exceed this figure.
432          */
433         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
434
435         for (i = 0; i < pdata->nb_vlan_tags; ++i) {
436                 et = &pdata->ethertypes[i];
437
438                 rc = efx_mae_match_spec_field_set(ctx->match_spec,
439                                                   fremap[field_ids[i]],
440                                                   sizeof(et->value),
441                                                   (const uint8_t *)&et->value,
442                                                   sizeof(et->mask),
443                                                   (const uint8_t *)&et->mask);
444                 if (rc != 0)
445                         return rc;
446         }
447
448         return 0;
449 }
450
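/*
 * Process the pattern data stashed by the item parsers: verify that
 * the collected EtherTypes form a valid TPID chain, reconcile them
 * with the innermost L3 item, and set the EtherType and IP_PROTO
 * match fields. For example, in pattern ETH / VLAN / VLAN / IPV4,
 * "type" of item ETH must be a double-tagging TPID (e.g. 0x88a8),
 * "inner_type" of the outer item VLAN must be 0x8100, and that of
 * the inner item VLAN is the IPv4 EtherType handled via the
 * innermost EtherType restriction.
 */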
451 static int
452 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
453                                   struct rte_flow_error *error)
454 {
455         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
456         struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
457         struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
458         const rte_be16_t supported_tpids[] = {
459                 /* VLAN standard TPID (always the first element) */
460                 RTE_BE16(RTE_ETHER_TYPE_VLAN),
461
462                 /* Double-tagging TPIDs */
463                 RTE_BE16(RTE_ETHER_TYPE_QINQ),
464                 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
465                 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
466                 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
467         };
468         unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
469         unsigned int ethertype_idx;
470         const uint8_t *valuep;
471         const uint8_t *maskp;
472         int rc;
473
474         if (pdata->innermost_ethertype_restriction.mask != 0 &&
475             pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
476                 /*
477                  * If a single item VLAN is followed by an L3 item, the
478                  * value of "type" in item ETH can't be a double-tagging TPID.
479                  */
480                 nb_supported_tpids = 1;
481         }
482
483         /*
484          * sfc_mae_rule_parse_item_vlan() has already made sure
485          * that pdata->nb_vlan_tags does not exceed this figure.
486          */
487         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
488
489         for (ethertype_idx = 0;
490              ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
491                 unsigned int tpid_idx;
492
493                 /* Exact match is supported only. */
494                 if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
495                         rc = EINVAL;
496                         goto fail;
497                 }
498
499                 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
500                      tpid_idx < nb_supported_tpids; ++tpid_idx) {
501                         if (ethertypes[ethertype_idx].value ==
502                             supported_tpids[tpid_idx])
503                                 break;
504                 }
505
506                 if (tpid_idx == nb_supported_tpids) {
507                         rc = EINVAL;
508                         goto fail;
509                 }
510
511                 nb_supported_tpids = 1;
512         }
513
514         if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
515                 struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
516
517                 if (et->mask == 0) {
518                         et->mask = RTE_BE16(0xffff);
519                         et->value =
520                             pdata->innermost_ethertype_restriction.value;
521                 } else if (et->mask != RTE_BE16(0xffff) ||
522                            et->value !=
523                            pdata->innermost_ethertype_restriction.value) {
524                         rc = EINVAL;
525                         goto fail;
526                 }
527         }
528
529         /*
530          * Now that the number of VLAN tags is known, set fields
531          * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
532          * one is a valid L3 EtherType (or 0x0000/0x0000), and the
533          * last two are valid TPIDs (or 0x0000/0x0000).
534          */
535         rc = sfc_mae_set_ethertypes(ctx);
536         if (rc != 0)
537                 goto fail;
538
539         if (pdata->l3_next_proto_restriction_mask == 0xff) {
540                 if (pdata->l3_next_proto_mask == 0) {
541                         pdata->l3_next_proto_mask = 0xff;
542                         pdata->l3_next_proto_value =
543                             pdata->l3_next_proto_restriction_value;
544                 } else if (pdata->l3_next_proto_mask != 0xff ||
545                            pdata->l3_next_proto_value !=
546                            pdata->l3_next_proto_restriction_value) {
547                         rc = EINVAL;
548                         goto fail;
549                 }
550         }
551
552         valuep = (const uint8_t *)&pdata->l3_next_proto_value;
553         maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
554         rc = efx_mae_match_spec_field_set(ctx->match_spec,
555                                           fremap[EFX_MAE_FIELD_IP_PROTO],
556                                           sizeof(pdata->l3_next_proto_value),
557                                           valuep,
558                                           sizeof(pdata->l3_next_proto_mask),
559                                           maskp);
560         if (rc != 0)
561                 goto fail;
562
563         return 0;
564
565 fail:
566         return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
567                                   "Failed to process pattern data");
568 }
569
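/*
 * Traffic source items (PORT_ID, PHY_PORT, PF, VF) all translate to
 * an ingress m-port match; only one such item may appear in a given
 * pattern, which is what the match_mport_set flag enforces.
 */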
570 static int
571 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
572                                 struct sfc_flow_parse_ctx *ctx,
573                                 struct rte_flow_error *error)
574 {
575         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
576         const struct rte_flow_item_port_id supp_mask = {
577                 .id = 0xffffffff,
578         };
579         const void *def_mask = &rte_flow_item_port_id_mask;
580         const struct rte_flow_item_port_id *spec = NULL;
581         const struct rte_flow_item_port_id *mask = NULL;
582         efx_mport_sel_t mport_sel;
583         int rc;
584
585         if (ctx_mae->match_mport_set) {
586                 return rte_flow_error_set(error, ENOTSUP,
587                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
588                                 "Can't handle multiple traffic source items");
589         }
590
591         rc = sfc_flow_parse_init(item,
592                                  (const void **)&spec, (const void **)&mask,
593                                  (const void *)&supp_mask, def_mask,
594                                  sizeof(struct rte_flow_item_port_id), error);
595         if (rc != 0)
596                 return rc;
597
598         if (mask->id != supp_mask.id) {
599                 return rte_flow_error_set(error, EINVAL,
600                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
601                                 "Bad mask in the PORT_ID pattern item");
602         }
603
604         /* If "spec" is not set, could be any port ID */
605         if (spec == NULL)
606                 return 0;
607
608         if (spec->id > UINT16_MAX) {
609                 return rte_flow_error_set(error, EOVERFLOW,
610                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
611                                           "The port ID is too large");
612         }
613
614         rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
615                                            spec->id, &mport_sel);
616         if (rc != 0) {
617                 return rte_flow_error_set(error, rc,
618                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
619                                 "Can't find RTE ethdev by the port ID");
620         }
621
622         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
623                                           &mport_sel, NULL);
624         if (rc != 0) {
625                 return rte_flow_error_set(error, rc,
626                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
627                                 "Failed to set MPORT for the port ID");
628         }
629
630         ctx_mae->match_mport_set = B_TRUE;
631
632         return 0;
633 }
634
635 static int
636 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
637                                  struct sfc_flow_parse_ctx *ctx,
638                                  struct rte_flow_error *error)
639 {
640         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
641         const struct rte_flow_item_phy_port supp_mask = {
642                 .index = 0xffffffff,
643         };
644         const void *def_mask = &rte_flow_item_phy_port_mask;
645         const struct rte_flow_item_phy_port *spec = NULL;
646         const struct rte_flow_item_phy_port *mask = NULL;
647         efx_mport_sel_t mport_v;
648         int rc;
649
650         if (ctx_mae->match_mport_set) {
651                 return rte_flow_error_set(error, ENOTSUP,
652                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
653                                 "Can't handle multiple traffic source items");
654         }
655
656         rc = sfc_flow_parse_init(item,
657                                  (const void **)&spec, (const void **)&mask,
658                                  (const void *)&supp_mask, def_mask,
659                                  sizeof(struct rte_flow_item_phy_port), error);
660         if (rc != 0)
661                 return rc;
662
663         if (mask->index != supp_mask.index) {
664                 return rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
666                                 "Bad mask in the PHY_PORT pattern item");
667         }
668
669         /* If "spec" is not set, could be any physical port */
670         if (spec == NULL)
671                 return 0;
672
673         rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
674         if (rc != 0) {
675                 return rte_flow_error_set(error, rc,
676                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
677                                 "Failed to convert the PHY_PORT index");
678         }
679
680         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
681         if (rc != 0) {
682                 return rte_flow_error_set(error, rc,
683                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
684                                 "Failed to set MPORT for the PHY_PORT");
685         }
686
687         ctx_mae->match_mport_set = B_TRUE;
688
689         return 0;
690 }
691
692 static int
693 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
694                            struct sfc_flow_parse_ctx *ctx,
695                            struct rte_flow_error *error)
696 {
697         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
698         const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
699         efx_mport_sel_t mport_v;
700         int rc;
701
702         if (ctx_mae->match_mport_set) {
703                 return rte_flow_error_set(error, ENOTSUP,
704                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
705                                 "Can't handle multiple traffic source items");
706         }
707
708         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
709                                             &mport_v);
710         if (rc != 0) {
711                 return rte_flow_error_set(error, rc,
712                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
713                                 "Failed to convert the PF ID");
714         }
715
716         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
717         if (rc != 0) {
718                 return rte_flow_error_set(error, rc,
719                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
720                                 "Failed to set MPORT for the PF");
721         }
722
723         ctx_mae->match_mport_set = B_TRUE;
724
725         return 0;
726 }
727
728 static int
729 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
730                            struct sfc_flow_parse_ctx *ctx,
731                            struct rte_flow_error *error)
732 {
733         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
734         const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
735         const struct rte_flow_item_vf supp_mask = {
736                 .id = 0xffffffff,
737         };
738         const void *def_mask = &rte_flow_item_vf_mask;
739         const struct rte_flow_item_vf *spec = NULL;
740         const struct rte_flow_item_vf *mask = NULL;
741         efx_mport_sel_t mport_v;
742         int rc;
743
744         if (ctx_mae->match_mport_set) {
745                 return rte_flow_error_set(error, ENOTSUP,
746                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
747                                 "Can't handle multiple traffic source items");
748         }
749
750         rc = sfc_flow_parse_init(item,
751                                  (const void **)&spec, (const void **)&mask,
752                                  (const void *)&supp_mask, def_mask,
753                                  sizeof(struct rte_flow_item_vf), error);
754         if (rc != 0)
755                 return rc;
756
757         if (mask->id != supp_mask.id) {
758                 return rte_flow_error_set(error, EINVAL,
759                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
760                                 "Bad mask in the VF pattern item");
761         }
762
763         /*
764          * If "spec" is not set, the item requests any VF related to the
765          * PF of the current DPDK port (but not the PF itself).
766          * Reject this match criterion as unsupported.
767          */
768         if (spec == NULL) {
769                 return rte_flow_error_set(error, EINVAL,
770                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
771                                 "Bad spec in the VF pattern item");
772         }
773
774         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
775         if (rc != 0) {
776                 return rte_flow_error_set(error, rc,
777                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
778                                 "Failed to convert the PF + VF IDs");
779         }
780
781         rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
782         if (rc != 0) {
783                 return rte_flow_error_set(error, rc,
784                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
785                                 "Failed to set MPORT for the PF + VF");
786         }
787
788         ctx_mae->match_mport_set = B_TRUE;
789
790         return 0;
791 }
792
793 /*
794  * Having this field ID in a field locator means that this
795  * locator cannot be used to actually set the field at the
796  * time when the corresponding item gets encountered. Such
797  * fields get stashed in the parsing context instead. This
798  * is required to resolve dependencies between the stashed
799  * fields. See sfc_mae_rule_process_pattern_data().
800  */
801 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
802
803 struct sfc_mae_field_locator {
804         efx_mae_field_id_t              field_id;
805         size_t                          size;
806         /* Field offset in the corresponding rte_flow_item_ struct */
807         size_t                          ofst;
808 };
809
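/*
 * Build the mask of fields supported by an item parser: zero the
 * whole mask, then set to all-ones every byte range covered by the
 * given field locators.
 */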
810 static void
811 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
812                              unsigned int nb_field_locators, void *mask_ptr,
813                              size_t mask_size)
814 {
815         unsigned int i;
816
817         memset(mask_ptr, 0, mask_size);
818
819         for (i = 0; i < nb_field_locators; ++i) {
820                 const struct sfc_mae_field_locator *fl = &field_locators[i];
821
822                 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
823                 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
824         }
825 }
826
827 static int
828 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
829                    unsigned int nb_field_locators, const uint8_t *spec,
830                    const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
831                    struct rte_flow_error *error)
832 {
833         const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
834         unsigned int i;
835         int rc = 0;
836
837         for (i = 0; i < nb_field_locators; ++i) {
838                 const struct sfc_mae_field_locator *fl = &field_locators[i];
839
840                 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
841                         continue;
842
843                 rc = efx_mae_match_spec_field_set(ctx->match_spec,
844                                                   fremap[fl->field_id],
845                                                   fl->size, spec + fl->ofst,
846                                                   fl->size, mask + fl->ofst);
847                 if (rc != 0)
848                         break;
849         }
850
851         if (rc != 0) {
852                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
853                                 NULL, "Failed to process item fields");
854         }
855
856         return rc;
857 }
858
859 static const struct sfc_mae_field_locator flocs_eth[] = {
860         {
861                 /*
862                  * This locator is used only for building supported fields mask.
863                  * The field is handled by sfc_mae_rule_process_pattern_data().
864                  */
865                 SFC_MAE_FIELD_HANDLING_DEFERRED,
866                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
867                 offsetof(struct rte_flow_item_eth, type),
868         },
869         {
870                 EFX_MAE_FIELD_ETH_DADDR_BE,
871                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
872                 offsetof(struct rte_flow_item_eth, dst),
873         },
874         {
875                 EFX_MAE_FIELD_ETH_SADDR_BE,
876                 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
877                 offsetof(struct rte_flow_item_eth, src),
878         },
879 };
880
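/*
 * Item ETH: the MAC addresses are set on the specification directly,
 * whilst the "type" field is stashed in the pattern data for deferred
 * processing, as it may be either an L3 EtherType or a TPID.
 */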
881 static int
882 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
883                             struct sfc_flow_parse_ctx *ctx,
884                             struct rte_flow_error *error)
885 {
886         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
887         struct rte_flow_item_eth supp_mask;
888         const uint8_t *spec = NULL;
889         const uint8_t *mask = NULL;
890         int rc;
891
892         sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
893                                      &supp_mask, sizeof(supp_mask));
894
895         rc = sfc_flow_parse_init(item,
896                                  (const void **)&spec, (const void **)&mask,
897                                  (const void *)&supp_mask,
898                                  &rte_flow_item_eth_mask,
899                                  sizeof(struct rte_flow_item_eth), error);
900         if (rc != 0)
901                 return rc;
902
903         if (spec != NULL) {
904                 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
905                 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
906                 const struct rte_flow_item_eth *item_spec;
907                 const struct rte_flow_item_eth *item_mask;
908
909                 item_spec = (const struct rte_flow_item_eth *)spec;
910                 item_mask = (const struct rte_flow_item_eth *)mask;
911
912                 ethertypes[0].value = item_spec->type;
913                 ethertypes[0].mask = item_mask->type;
914         } else {
915                 /*
916                  * The specification is empty. This is invalid if there
917                  * are further network items in the pattern; otherwise,
918                  * any Ethernet frame can match. Both cases are checked
919                  * at the end of parsing.
920                  */
921                 return 0;
922         }
923
924         return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
925                                   ctx_mae, error);
926 }
927
928 static const struct sfc_mae_field_locator flocs_vlan[] = {
929         /* Outermost tag */
930         {
931                 EFX_MAE_FIELD_VLAN0_TCI_BE,
932                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
933                 offsetof(struct rte_flow_item_vlan, tci),
934         },
935         {
936                 /*
937                  * This locator is used only for building supported fields mask.
938                  * The field is handled by sfc_mae_rule_process_pattern_data().
939                  */
940                 SFC_MAE_FIELD_HANDLING_DEFERRED,
941                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
942                 offsetof(struct rte_flow_item_vlan, inner_type),
943         },
944
945         /* Innermost tag */
946         {
947                 EFX_MAE_FIELD_VLAN1_TCI_BE,
948                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
949                 offsetof(struct rte_flow_item_vlan, tci),
950         },
951         {
952                 /*
953                  * This locator is used only for building supported fields mask.
954                  * The field is handled by sfc_mae_rule_process_pattern_data().
955                  */
956                 SFC_MAE_FIELD_HANDLING_DEFERRED,
957                 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
958                 offsetof(struct rte_flow_item_vlan, inner_type),
959         },
960 };
961
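/*
 * Item VLAN: up to SFC_MAE_MATCH_VLAN_MAX_NTAGS tags are supported.
 * TCI is set directly, and "inner_type" joins the deferred EtherType
 * chain validated by sfc_mae_rule_process_pattern_data().
 */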
962 static int
963 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
964                              struct sfc_flow_parse_ctx *ctx,
965                              struct rte_flow_error *error)
966 {
967         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
968         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
969         const struct sfc_mae_field_locator *flocs;
970         struct rte_flow_item_vlan supp_mask;
971         const uint8_t *spec = NULL;
972         const uint8_t *mask = NULL;
973         unsigned int nb_flocs;
974         int rc;
975
976         RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
977
978         if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
979                 return rte_flow_error_set(error, ENOTSUP,
980                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
981                                 "Can't match that many VLAN tags");
982         }
983
984         nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
985         flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
986
987         /* If parsing fails, this can remain incremented. */
988         ++pdata->nb_vlan_tags;
989
990         sfc_mae_item_build_supp_mask(flocs, nb_flocs,
991                                      &supp_mask, sizeof(supp_mask));
992
993         rc = sfc_flow_parse_init(item,
994                                  (const void **)&spec, (const void **)&mask,
995                                  (const void *)&supp_mask,
996                                  &rte_flow_item_vlan_mask,
997                                  sizeof(struct rte_flow_item_vlan), error);
998         if (rc != 0)
999                 return rc;
1000
1001         if (spec != NULL) {
1002                 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1003                 const struct rte_flow_item_vlan *item_spec;
1004                 const struct rte_flow_item_vlan *item_mask;
1005
1006                 item_spec = (const struct rte_flow_item_vlan *)spec;
1007                 item_mask = (const struct rte_flow_item_vlan *)mask;
1008
1009                 ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
1010                 ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
1011         } else {
1012                 /*
1013                  * The specification is empty. This is invalid if there
1014                  * are further network items in the pattern; otherwise,
1015                  * any VLAN-tagged frame can match. Both cases are
1016                  * checked at the end of parsing.
1017                  */
1018                 return 0;
1019         }
1020
1021         return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
1022 }
1023
1024 static const struct sfc_mae_field_locator flocs_ipv4[] = {
1025         {
1026                 EFX_MAE_FIELD_SRC_IP4_BE,
1027                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
1028                 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
1029         },
1030         {
1031                 EFX_MAE_FIELD_DST_IP4_BE,
1032                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
1033                 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
1034         },
1035         {
1036                 /*
1037                  * This locator is used only for building supported fields mask.
1038                  * The field is handled by sfc_mae_rule_process_pattern_data().
1039                  */
1040                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1041                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
1042                 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
1043         },
1044         {
1045                 EFX_MAE_FIELD_IP_TOS,
1046                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
1047                                  hdr.type_of_service),
1048                 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
1049         },
1050         {
1051                 EFX_MAE_FIELD_IP_TTL,
1052                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
1053                 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
1054         },
1055 };
1056
1057 static int
1058 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
1059                              struct sfc_flow_parse_ctx *ctx,
1060                              struct rte_flow_error *error)
1061 {
1062         rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1063         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1064         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1065         struct rte_flow_item_ipv4 supp_mask;
1066         const uint8_t *spec = NULL;
1067         const uint8_t *mask = NULL;
1068         int rc;
1069
1070         sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
1071                                      &supp_mask, sizeof(supp_mask));
1072
1073         rc = sfc_flow_parse_init(item,
1074                                  (const void **)&spec, (const void **)&mask,
1075                                  (const void *)&supp_mask,
1076                                  &rte_flow_item_ipv4_mask,
1077                                  sizeof(struct rte_flow_item_ipv4), error);
1078         if (rc != 0)
1079                 return rc;
1080
1081         pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
1082         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1083
1084         if (spec != NULL) {
1085                 const struct rte_flow_item_ipv4 *item_spec;
1086                 const struct rte_flow_item_ipv4 *item_mask;
1087
1088                 item_spec = (const struct rte_flow_item_ipv4 *)spec;
1089                 item_mask = (const struct rte_flow_item_ipv4 *)mask;
1090
1091                 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
1092                 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
1093         } else {
1094                 return 0;
1095         }
1096
1097         return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
1098                                   ctx_mae, error);
1099 }
1100
1101 static const struct sfc_mae_field_locator flocs_ipv6[] = {
1102         {
1103                 EFX_MAE_FIELD_SRC_IP6_BE,
1104                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
1105                 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
1106         },
1107         {
1108                 EFX_MAE_FIELD_DST_IP6_BE,
1109                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
1110                 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
1111         },
1112         {
1113                 /*
1114                  * This locator is used only for building supported fields mask.
1115                  * The field is handled by sfc_mae_rule_process_pattern_data().
1116                  */
1117                 SFC_MAE_FIELD_HANDLING_DEFERRED,
1118                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
1119                 offsetof(struct rte_flow_item_ipv6, hdr.proto),
1120         },
1121         {
1122                 EFX_MAE_FIELD_IP_TTL,
1123                 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
1124                 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
1125         },
1126 };
1127
1128 static int
1129 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
1130                              struct sfc_flow_parse_ctx *ctx,
1131                              struct rte_flow_error *error)
1132 {
1133         rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1134         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1135         const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
1136         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1137         struct rte_flow_item_ipv6 supp_mask;
1138         const uint8_t *spec = NULL;
1139         const uint8_t *mask = NULL;
1140         rte_be32_t vtc_flow_be;
1141         uint32_t vtc_flow;
1142         uint8_t tc_value;
1143         uint8_t tc_mask;
1144         int rc;
1145
1146         sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
1147                                      &supp_mask, sizeof(supp_mask));
1148
1149         vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
1150         memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
1151
1152         rc = sfc_flow_parse_init(item,
1153                                  (const void **)&spec, (const void **)&mask,
1154                                  (const void *)&supp_mask,
1155                                  &rte_flow_item_ipv6_mask,
1156                                  sizeof(struct rte_flow_item_ipv6), error);
1157         if (rc != 0)
1158                 return rc;
1159
1160         pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
1161         pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
1162
1163         if (spec != NULL) {
1164                 const struct rte_flow_item_ipv6 *item_spec;
1165                 const struct rte_flow_item_ipv6 *item_mask;
1166
1167                 item_spec = (const struct rte_flow_item_ipv6 *)spec;
1168                 item_mask = (const struct rte_flow_item_ipv6 *)mask;
1169
1170                 pdata->l3_next_proto_value = item_spec->hdr.proto;
1171                 pdata->l3_next_proto_mask = item_mask->hdr.proto;
1172         } else {
1173                 return 0;
1174         }
1175
1176         rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
1177                                 ctx_mae, error);
1178         if (rc != 0)
1179                 return rc;
1180
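        /*
         * The IPv6 Traffic Class is not a standalone field in the item,
         * so extract it from the 32-bit "vtc_flow" word of both the
         * spec and the mask and match it via EFX_MAE_FIELD_IP_TOS.
         */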
1181         memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
1182         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1183         tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1184
1185         memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
1186         vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
1187         tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
1188
1189         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1190                                           fremap[EFX_MAE_FIELD_IP_TOS],
1191                                           sizeof(tc_value), &tc_value,
1192                                           sizeof(tc_mask), &tc_mask);
1193         if (rc != 0) {
1194                 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1195                                 NULL, "Failed to process item fields");
1196         }
1197
1198         return 0;
1199 }
1200
1201 static const struct sfc_mae_field_locator flocs_tcp[] = {
1202         {
1203                 EFX_MAE_FIELD_L4_SPORT_BE,
1204                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
1205                 offsetof(struct rte_flow_item_tcp, hdr.src_port),
1206         },
1207         {
1208                 EFX_MAE_FIELD_L4_DPORT_BE,
1209                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
1210                 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
1211         },
1212         {
1213                 EFX_MAE_FIELD_TCP_FLAGS_BE,
1214                 /*
1215                  * The size and offset have been picked intentionally:
1216                  * the target MAE field is oversize (16 bit), and the
1217                  * mapping relies on the fact that it is big-endian.
1218                  */
1219                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
1220                 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
1221                 offsetof(struct rte_flow_item_tcp, hdr.data_off),
1222         },
1223 };
1224
1225 static int
1226 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
1227                             struct sfc_flow_parse_ctx *ctx,
1228                             struct rte_flow_error *error)
1229 {
1230         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1231         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1232         struct rte_flow_item_tcp supp_mask;
1233         const uint8_t *spec = NULL;
1234         const uint8_t *mask = NULL;
1235         int rc;
1236
1237         /*
1238          * Item TCP is invalid among outermost items. Detect this case
1239          * by checking which match specification is being built now.
1240          */
1241         if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
1242                 return rte_flow_error_set(error, EINVAL,
1243                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1244                                           "TCP in outer frame is invalid");
1245         }
1246
1247         sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
1248                                      &supp_mask, sizeof(supp_mask));
1249
1250         rc = sfc_flow_parse_init(item,
1251                                  (const void **)&spec, (const void **)&mask,
1252                                  (const void *)&supp_mask,
1253                                  &rte_flow_item_tcp_mask,
1254                                  sizeof(struct rte_flow_item_tcp), error);
1255         if (rc != 0)
1256                 return rc;
1257
1258         pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
1259         pdata->l3_next_proto_restriction_mask = 0xff;
1260
1261         if (spec == NULL)
1262                 return 0;
1263
1264         return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
1265                                   ctx_mae, error);
1266 }
1267
1268 static const struct sfc_mae_field_locator flocs_udp[] = {
1269         {
1270                 EFX_MAE_FIELD_L4_SPORT_BE,
1271                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
1272                 offsetof(struct rte_flow_item_udp, hdr.src_port),
1273         },
1274         {
1275                 EFX_MAE_FIELD_L4_DPORT_BE,
1276                 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
1277                 offsetof(struct rte_flow_item_udp, hdr.dst_port),
1278         },
1279 };
1280
1281 static int
1282 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
1283                             struct sfc_flow_parse_ctx *ctx,
1284                             struct rte_flow_error *error)
1285 {
1286         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1287         struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1288         struct rte_flow_item_udp supp_mask;
1289         const uint8_t *spec = NULL;
1290         const uint8_t *mask = NULL;
1291         int rc;
1292
1293         sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
1294                                      &supp_mask, sizeof(supp_mask));
1295
1296         rc = sfc_flow_parse_init(item,
1297                                  (const void **)&spec, (const void **)&mask,
1298                                  (const void *)&supp_mask,
1299                                  &rte_flow_item_udp_mask,
1300                                  sizeof(struct rte_flow_item_udp), error);
1301         if (rc != 0)
1302                 return rc;
1303
1304         pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
1305         pdata->l3_next_proto_restriction_mask = 0xff;
1306
1307         if (spec == NULL)
1308                 return 0;
1309
1310         return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
1311                                   ctx_mae, error);
1312 }
1313
1314 static const struct sfc_mae_field_locator flocs_tunnel[] = {
1315         {
1316                 /*
1317                  * The size and offset values are relevant
1318                  * for Geneve and NVGRE, too.
1319                  */
1320                 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
1321                 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
1322         },
1323 };
1324
1325 /*
1326  * An auxiliary registry which allows using non-encap. field IDs
1327  * directly when building a match specification of type ACTION.
1328  *
1329  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
1330  */
1331 static const efx_mae_field_id_t field_ids_no_remap[] = {
1332 #define FIELD_ID_NO_REMAP(_field) \
1333         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
1334
1335         FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
1336         FIELD_ID_NO_REMAP(ETH_SADDR_BE),
1337         FIELD_ID_NO_REMAP(ETH_DADDR_BE),
1338         FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
1339         FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
1340         FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
1341         FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
1342         FIELD_ID_NO_REMAP(SRC_IP4_BE),
1343         FIELD_ID_NO_REMAP(DST_IP4_BE),
1344         FIELD_ID_NO_REMAP(IP_PROTO),
1345         FIELD_ID_NO_REMAP(IP_TOS),
1346         FIELD_ID_NO_REMAP(IP_TTL),
1347         FIELD_ID_NO_REMAP(SRC_IP6_BE),
1348         FIELD_ID_NO_REMAP(DST_IP6_BE),
1349         FIELD_ID_NO_REMAP(L4_SPORT_BE),
1350         FIELD_ID_NO_REMAP(L4_DPORT_BE),
1351         FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
1352
1353 #undef FIELD_ID_NO_REMAP
1354 };
1355
1356 /*
1357  * An auxiliary registry which allows using "ENC" field IDs
1358  * when building a match specification of type OUTER.
1359  *
1360  * See sfc_mae_rule_encap_parse_init().
1361  */
1362 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
1363 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
1364         [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
1365
1366         FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
1367         FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
1368         FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
1369         FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
1370         FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
1371         FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
1372         FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
1373         FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
1374         FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
1375         FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
1376         FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
1377         FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
1378         FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
1379         FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
1380         FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
1381         FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
1382
1383 #undef FIELD_ID_REMAP_TO_ENCAP
1384 };
1385
1386 static int
1387 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
1388                                struct sfc_flow_parse_ctx *ctx,
1389                                struct rte_flow_error *error)
1390 {
1391         struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1392         uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
1393         uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
1394         const struct rte_flow_item_vxlan *vxp;
1395         uint8_t supp_mask[sizeof(uint64_t)];
1396         const uint8_t *spec = NULL;
1397         const uint8_t *mask = NULL;
1398         const void *def_mask;
1399         int rc;
1400
1401         /*
1402          * We're about to start processing inner frame items.
1403          * Process pattern data that has been deferred so far
1404          * and reset pattern data storage.
1405          */
1406         rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
1407         if (rc != 0)
1408                 return rc;
1409
1410         memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
1411
1412         sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
1413                                      &supp_mask, sizeof(supp_mask));
1414
1415         /*
1416          * This tunnel item was preliminarily detected by
1417          * sfc_mae_rule_encap_parse_init(), which also picked the default
1418          * mask. All tunnel masks share one size; the VXLAN one is used below.
1419          */
1420         def_mask = ctx_mae->tunnel_def_mask;
1421
1422         rc = sfc_flow_parse_init(item,
1423                                  (const void **)&spec, (const void **)&mask,
1424                                  (const void *)&supp_mask, def_mask,
1425                                  sizeof(rte_flow_item_vxlan_mask), error);
1426         if (rc != 0)
1427                 return rc;
1428
1429         /*
1430          * This item and later ones comprise a
1431          * match specification of type ACTION.
1432          */
1433         ctx_mae->match_spec = ctx_mae->match_spec_action;
1434
1435         /* This item and later ones use non-encap. EFX MAE field IDs. */
1436         ctx_mae->field_ids_remap = field_ids_no_remap;
1437
1438         if (spec == NULL)
1439                 return 0;
1440
1441         /*
1442          * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
1443          * Copy 24-bit VNI, which is BE, at offset 1 in it.
1444          * The extra byte is 0 both in the mask and in the value.
1445          */
1446         vxp = (const struct rte_flow_item_vxlan *)spec;
1447         memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
1448
1449         vxp = (const struct rte_flow_item_vxlan *)mask;
1450         memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
1451
1452         rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
1453                                           EFX_MAE_FIELD_ENC_VNET_ID_BE,
1454                                           sizeof(vnet_id_v), vnet_id_v,
1455                                           sizeof(vnet_id_m), vnet_id_m);
1456         if (rc != 0) {
1457                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1458                                         item, "Failed to set VXLAN VNI");
1459         }
1460
1461         return rc;
1462 }
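
/*
 * Self-contained illustration (not driver code) of the copy above: with
 * vni[] = { 0x12, 0x34, 0x56 }, the resulting buffer reads
 * { 0x00, 0x12, 0x34, 0x56 }, i.e. the 32-bit big-endian value 0x00123456
 * expected by EFX_MAE_FIELD_ENC_VNET_ID_BE.
 */
static __rte_unused void
sfc_mae_example_vni_to_vnet_id(const uint8_t *vni, uint8_t *vnet_id)
{
        vnet_id[0] = 0;                 /* Spare byte: zero in value and mask */
        memcpy(vnet_id + 1, vni, 3);    /* 24-bit big-endian VNI at offset 1 */
}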
1463
1464 static const struct sfc_flow_item sfc_flow_items[] = {
1465         {
1466                 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
1467                 /*
1468                  * In terms of RTE flow, this item is a META one,
1469                  * and its position in the pattern is don't care.
1470                  */
1471                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1472                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1473                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1474                 .parse = sfc_mae_rule_parse_item_port_id,
1475         },
1476         {
1477                 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
1478                 /*
1479                  * In terms of RTE flow, this item is a META one,
1480                  * and its position in the pattern is don't care.
1481                  */
1482                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1483                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1484                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1485                 .parse = sfc_mae_rule_parse_item_phy_port,
1486         },
1487         {
1488                 .type = RTE_FLOW_ITEM_TYPE_PF,
1489                 /*
1490                  * In terms of RTE flow, this item is a META one,
1491                  * and its position in the pattern is don't care.
1492                  */
1493                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1494                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1495                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1496                 .parse = sfc_mae_rule_parse_item_pf,
1497         },
1498         {
1499                 .type = RTE_FLOW_ITEM_TYPE_VF,
1500                 /*
1501                  * In terms of RTE flow, this item is a META one,
1502                  * and its position in the pattern is don't care.
1503                  */
1504                 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1505                 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1506                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1507                 .parse = sfc_mae_rule_parse_item_vf,
1508         },
1509         {
1510                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1511                 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1512                 .layer = SFC_FLOW_ITEM_L2,
1513                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1514                 .parse = sfc_mae_rule_parse_item_eth,
1515         },
1516         {
1517                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1518                 .prev_layer = SFC_FLOW_ITEM_L2,
1519                 .layer = SFC_FLOW_ITEM_L2,
1520                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1521                 .parse = sfc_mae_rule_parse_item_vlan,
1522         },
1523         {
1524                 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1525                 .prev_layer = SFC_FLOW_ITEM_L2,
1526                 .layer = SFC_FLOW_ITEM_L3,
1527                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1528                 .parse = sfc_mae_rule_parse_item_ipv4,
1529         },
1530         {
1531                 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1532                 .prev_layer = SFC_FLOW_ITEM_L2,
1533                 .layer = SFC_FLOW_ITEM_L3,
1534                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1535                 .parse = sfc_mae_rule_parse_item_ipv6,
1536         },
1537         {
1538                 .type = RTE_FLOW_ITEM_TYPE_TCP,
1539                 .prev_layer = SFC_FLOW_ITEM_L3,
1540                 .layer = SFC_FLOW_ITEM_L4,
1541                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1542                 .parse = sfc_mae_rule_parse_item_tcp,
1543         },
1544         {
1545                 .type = RTE_FLOW_ITEM_TYPE_UDP,
1546                 .prev_layer = SFC_FLOW_ITEM_L3,
1547                 .layer = SFC_FLOW_ITEM_L4,
1548                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1549                 .parse = sfc_mae_rule_parse_item_udp,
1550         },
1551         {
1552                 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1553                 .prev_layer = SFC_FLOW_ITEM_L4,
1554                 .layer = SFC_FLOW_ITEM_START_LAYER,
1555                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1556                 .parse = sfc_mae_rule_parse_item_tunnel,
1557         },
1558         {
1559                 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1560                 .prev_layer = SFC_FLOW_ITEM_L4,
1561                 .layer = SFC_FLOW_ITEM_START_LAYER,
1562                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1563                 .parse = sfc_mae_rule_parse_item_tunnel,
1564         },
1565         {
1566                 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1567                 .prev_layer = SFC_FLOW_ITEM_L3,
1568                 .layer = SFC_FLOW_ITEM_START_LAYER,
1569                 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
1570                 .parse = sfc_mae_rule_parse_item_tunnel,
1571         },
1572 };
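
/*
 * For illustration only: a pattern accepted by the table above. The
 * outermost ETH / IPV4 / UDP items go to the OUTER match specification,
 * the VXLAN item switches parsing over to the ACTION specification, and
 * the trailing ETH item describes the inner frame. Specs and masks are
 * left NULL here, which means "match any".
 */
static __rte_unused const struct rte_flow_item sfc_mae_example_vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};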
1573
1574 static int
1575 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
1576                            struct sfc_mae_parse_ctx *ctx,
1577                            struct sfc_mae_outer_rule **rulep,
1578                            struct rte_flow_error *error)
1579 {
1580         struct sfc_mae_outer_rule *rule;
1581         int rc;
1582
1583         if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
1584                 *rulep = NULL;
1585                 return 0;
1586         }
1587
1588         SFC_ASSERT(ctx->match_spec_outer != NULL);
1589
1590         if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
1591                 return rte_flow_error_set(error, ENOTSUP,
1592                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1593                                           "Inconsistent pattern (outer)");
1594         }
1595
1596         *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
1597                                            ctx->encap_type);
1598         if (*rulep != NULL) {
1599                 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1600         } else {
1601                 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
1602                                             ctx->encap_type, rulep);
1603                 if (rc != 0) {
1604                         return rte_flow_error_set(error, rc,
1605                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1606                                         "Failed to process the pattern");
1607                 }
1608         }
1609
1610         /* The spec was freed (attach) or adopted by the rule (add) above. */
1611         ctx->match_spec_outer = NULL;
1612
1613         /*
1614          * Whether an existing outer rule was reused or a new one was added,
1615          * the outer rule ID is either valid or EFX_MAE_RSRC_ID_INVALID. Set
1616          * it (with the full mask) in the action rule match specification to
1617          * keep class comparisons with existing rules correct; the full mask
1618          * also tells the upcoming validation that the field must be supported.
1619          */
1620         rule = *rulep;
1621         rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
1622                                                   &rule->fw_rsrc.rule_id);
1623         if (rc != 0) {
1624                 sfc_mae_outer_rule_del(sa, *rulep);
1625                 *rulep = NULL;
1626
1627                 return rte_flow_error_set(error, rc,
1628                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1629                                           "Failed to process the pattern");
1630         }
1631
1632         return 0;
1633 }
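
/*
 * Note on the helpers used above: sfc_mae_outer_rule_attach() and
 * sfc_mae_outer_rule_add() implement the attach-or-add idiom, i.e. look
 * up an equivalent cached entry and take a reference first, and only
 * allocate a new entry when no match exists. The action set helpers in
 * sfc_mae_rule_parse_actions() below follow the same pattern.
 */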
1634
1635 static int
1636 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
1637                               const struct rte_flow_item pattern[],
1638                               struct sfc_mae_parse_ctx *ctx,
1639                               struct rte_flow_error *error)
1640 {
1641         struct sfc_mae *mae = &sa->mae;
1642         int rc;
1643
1644         if (pattern == NULL) {
1645                 rte_flow_error_set(error, EINVAL,
1646                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1647                                    "NULL pattern");
1648                 return -rte_errno;
1649         }
1650
1651         for (;;) {
1652                 switch (pattern->type) {
1653                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1654                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
1655                         ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
1656                         break;
1657                 case RTE_FLOW_ITEM_TYPE_GENEVE:
1658                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
1659                         ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
1660                         /* sfc_mae_rule_parse_item_tunnel() needs one size. */
1661                         RTE_BUILD_BUG_ON(sizeof(rte_flow_item_geneve_mask) !=
1662                                          sizeof(rte_flow_item_vxlan_mask));
1663                         break;
1664                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1665                         ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1666                         ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
1667                         /* Same uniform mask size requirement as above. */
1668                         RTE_BUILD_BUG_ON(sizeof(rte_flow_item_nvgre_mask) !=
1669                                          sizeof(rte_flow_item_vxlan_mask));
1670                         break;
1671                 case RTE_FLOW_ITEM_TYPE_END:
1672                         break;
1673                 default:
1674                         ++pattern;
1675                         continue;
1676                 }
1677
1678                 break;
1679         }
1680
1681         if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
1682                 return 0;
1683
1684         if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
1685                 return rte_flow_error_set(error, ENOTSUP,
1686                                           RTE_FLOW_ERROR_TYPE_ITEM,
1687                                           pattern, "Unsupported tunnel item");
1688         }
1689
1690         if (ctx->priority >= mae->nb_outer_rule_prios_max) {
1691                 return rte_flow_error_set(error, ENOTSUP,
1692                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1693                                           NULL, "Unsupported priority level");
1694         }
1695
1696         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
1697                                      &ctx->match_spec_outer);
1698         if (rc != 0) {
1699                 return rte_flow_error_set(error, rc,
1700                         RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1701                         "Failed to initialise outer rule match specification");
1702         }
1703
1704         /* Outermost items comprise a match specification of type OUTER. */
1705         ctx->match_spec = ctx->match_spec_outer;
1706
1707         /* Outermost items use "ENC" EFX MAE field IDs. */
1708         ctx->field_ids_remap = field_ids_remap_to_encap;
1709
1710         return 0;
1711 }
1712
1713 static void
1714 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
1715                               struct sfc_mae_parse_ctx *ctx)
1716 {
1717         if (ctx->match_spec_outer == NULL)
1718                 return;
1719
1720         efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
1721 }
1722
1723 int
1724 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
1725                            const struct rte_flow_item pattern[],
1726                            struct sfc_flow_spec_mae *spec,
1727                            struct rte_flow_error *error)
1728 {
1729         struct sfc_mae_parse_ctx ctx_mae;
1730         struct sfc_flow_parse_ctx ctx;
1731         int rc;
1732
1733         memset(&ctx_mae, 0, sizeof(ctx_mae));
1734         ctx_mae.priority = spec->priority;
1735         ctx_mae.sa = sa;
1736
1737         rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
1738                                      spec->priority,
1739                                      &ctx_mae.match_spec_action);
1740         if (rc != 0) {
1741                 rc = rte_flow_error_set(error, rc,
1742                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1743                         "Failed to initialise action rule match specification");
1744                 goto fail_init_match_spec_action;
1745         }
1746
1747         /*
1748          * As a preliminary setting, assume that there is no encapsulation
1749          * in the pattern. That is, pattern items are about to comprise a
1750          * match specification of type ACTION and use non-encap. field IDs.
1751          *
1752          * sfc_mae_rule_encap_parse_init() below may override this.
1753          */
1754         ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
1755         ctx_mae.match_spec = ctx_mae.match_spec_action;
1756         ctx_mae.field_ids_remap = field_ids_no_remap;
1757
1758         ctx.type = SFC_FLOW_PARSE_CTX_MAE;
1759         ctx.mae = &ctx_mae;
1760
1761         rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
1762         if (rc != 0)
1763                 goto fail_encap_parse_init;
1764
1765         rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
1766                                     pattern, &ctx, error);
1767         if (rc != 0)
1768                 goto fail_parse_pattern;
1769
1770         rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
1771         if (rc != 0)
1772                 goto fail_process_pattern_data;
1773
1774         rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
1775         if (rc != 0)
1776                 goto fail_process_outer;
1777
1778         if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
1779                 rc = rte_flow_error_set(error, ENOTSUP,
1780                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1781                                         "Inconsistent pattern");
1782                 goto fail_validate_match_spec_action;
1783         }
1784
1785         spec->match_spec = ctx_mae.match_spec_action;
1786
1787         return 0;
1788
1789 fail_validate_match_spec_action:
1790 fail_process_outer:
1791 fail_process_pattern_data:
1792 fail_parse_pattern:
1793         sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
1794
1795 fail_encap_parse_init:
1796         efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
1797
1798 fail_init_match_spec_action:
1799         return rc;
1800 }
1801
1802 /*
1803  * An action supported by MAE may correspond to a bundle of RTE flow actions,
1804  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
1805  * That is, related RTE flow actions need to be tracked as parts of a whole
1806  * so that they can be combined into a single action and submitted to the
1807  * MAE representation of a given rule's action set.
1808  *
1809  * Each RTE flow action provided by an application gets classified as
1810  * one belonging to some bundle type. If an action is not supposed to
1811  * belong to any bundle, or if this action is END, it is described as
1812  * one belonging to a dummy bundle of type EMPTY.
1813  *
1814  * A currently tracked bundle will be submitted if a repeating
1815  * action or an action of different bundle type follows.
1816  */
1817
1818 enum sfc_mae_actions_bundle_type {
1819         SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
1820         SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
1821 };
1822
1823 struct sfc_mae_actions_bundle {
1824         enum sfc_mae_actions_bundle_type        type;
1825
1826         /* Indicates actions already tracked by the current bundle */
1827         uint64_t                                actions_mask;
1828
1829         /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
1830         rte_be16_t                              vlan_push_tpid;
1831         rte_be16_t                              vlan_push_tci;
1832 };
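
/*
 * Example (illustrative, not driver code): the following actions form one
 * VLAN_PUSH bundle and end up as a single MAE VLAN push of TPID 0x8100,
 * VID 42 and PCP 7. All configuration structure and field names below
 * come from rte_flow.h.
 */
static const struct rte_flow_action_of_push_vlan sfc_mae_example_push = {
        .ethertype = RTE_BE16(0x8100),
};
static const struct rte_flow_action_of_set_vlan_vid sfc_mae_example_vid = {
        .vlan_vid = RTE_BE16(42),
};
static const struct rte_flow_action_of_set_vlan_pcp sfc_mae_example_pcp = {
        .vlan_pcp = 7,
};
static __rte_unused const struct rte_flow_action sfc_mae_example_vlan_push[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
          .conf = &sfc_mae_example_push },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
          .conf = &sfc_mae_example_vid },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
          .conf = &sfc_mae_example_pcp },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};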
1833
1834 /*
1835  * Combine configuration of RTE flow actions tracked by the bundle into a
1836  * single action and submit the result to MAE action set specification.
1837  * Do nothing in the case of dummy action bundle.
1838  */
1839 static int
1840 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
1841                               efx_mae_actions_t *spec)
1842 {
1843         int rc = 0;
1844
1845         switch (bundle->type) {
1846         case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
1847                 break;
1848         case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
1849                 rc = efx_mae_action_set_populate_vlan_push(
1850                         spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
1851                 break;
1852         default:
1853                 SFC_ASSERT(B_FALSE);
1854                 break;
1855         }
1856
1857         return rc;
1858 }
1859
1860 /*
1861  * Given the type of the next RTE flow action in the line, decide
1862  * whether a new bundle is about to start, and, if this is the case,
1863  * submit and reset the current bundle.
1864  */
1865 static int
1866 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
1867                             struct sfc_mae_actions_bundle *bundle,
1868                             efx_mae_actions_t *spec,
1869                             struct rte_flow_error *error)
1870 {
1871         enum sfc_mae_actions_bundle_type bundle_type_new;
1872         int rc;
1873
1874         switch (action->type) {
1875         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
1876         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
1877         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
1878                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
1879                 break;
1880         default:
1881                 /*
1882                  * Self-sufficient actions, including END, are handled in this
1883                  * case. No checks for unsupported actions are needed here;
1884                  * sfc_mae_rule_parse_action() takes care of rejecting them.
1885                  */
1886                 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
1887                 break;
1888         }
1889
1890         if (bundle_type_new != bundle->type ||
1891             (bundle->actions_mask & (1ULL << action->type)) != 0) {
1892                 rc = sfc_mae_actions_bundle_submit(bundle, spec);
1893                 if (rc != 0)
1894                         goto fail_submit;
1895
1896                 memset(bundle, 0, sizeof(*bundle));
1897         }
1898
1899         bundle->type = bundle_type_new;
1900
1901         return 0;
1902
1903 fail_submit:
1904         return rte_flow_error_set(error, rc,
1905                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1906                         "Failed to request the (group of) action(s)");
1907 }
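
/*
 * Walk-through (illustrative): for the action sequence
 *   OF_PUSH_VLAN, OF_SET_VLAN_VID, OF_SET_VLAN_PCP, MARK, END
 * the first three actions accumulate in a single VLAN_PUSH bundle. MARK
 * belongs to the EMPTY bundle type, so the sync above submits the
 * completed VLAN push before MARK itself is parsed. The extra sync done
 * for END in sfc_mae_rule_parse_actions() below flushes a bundle which
 * is still pending at the end of the list, if any.
 */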
1908
1909 static void
1910 sfc_mae_rule_parse_action_of_push_vlan(
1911                             const struct rte_flow_action_of_push_vlan *conf,
1912                             struct sfc_mae_actions_bundle *bundle)
1913 {
1914         bundle->vlan_push_tpid = conf->ethertype;
1915 }
1916
1917 static void
1918 sfc_mae_rule_parse_action_of_set_vlan_vid(
1919                             const struct rte_flow_action_of_set_vlan_vid *conf,
1920                             struct sfc_mae_actions_bundle *bundle)
1921 {
1922         bundle->vlan_push_tci |= (conf->vlan_vid &
1923                                   rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
1924 }
1925
1926 static void
1927 sfc_mae_rule_parse_action_of_set_vlan_pcp(
1928                             const struct rte_flow_action_of_set_vlan_pcp *conf,
1929                             struct sfc_mae_actions_bundle *bundle)
1930 {
1931         uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
1932                                            RTE_LEN2MASK(3, uint8_t)) << 13;
1933
1934         bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
1935 }
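
/*
 * Equivalent TCI composition in one place (illustrative, not driver code):
 * for VID 42 and PCP 7, the host-order TCI is (7 << 13) | 42 = 0xe02a,
 * stored big-endian in vlan_push_tci. The DEI bit (bit 12) is left at 0.
 */
static __rte_unused rte_be16_t
sfc_mae_example_vlan_tci(uint16_t vid, uint8_t pcp)
{
        uint16_t tci = ((uint16_t)(pcp & RTE_LEN2MASK(3, uint8_t)) << 13) |
                       (vid & RTE_LEN2MASK(12, uint16_t));

        return rte_cpu_to_be_16(tci);
}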
1936
1937 static int
1938 sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
1939                                efx_mae_actions_t *spec)
1940 {
1941         return efx_mae_action_set_populate_mark(spec, conf->id);
1942 }
1943
1944 static int
1945 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
1946                                    const struct rte_flow_action_phy_port *conf,
1947                                    efx_mae_actions_t *spec)
1948 {
1949         efx_mport_sel_t mport;
1950         uint32_t phy_port;
1951         int rc;
1952
1953         if (conf->original != 0)
1954                 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
1955         else
1956                 phy_port = conf->index;
1957
1958         rc = efx_mae_mport_by_phy_port(phy_port, &mport);
1959         if (rc != 0)
1960                 return rc;
1961
1962         return efx_mae_action_set_populate_deliver(spec, &mport);
1963 }
1964
1965 static int
1966 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
1967                                 const struct rte_flow_action_vf *vf_conf,
1968                                 efx_mae_actions_t *spec)
1969 {
1970         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1971         efx_mport_sel_t mport;
1972         uint32_t vf;
1973         int rc;
1974
1975         if (vf_conf == NULL)
1976                 vf = EFX_PCI_VF_INVALID;
1977         else if (vf_conf->original != 0)
1978                 vf = encp->enc_vf;
1979         else
1980                 vf = vf_conf->id;
1981
1982         rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
1983         if (rc != 0)
1984                 return rc;
1985
1986         return efx_mae_action_set_populate_deliver(spec, &mport);
1987 }
1988
1989 static int
1990 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
1991                                   const struct rte_flow_action_port_id *conf,
1992                                   efx_mae_actions_t *spec)
1993 {
1994         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1995         struct sfc_mae *mae = &sa->mae;
1996         efx_mport_sel_t mport;
1997         uint16_t port_id;
1998         int rc;
1999
2000         port_id = (conf->original != 0) ? sas->port_id : conf->id;
2001
2002         rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
2003                                            port_id, &mport);
2004         if (rc != 0)
2005                 return rc;
2006
2007         return efx_mae_action_set_populate_deliver(spec, &mport);
2008 }
2009
2010 static int
2011 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
2012                           const struct rte_flow_action *action,
2013                           struct sfc_mae_actions_bundle *bundle,
2014                           efx_mae_actions_t *spec,
2015                           struct rte_flow_error *error)
2016 {
2017         int rc = 0;
2018
2019         switch (action->type) {
2020         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2021                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
2022                                        bundle->actions_mask);
2023                 rc = efx_mae_action_set_populate_vlan_pop(spec);
2024                 break;
2025         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2026                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
2027                                        bundle->actions_mask);
2028                 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
2029                 break;
2030         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2031                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
2032                                        bundle->actions_mask);
2033                 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
2034                 break;
2035         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2036                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
2037                                        bundle->actions_mask);
2038                 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
2039                 break;
2040         case RTE_FLOW_ACTION_TYPE_FLAG:
2041                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
2042                                        bundle->actions_mask);
2043                 rc = efx_mae_action_set_populate_flag(spec);
2044                 break;
2045         case RTE_FLOW_ACTION_TYPE_MARK:
2046                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
2047                                        bundle->actions_mask);
2048                 rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
2049                 break;
2050         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
2051                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
2052                                        bundle->actions_mask);
2053                 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
2054                 break;
2055         case RTE_FLOW_ACTION_TYPE_PF:
2056                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
2057                                        bundle->actions_mask);
2058                 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
2059                 break;
2060         case RTE_FLOW_ACTION_TYPE_VF:
2061                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
2062                                        bundle->actions_mask);
2063                 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
2064                 break;
2065         case RTE_FLOW_ACTION_TYPE_PORT_ID:
2066                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
2067                                        bundle->actions_mask);
2068                 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
2069                 break;
2070         case RTE_FLOW_ACTION_TYPE_DROP:
2071                 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
2072                                        bundle->actions_mask);
2073                 rc = efx_mae_action_set_populate_drop(spec);
2074                 break;
2075         default:
2076                 return rte_flow_error_set(error, ENOTSUP,
2077                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2078                                 "Unsupported action");
2079         }
2080
2081         if (rc != 0) {
2082                 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
2083                                 NULL, "Failed to request the action");
2084         } else {
2085                 bundle->actions_mask |= (1ULL << action->type);
2086         }
2087
2088         return rc;
2089 }
2090
2091 int
2092 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
2093                            const struct rte_flow_action actions[],
2094                            struct sfc_mae_action_set **action_setp,
2095                            struct rte_flow_error *error)
2096 {
2097         struct sfc_mae_actions_bundle bundle = {0};
2098         const struct rte_flow_action *action;
2099         efx_mae_actions_t *spec;
2100         int rc;
2101
2102         if (actions == NULL) {
2103                 return rte_flow_error_set(error, EINVAL,
2104                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
2105                                 "NULL actions");
2106         }
2107
2108         rc = efx_mae_action_set_spec_init(sa->nic, &spec);
2109         if (rc != 0)
2110                 goto fail_action_set_spec_init;
2111
2112         for (action = actions;
2113              action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
2114                 rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2115                 if (rc != 0)
2116                         goto fail_rule_parse_action;
2117
2118                 rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
2119                                                error);
2120                 if (rc != 0)
2121                         goto fail_rule_parse_action;
2122         }
2123
2124         rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
2125         if (rc != 0)
2126                 goto fail_rule_parse_action;
2127
2128         *action_setp = sfc_mae_action_set_attach(sa, spec);
2129         if (*action_setp != NULL) {
2130                 efx_mae_action_set_spec_fini(sa->nic, spec);
2131                 return 0;
2132         }
2133
2134         rc = sfc_mae_action_set_add(sa, spec, action_setp);
2135         if (rc != 0)
2136                 goto fail_action_set_add;
2137
2138         return 0;
2139
2140 fail_action_set_add:
2141 fail_rule_parse_action:
2142         efx_mae_action_set_spec_fini(sa->nic, spec);
2143
2144 fail_action_set_spec_init:
2145         if (rc > 0) {
2146                 rc = rte_flow_error_set(error, rc,
2147                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2148                         NULL, "Failed to process the action");
2149         }
2150         return rc;
2151 }
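
/*
 * Usage sketch (hypothetical caller, not driver code): parse a trivial
 * DROP action list into an attached-or-new action set entry.
 */
static __rte_unused int
sfc_mae_example_parse_drop(struct sfc_adapter *sa,
                           struct sfc_mae_action_set **action_setp,
                           struct rte_flow_error *error)
{
        static const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return sfc_mae_rule_parse_actions(sa, actions, action_setp, error);
}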
2152
2153 static bool
2154 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
2155                         const efx_mae_match_spec_t *left,
2156                         const efx_mae_match_spec_t *right)
2157 {
2158         bool have_same_class;
2159         int rc;
2160
2161         rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
2162                                            &have_same_class);
2163
2164         return (rc == 0) ? have_same_class : false;
2165 }
2166
2167 static int
2168 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
2169                                 struct sfc_mae_outer_rule *rule)
2170 {
2171         struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
2172         struct sfc_mae_outer_rule *entry;
2173         struct sfc_mae *mae = &sa->mae;
2174
2175         if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
2176                 /* An active rule is reused. Its class is known to be valid. */
2177                 return 0;
2178         }
2179
2180         TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
2181                               sfc_mae_outer_rules, entries) {
2182                 const efx_mae_match_spec_t *left = entry->match_spec;
2183                 const efx_mae_match_spec_t *right = rule->match_spec;
2184
2185                 if (entry == rule)
2186                         continue;
2187
2188                 if (sfc_mae_rules_class_cmp(sa, left, right))
2189                         return 0;
2190         }
2191
2192         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2193                  "support for outer frame pattern items is not guaranteed; "
2194                  "other than that, the items are valid from SW standpoint");
2195         return 0;
2196 }
2197
2198 static int
2199 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
2200                                  struct sfc_flow_spec_mae *spec)
2201 {
2202         const struct rte_flow *entry;
2203
2204         TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
2205                 const struct sfc_flow_spec *entry_spec = &entry->spec;
2206                 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
2207                 const efx_mae_match_spec_t *left = es_mae->match_spec;
2208                 const efx_mae_match_spec_t *right = spec->match_spec;
2209
2210                 switch (entry_spec->type) {
2211                 case SFC_FLOW_SPEC_FILTER:
2212                         /* Ignore VNIC-level flows */
2213                         break;
2214                 case SFC_FLOW_SPEC_MAE:
2215                         if (sfc_mae_rules_class_cmp(sa, left, right))
2216                                 return 0;
2217                         break;
2218                 default:
2219                         SFC_ASSERT(false);
2220                 }
2221         }
2222
2223         sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
2224                  "support for inner frame pattern items is not guaranteed; "
2225                  "other than that, the items are valid from SW standpoint");
2226         return 0;
2227 }
2228
2229 /**
2230  * Confirm that a given flow can be accepted by the FW.
2231  *
2232  * @param sa
2233  *   Software adapter context
2234  * @param flow
2235  *   Flow to be verified
2236  * @return
2237  *   Zero on success and non-zero in the case of error.
2238  *   A special value of EAGAIN indicates that the adapter is
2239  *   not in the started state. The started state is required
2240  *   because it only makes sense to compare the rule class of
2241  *   the flow being validated with classes of the active rules.
2242  *   Such classes are known to be supported by the FW.
2243  */
2244 int
2245 sfc_mae_flow_verify(struct sfc_adapter *sa,
2246                     struct rte_flow *flow)
2247 {
2248         struct sfc_flow_spec *spec = &flow->spec;
2249         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2250         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2251         int rc;
2252
2253         SFC_ASSERT(sfc_adapter_is_locked(sa));
2254
2255         if (sa->state != SFC_ADAPTER_STARTED)
2256                 return EAGAIN;
2257
2258         if (outer_rule != NULL) {
2259                 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
2260                 if (rc != 0)
2261                         return rc;
2262         }
2263
2264         return sfc_mae_action_rule_class_verify(sa, spec_mae);
2265 }
2266
2267 int
2268 sfc_mae_flow_insert(struct sfc_adapter *sa,
2269                     struct rte_flow *flow)
2270 {
2271         struct sfc_flow_spec *spec = &flow->spec;
2272         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2273         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2274         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2275         struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
2276         int rc;
2277
2278         SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
2279         SFC_ASSERT(action_set != NULL);
2280
2281         if (outer_rule != NULL) {
2282                 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
2283                                                spec_mae->match_spec);
2284                 if (rc != 0)
2285                         goto fail_outer_rule_enable;
2286         }
2287
2288         rc = sfc_mae_action_set_enable(sa, action_set);
2289         if (rc != 0)
2290                 goto fail_action_set_enable;
2291
2292         rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
2293                                         NULL, &fw_rsrc->aset_id,
2294                                         &spec_mae->rule_id);
2295         if (rc != 0)
2296                 goto fail_action_rule_insert;
2297
2298         return 0;
2299
2300 fail_action_rule_insert:
2301         (void)sfc_mae_action_set_disable(sa, action_set);
2302
2303 fail_action_set_enable:
2304         if (outer_rule != NULL)
2305                 (void)sfc_mae_outer_rule_disable(sa, outer_rule);
2306
2307 fail_outer_rule_enable:
2308         return rc;
2309 }
2310
2311 int
2312 sfc_mae_flow_remove(struct sfc_adapter *sa,
2313                     struct rte_flow *flow)
2314 {
2315         struct sfc_flow_spec *spec = &flow->spec;
2316         struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2317         struct sfc_mae_action_set *action_set = spec_mae->action_set;
2318         struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
2319         int rc;
2320
2321         SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
2322         SFC_ASSERT(action_set != NULL);
2323
2324         rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
2325         if (rc != 0)
2326                 return rc;
2327
2328         spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
2329
2330         rc = sfc_mae_action_set_disable(sa, action_set);
2331         if (rc != 0) {
2332                 sfc_err(sa, "failed to disable the action set (rc = %d)", rc);
2333                 /* Despite the error, proceed with outer rule removal. */
2334         }
2335
2336         if (outer_rule != NULL)
2337                 return sfc_mae_outer_rule_disable(sa, outer_rule);
2338
2339         return 0;
2340 }