/* drivers/net/enic/enic_fm_flow.c */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
3  */
4
5 #include <errno.h>
6 #include <stdint.h>
7 #include <rte_log.h>
8 #include <ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
11 #include <rte_hash.h>
12 #include <rte_jhash.h>
13 #include <rte_ip.h>
14 #include <rte_udp.h>
15 #include <rte_memzone.h>
16
17 #include "enic_compat.h"
18 #include "enic.h"
19 #include "vnic_dev.h"
20 #include "vnic_nic.h"
21
22 #define IP_DEFTTL  64   /* from RFC 1340. */
23 #define IP6_VTC_FLOW 0x60000000
24
25 /* Up to 1024 TCAM entries */
26 #define FM_MAX_TCAM_TABLE_SIZE 1024
27
28 /* Up to 4096 entries per exact match table */
29 #define FM_MAX_EXACT_TABLE_SIZE 4096
30
31 /* Number of counters to increase on for each increment */
32 #define FM_COUNTERS_EXPAND  100
33
34 #define FM_INVALID_HANDLE 0
35
36 /* Low priority used for implicit VF -> representor flow */
37 #define FM_LOWEST_PRIORITY 100000
38
39 /* High priority used for implicit representor -> VF flow */
40 #define FM_HIGHEST_PRIORITY 0
41
42 /* Tag used for implicit VF <-> representor flows */
43 #define FM_VF_REP_TAG 1
44
45 /* Max number of actions supported by VIC is 2K. Make hash table double that. */
46 #define FM_MAX_ACTION_TABLE_SIZE 4096
47
48 /*
49  * Flow exact match tables (FET) in the VIC and rte_flow groups.
50  * Use a simple scheme to map groups to tables.
51  * Group 0 uses the single TCAM tables, one for each direction.
52  * Group 1, 2, ... uses its own exact match table.
53  *
54  * The TCAM tables are allocated upfront during init.
55  *
 * Exact match tables are allocated on demand. 3 paths that lead to allocations.
57  *
58  * 1. Add a flow that jumps from group 0 to group N.
59  *
60  * If N does not exist, we allocate an exact match table for it, using
61  * a dummy key. A key is required for the table.
62  *
63  * 2. Add a flow that uses group N.
64  *
65  * If N does not exist, we allocate an exact match table for it, using
66  * the flow's key. Subsequent flows to the same group all should have
67  * the same key.
68  *
69  * Without a jump flow to N, N is not reachable in hardware. No packets
70  * reach N and match.
71  *
72  * 3. Add a flow to an empty group N.
73  *
74  * N has been created via (1) and the dummy key. We free that table, allocate
75  * a new table using the new flow's key. Also re-do the existing jump flow to
76  * point to the new table.
77  */
78 #define FM_TCAM_RTE_GROUP 0
79
80 struct enic_fm_fet {
81         TAILQ_ENTRY(enic_fm_fet) list;
82         uint32_t group; /* rte_flow group ID */
83         uint64_t handle; /* Exact match table handle from flowman */
84         uint8_t ingress;
85         uint8_t default_key;
86         int ref; /* Reference count via get/put */
87         struct fm_key_template key; /* Key associated with the table */
88 };
89
90 struct enic_fm_counter {
91         SLIST_ENTRY(enic_fm_counter) next;
92         uint32_t handle;
93 };
94
95 struct enic_fm_action {
96         int ref;
97         uint64_t handle;
98         struct fm_action key;
99 };
100
101 /* rte_flow.fm */
102 struct enic_fm_flow {
103         bool counter_valid;
104         uint64_t entry_handle;
105         struct enic_fm_action  *action;
106         struct enic_fm_counter *counter;
107         struct enic_fm_fet *fet;
108         /* Auto-added steer action for hairpin flows (e.g. vnic->vnic) */
109         struct enic_fm_flow *hairpin_steer_flow;
110 };
111
112 struct enic_fm_jump_flow {
113         TAILQ_ENTRY(enic_fm_jump_flow) list;
114         struct rte_flow *flow;
115         uint32_t group;
116         struct fm_tcam_match_entry match;
117         struct fm_action action;
118 };
119
120 /*
121  * Flowman uses host memory for commands. This structure is allocated
122  * in DMA-able memory.
123  */
124 union enic_flowman_cmd_mem {
125         struct fm_tcam_match_table fm_tcam_match_table;
126         struct fm_exact_match_table fm_exact_match_table;
127         struct fm_tcam_match_entry fm_tcam_match_entry;
128         struct fm_exact_match_entry fm_exact_match_entry;
129         struct fm_action fm_action;
130 };
131
132 /*
133  * PF has a flowman instance, and VF representors share it with PF.
134  * PF allocates this structure and owns it. VF representors borrow
135  * the PF's structure during API calls (e.g. create, query).
136  */
137 struct enic_flowman {
138         struct enic *owner_enic; /* PF */
139         struct enic *user_enic;  /* API caller (PF or representor) */
140         /*
141          * Representors and PF share the same underlying flowman.
142          * Lock API calls to serialize accesses from them. Only used
143          * when VF representors are present.
144          */
145         rte_spinlock_t lock;
146         /* Command buffer */
147         struct {
148                 union enic_flowman_cmd_mem *va;
149                 dma_addr_t pa;
150         } cmd;
151         /* TCAM tables allocated upfront, used for group 0 */
152         uint64_t ig_tcam_hndl;
153         uint64_t eg_tcam_hndl;
154         /* Counters */
155         SLIST_HEAD(enic_free_counters, enic_fm_counter) counters;
156         void *counter_stack;
157         uint32_t counters_alloced;
158         /* Exact match tables for groups != 0, dynamically allocated */
159         TAILQ_HEAD(fet_list, enic_fm_fet) fet_list;
160         /*
161          * Default exact match tables used for jump actions to
162          * non-existent groups.
163          */
164         struct enic_fm_fet *default_eg_fet;
165         struct enic_fm_fet *default_ig_fet;
166         /* hash table for Action reuse */
167         struct rte_hash *action_hash;
168         /* Flows that jump to the default table above */
169         TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list;
170         /*
171          * Scratch data used during each invocation of flow_create
172          * and flow_validate.
173          */
174         struct enic_fm_fet *fet;
175         struct fm_tcam_match_entry tcam_entry;
176         struct fm_action action;
177         struct fm_action action_tmp; /* enic_fm_reorder_action_op */
178         int action_op_count;
179         /* Tags used for representor flows */
180         uint8_t vf_rep_tag;
181         /* For auto-added steer action for hairpin */
182         int need_hairpin_steer;
183         uint64_t hairpin_steer_vnic_h;
184 };
185
186 static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle);
187 /*
188  * API functions (create, destroy, validate, flush) call begin_fm()
189  * upon entering to save the caller enic (PF or VF representor) and
190  * lock. Upon exit, they call end_fm() to unlock.
191  */
192 static struct enic_flowman *begin_fm(struct enic *enic);
193 static void end_fm(struct enic_flowman *fm);
194 /* Delete internal flows created for representor paths */
195 static void delete_rep_flows(struct enic *enic);
196
197 /*
198  * Common arguments passed to copy_item functions. Use this structure
199  * so we can easily add new arguments.
200  * item: Item specification.
201  * fm_tcam_entry: Flowman TCAM match entry.
202  * header_level: 0 for outer header, 1 for inner header.
203  */
204 struct copy_item_args {
205         const struct rte_flow_item *item;
206         struct fm_tcam_match_entry *fm_tcam_entry;
207         uint8_t header_level;
208 };
209
210 /* functions for copying items into flowman match */
211 typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
212
213 /* Info about how to copy items into flowman match */
214 struct enic_fm_items {
215         /* Function for copying and validating an item. */
216         enic_copy_item_fn * const copy_item;
217         /* List of valid previous items. */
218         const enum rte_flow_item_type * const prev_items;
219         /*
220          * True if it's OK for this item to be the first item. For some NIC
221          * versions, it's invalid to start the stack above layer 3.
222          */
223         const uint8_t valid_start_item;
224 };
225
226 static enic_copy_item_fn enic_fm_copy_item_eth;
227 static enic_copy_item_fn enic_fm_copy_item_ipv4;
228 static enic_copy_item_fn enic_fm_copy_item_ipv6;
229 static enic_copy_item_fn enic_fm_copy_item_raw;
230 static enic_copy_item_fn enic_fm_copy_item_sctp;
231 static enic_copy_item_fn enic_fm_copy_item_tcp;
232 static enic_copy_item_fn enic_fm_copy_item_udp;
233 static enic_copy_item_fn enic_fm_copy_item_vlan;
234 static enic_copy_item_fn enic_fm_copy_item_vxlan;
235 static enic_copy_item_fn enic_fm_copy_item_gtp;
236
237 /* Ingress actions */
238 static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
239         RTE_FLOW_ACTION_TYPE_COUNT,
240         RTE_FLOW_ACTION_TYPE_DROP,
241         RTE_FLOW_ACTION_TYPE_FLAG,
242         RTE_FLOW_ACTION_TYPE_JUMP,
243         RTE_FLOW_ACTION_TYPE_MARK,
244         RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
245         RTE_FLOW_ACTION_TYPE_PORT_ID,
246         RTE_FLOW_ACTION_TYPE_PASSTHRU,
247         RTE_FLOW_ACTION_TYPE_QUEUE,
248         RTE_FLOW_ACTION_TYPE_RSS,
249         RTE_FLOW_ACTION_TYPE_VOID,
250         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
251         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
252         RTE_FLOW_ACTION_TYPE_END, /* END must be the last entry */
253 };
254
255 /* Egress actions */
256 static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = {
257         RTE_FLOW_ACTION_TYPE_COUNT,
258         RTE_FLOW_ACTION_TYPE_DROP,
259         RTE_FLOW_ACTION_TYPE_JUMP,
260         RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
261         RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
262         RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
263         RTE_FLOW_ACTION_TYPE_PORT_ID,
264         RTE_FLOW_ACTION_TYPE_PASSTHRU,
265         RTE_FLOW_ACTION_TYPE_VOID,
266         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
267         RTE_FLOW_ACTION_TYPE_END,
268 };
269
270 static const struct enic_fm_items enic_fm_items[] = {
271         [RTE_FLOW_ITEM_TYPE_RAW] = {
272                 .copy_item = enic_fm_copy_item_raw,
273                 .valid_start_item = 0,
274                 .prev_items = (const enum rte_flow_item_type[]) {
275                                RTE_FLOW_ITEM_TYPE_UDP,
276                                RTE_FLOW_ITEM_TYPE_END,
277                 },
278         },
279         [RTE_FLOW_ITEM_TYPE_ETH] = {
280                 .copy_item = enic_fm_copy_item_eth,
281                 .valid_start_item = 1,
282                 .prev_items = (const enum rte_flow_item_type[]) {
283                                RTE_FLOW_ITEM_TYPE_END,
284                 },
285         },
286         [RTE_FLOW_ITEM_TYPE_VLAN] = {
287                 .copy_item = enic_fm_copy_item_vlan,
288                 .valid_start_item = 1,
289                 .prev_items = (const enum rte_flow_item_type[]) {
290                                RTE_FLOW_ITEM_TYPE_ETH,
291                                RTE_FLOW_ITEM_TYPE_END,
292                 },
293         },
294         [RTE_FLOW_ITEM_TYPE_IPV4] = {
295                 .copy_item = enic_fm_copy_item_ipv4,
296                 .valid_start_item = 1,
297                 .prev_items = (const enum rte_flow_item_type[]) {
298                                RTE_FLOW_ITEM_TYPE_ETH,
299                                RTE_FLOW_ITEM_TYPE_VLAN,
300                                RTE_FLOW_ITEM_TYPE_END,
301                 },
302         },
303         [RTE_FLOW_ITEM_TYPE_IPV6] = {
304                 .copy_item = enic_fm_copy_item_ipv6,
305                 .valid_start_item = 1,
306                 .prev_items = (const enum rte_flow_item_type[]) {
307                                RTE_FLOW_ITEM_TYPE_ETH,
308                                RTE_FLOW_ITEM_TYPE_VLAN,
309                                RTE_FLOW_ITEM_TYPE_END,
310                 },
311         },
312         [RTE_FLOW_ITEM_TYPE_UDP] = {
313                 .copy_item = enic_fm_copy_item_udp,
314                 .valid_start_item = 1,
315                 .prev_items = (const enum rte_flow_item_type[]) {
316                                RTE_FLOW_ITEM_TYPE_IPV4,
317                                RTE_FLOW_ITEM_TYPE_IPV6,
318                                RTE_FLOW_ITEM_TYPE_END,
319                 },
320         },
321         [RTE_FLOW_ITEM_TYPE_TCP] = {
322                 .copy_item = enic_fm_copy_item_tcp,
323                 .valid_start_item = 1,
324                 .prev_items = (const enum rte_flow_item_type[]) {
325                                RTE_FLOW_ITEM_TYPE_IPV4,
326                                RTE_FLOW_ITEM_TYPE_IPV6,
327                                RTE_FLOW_ITEM_TYPE_END,
328                 },
329         },
330         [RTE_FLOW_ITEM_TYPE_SCTP] = {
331                 .copy_item = enic_fm_copy_item_sctp,
332                 .valid_start_item = 0,
333                 .prev_items = (const enum rte_flow_item_type[]) {
334                                RTE_FLOW_ITEM_TYPE_IPV4,
335                                RTE_FLOW_ITEM_TYPE_IPV6,
336                                RTE_FLOW_ITEM_TYPE_END,
337                 },
338         },
339         [RTE_FLOW_ITEM_TYPE_VXLAN] = {
340                 .copy_item = enic_fm_copy_item_vxlan,
341                 .valid_start_item = 1,
342                 .prev_items = (const enum rte_flow_item_type[]) {
343                                RTE_FLOW_ITEM_TYPE_UDP,
344                                RTE_FLOW_ITEM_TYPE_END,
345                 },
346         },
347         [RTE_FLOW_ITEM_TYPE_GTP] = {
348                 .copy_item = enic_fm_copy_item_gtp,
349                 .valid_start_item = 0,
350                 .prev_items = (const enum rte_flow_item_type[]) {
351                                RTE_FLOW_ITEM_TYPE_UDP,
352                                RTE_FLOW_ITEM_TYPE_END,
353                 },
354         },
355         [RTE_FLOW_ITEM_TYPE_GTPC] = {
356                 .copy_item = enic_fm_copy_item_gtp,
357                 .valid_start_item = 1,
358                 .prev_items = (const enum rte_flow_item_type[]) {
359                                RTE_FLOW_ITEM_TYPE_UDP,
360                                RTE_FLOW_ITEM_TYPE_END,
361                 },
362         },
363         [RTE_FLOW_ITEM_TYPE_GTPU] = {
364                 .copy_item = enic_fm_copy_item_gtp,
365                 .valid_start_item = 1,
366                 .prev_items = (const enum rte_flow_item_type[]) {
367                                RTE_FLOW_ITEM_TYPE_UDP,
368                                RTE_FLOW_ITEM_TYPE_END,
369                 },
370         },
371 };
372
373 static int
374 enic_fm_copy_item_eth(struct copy_item_args *arg)
375 {
376         const struct rte_flow_item *item = arg->item;
377         const struct rte_flow_item_eth *spec = item->spec;
378         const struct rte_flow_item_eth *mask = item->mask;
379         const uint8_t lvl = arg->header_level;
380         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
381         struct fm_header_set *fm_data, *fm_mask;
382
383         ENICPMD_FUNC_TRACE();
384         /* Match all if no spec */
385         if (!spec)
386                 return 0;
387         if (!mask)
388                 mask = &rte_flow_item_eth_mask;
389         fm_data = &entry->ftm_data.fk_hdrset[lvl];
390         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
391         fm_data->fk_header_select |= FKH_ETHER;
392         fm_mask->fk_header_select |= FKH_ETHER;
393         memcpy(&fm_data->l2.eth, spec, sizeof(struct rte_ether_hdr));
394         memcpy(&fm_mask->l2.eth, mask, sizeof(struct rte_ether_hdr));
395         return 0;
396 }
397
398 static int
399 enic_fm_copy_item_vlan(struct copy_item_args *arg)
400 {
401         const struct rte_flow_item *item = arg->item;
402         const struct rte_flow_item_vlan *spec = item->spec;
403         const struct rte_flow_item_vlan *mask = item->mask;
404         const uint8_t lvl = arg->header_level;
405         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
406         struct fm_header_set *fm_data, *fm_mask;
407         struct rte_ether_hdr *eth_mask;
408         struct rte_ether_hdr *eth_val;
409         uint32_t meta;
410
411         ENICPMD_FUNC_TRACE();
412         fm_data = &entry->ftm_data.fk_hdrset[lvl];
413         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
414         /* Outer and inner packet vlans need different flags */
415         meta = FKM_VLAN_PRES;
416         if (lvl > 0)
417                 meta = FKM_QTAG;
418         fm_data->fk_metadata |= meta;
419         fm_mask->fk_metadata |= meta;
420
421         /* Match all if no spec */
422         if (!spec)
423                 return 0;
424         if (!mask)
425                 mask = &rte_flow_item_vlan_mask;
426
427         eth_mask = (void *)&fm_mask->l2.eth;
428         eth_val = (void *)&fm_data->l2.eth;
429
430         /*
431          * Outer TPID cannot be matched. If inner_type is 0, use what is
432          * in the eth header.
433          */
434         if (eth_mask->ether_type && mask->inner_type)
435                 return -ENOTSUP;
436
437         /*
438          * When packet matching, the VIC always compares vlan-stripped
439          * L2, regardless of vlan stripping settings. So, the inner type
440          * from vlan becomes the ether type of the eth header.
441          */
442         if (mask->inner_type) {
443                 eth_mask->ether_type = mask->inner_type;
444                 eth_val->ether_type = spec->inner_type;
445         }
446         fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
447         fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
448         fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
449         fm_mask->fk_vlan = rte_be_to_cpu_16(mask->tci);
450         return 0;
451 }
452
453 static int
454 enic_fm_copy_item_ipv4(struct copy_item_args *arg)
455 {
456         const struct rte_flow_item *item = arg->item;
457         const struct rte_flow_item_ipv4 *spec = item->spec;
458         const struct rte_flow_item_ipv4 *mask = item->mask;
459         const uint8_t lvl = arg->header_level;
460         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
461         struct fm_header_set *fm_data, *fm_mask;
462
463         ENICPMD_FUNC_TRACE();
464         fm_data = &entry->ftm_data.fk_hdrset[lvl];
465         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
466         fm_data->fk_metadata |= FKM_IPV4;
467         fm_mask->fk_metadata |= FKM_IPV4;
468
469         if (!spec)
470                 return 0;
471         if (!mask)
472                 mask = &rte_flow_item_ipv4_mask;
473
474         fm_data->fk_header_select |= FKH_IPV4;
475         fm_mask->fk_header_select |= FKH_IPV4;
476         memcpy(&fm_data->l3.ip4, spec, sizeof(*spec));
477         memcpy(&fm_mask->l3.ip4, mask, sizeof(*mask));
478         return 0;
479 }
480
481 static int
482 enic_fm_copy_item_ipv6(struct copy_item_args *arg)
483 {
484         const struct rte_flow_item *item = arg->item;
485         const struct rte_flow_item_ipv6 *spec = item->spec;
486         const struct rte_flow_item_ipv6 *mask = item->mask;
487         const uint8_t lvl = arg->header_level;
488         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
489         struct fm_header_set *fm_data, *fm_mask;
490
491         ENICPMD_FUNC_TRACE();
492         fm_data = &entry->ftm_data.fk_hdrset[lvl];
493         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
494         fm_data->fk_metadata |= FKM_IPV6;
495         fm_mask->fk_metadata |= FKM_IPV6;
496
497         if (!spec)
498                 return 0;
499         if (!mask)
500                 mask = &rte_flow_item_ipv6_mask;
501
502         fm_data->fk_header_select |= FKH_IPV6;
503         fm_mask->fk_header_select |= FKH_IPV6;
504         memcpy(&fm_data->l3.ip6, spec, sizeof(struct rte_ipv6_hdr));
505         memcpy(&fm_mask->l3.ip6, mask, sizeof(struct rte_ipv6_hdr));
506         return 0;
507 }
508
509 static int
510 enic_fm_copy_item_udp(struct copy_item_args *arg)
511 {
512         const struct rte_flow_item *item = arg->item;
513         const struct rte_flow_item_udp *spec = item->spec;
514         const struct rte_flow_item_udp *mask = item->mask;
515         const uint8_t lvl = arg->header_level;
516         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
517         struct fm_header_set *fm_data, *fm_mask;
518
519         ENICPMD_FUNC_TRACE();
520         fm_data = &entry->ftm_data.fk_hdrset[lvl];
521         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
522         fm_data->fk_metadata |= FKM_UDP;
523         fm_mask->fk_metadata |= FKM_UDP;
524
525         if (!spec)
526                 return 0;
527         if (!mask)
528                 mask = &rte_flow_item_udp_mask;
529
530         fm_data->fk_header_select |= FKH_UDP;
531         fm_mask->fk_header_select |= FKH_UDP;
532         memcpy(&fm_data->l4.udp, spec, sizeof(*spec));
533         memcpy(&fm_mask->l4.udp, mask, sizeof(*mask));
534         return 0;
535 }
536
537 static int
538 enic_fm_copy_item_tcp(struct copy_item_args *arg)
539 {
540         const struct rte_flow_item *item = arg->item;
541         const struct rte_flow_item_tcp *spec = item->spec;
542         const struct rte_flow_item_tcp *mask = item->mask;
543         const uint8_t lvl = arg->header_level;
544         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
545         struct fm_header_set *fm_data, *fm_mask;
546
547         ENICPMD_FUNC_TRACE();
548         fm_data = &entry->ftm_data.fk_hdrset[lvl];
549         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
550         fm_data->fk_metadata |= FKM_TCP;
551         fm_mask->fk_metadata |= FKM_TCP;
552
553         if (!spec)
554                 return 0;
555         if (!mask)
556                 mask = &rte_flow_item_tcp_mask;
557
558         fm_data->fk_header_select |= FKH_TCP;
559         fm_mask->fk_header_select |= FKH_TCP;
560         memcpy(&fm_data->l4.tcp, spec, sizeof(*spec));
561         memcpy(&fm_mask->l4.tcp, mask, sizeof(*mask));
562         return 0;
563 }
564
565 static int
566 enic_fm_copy_item_sctp(struct copy_item_args *arg)
567 {
568         const struct rte_flow_item *item = arg->item;
569         const struct rte_flow_item_sctp *spec = item->spec;
570         const struct rte_flow_item_sctp *mask = item->mask;
571         const uint8_t lvl = arg->header_level;
572         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
573         struct fm_header_set *fm_data, *fm_mask;
574         uint8_t *ip_proto_mask = NULL;
575         uint8_t *ip_proto = NULL;
576         uint32_t l3_fkh;
577
578         ENICPMD_FUNC_TRACE();
579         fm_data = &entry->ftm_data.fk_hdrset[lvl];
580         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
581         /*
582          * The NIC filter API has no flags for "match sctp", so explicitly
583          * set the protocol number in the IP pattern.
584          */
585         if (fm_data->fk_metadata & FKM_IPV4) {
586                 struct rte_ipv4_hdr *ip;
587                 ip = (struct rte_ipv4_hdr *)&fm_mask->l3.ip4;
588                 ip_proto_mask = &ip->next_proto_id;
589                 ip = (struct rte_ipv4_hdr *)&fm_data->l3.ip4;
590                 ip_proto = &ip->next_proto_id;
591                 l3_fkh = FKH_IPV4;
592         } else if (fm_data->fk_metadata & FKM_IPV6) {
593                 struct rte_ipv6_hdr *ip;
594                 ip = (struct rte_ipv6_hdr *)&fm_mask->l3.ip6;
595                 ip_proto_mask = &ip->proto;
596                 ip = (struct rte_ipv6_hdr *)&fm_data->l3.ip6;
597                 ip_proto = &ip->proto;
598                 l3_fkh = FKH_IPV6;
599         } else {
600                 /* Need IPv4/IPv6 pattern first */
601                 return -EINVAL;
602         }
603         *ip_proto = IPPROTO_SCTP;
604         *ip_proto_mask = 0xff;
605         fm_data->fk_header_select |= l3_fkh;
606         fm_mask->fk_header_select |= l3_fkh;
607
608         if (!spec)
609                 return 0;
610         if (!mask)
611                 mask = &rte_flow_item_sctp_mask;
612
613         fm_data->fk_header_select |= FKH_L4RAW;
614         fm_mask->fk_header_select |= FKH_L4RAW;
615         memcpy(fm_data->l4.rawdata, spec, sizeof(*spec));
616         memcpy(fm_mask->l4.rawdata, mask, sizeof(*mask));
617         return 0;
618 }
619
620 static int
621 enic_fm_copy_item_vxlan(struct copy_item_args *arg)
622 {
623         const struct rte_flow_item *item = arg->item;
624         const struct rte_flow_item_vxlan *spec = item->spec;
625         const struct rte_flow_item_vxlan *mask = item->mask;
626         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
627         struct fm_header_set *fm_data, *fm_mask;
628
629         ENICPMD_FUNC_TRACE();
630         /* Only 2 header levels (outer and inner) allowed */
631         if (arg->header_level > 0)
632                 return -EINVAL;
633
634         fm_data = &entry->ftm_data.fk_hdrset[0];
635         fm_mask = &entry->ftm_mask.fk_hdrset[0];
636         fm_data->fk_metadata |= FKM_VXLAN;
637         fm_mask->fk_metadata |= FKM_VXLAN;
638         /* items from here on out are inner header items */
639         arg->header_level = 1;
640
641         /* Match all if no spec */
642         if (!spec)
643                 return 0;
644         if (!mask)
645                 mask = &rte_flow_item_vxlan_mask;
646
647         fm_data->fk_header_select |= FKH_VXLAN;
648         fm_mask->fk_header_select |= FKH_VXLAN;
649         memcpy(&fm_data->vxlan, spec, sizeof(*spec));
650         memcpy(&fm_mask->vxlan, mask, sizeof(*mask));
651         return 0;
652 }
653
654 static int
655 enic_fm_copy_item_gtp(struct copy_item_args *arg)
656 {
657         const struct rte_flow_item *item = arg->item;
658         const struct rte_flow_item_gtp *spec = item->spec;
659         const struct rte_flow_item_gtp *mask = item->mask;
660         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
661         struct fm_header_set *fm_data, *fm_mask;
662         int off;
663         uint16_t udp_gtp_uc_port_be = 0;
664
665         ENICPMD_FUNC_TRACE();
666         /* Only 2 header levels (outer and inner) allowed */
667         if (arg->header_level > 0)
668                 return -EINVAL;
669
670         fm_data = &entry->ftm_data.fk_hdrset[0];
671         fm_mask = &entry->ftm_mask.fk_hdrset[0];
672
673         switch (item->type) {
674         case RTE_FLOW_ITEM_TYPE_GTP:
675         {
676                 /* For vanilla GTP, the UDP destination port must be specified
677                  * but value of the port is not enforced here.
678                  */
679                 if (!(fm_data->fk_metadata & FKM_UDP) ||
680                     !(fm_data->fk_header_select & FKH_UDP) ||
681                     fm_data->l4.udp.fk_dest == 0)
682                         return -EINVAL;
683                 if (!(fm_mask->fk_metadata & FKM_UDP) ||
684                     !(fm_mask->fk_header_select & FKH_UDP) ||
685                     fm_mask->l4.udp.fk_dest != 0xFFFF)
686                         return -EINVAL;
687                 break;
688         }
689         case RTE_FLOW_ITEM_TYPE_GTPC:
690         {
691                 udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPC_UDP_PORT);
692                 break;
693         }
694         case RTE_FLOW_ITEM_TYPE_GTPU:
695         {
696                 udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
697                 break;
698         }
699         default:
700                 RTE_ASSERT(0);
701         }
702
703         /* The GTP-C or GTP-U UDP destination port must be matched. */
704         if (udp_gtp_uc_port_be) {
705                 if (fm_data->fk_metadata & FKM_UDP &&
706                     fm_data->fk_header_select & FKH_UDP &&
707                     fm_data->l4.udp.fk_dest != udp_gtp_uc_port_be)
708                         return -EINVAL;
709                 if (fm_mask->fk_metadata & FKM_UDP &&
710                     fm_mask->fk_header_select & FKH_UDP &&
711                     fm_mask->l4.udp.fk_dest != 0xFFFF)
712                         return -EINVAL;
713
714                 /* In any case, add match for GTP-C GTP-U UDP dst port */
715                 fm_data->fk_metadata |= FKM_UDP;
716                 fm_data->fk_header_select |= FKH_UDP;
717                 fm_data->l4.udp.fk_dest = udp_gtp_uc_port_be;
718                 fm_mask->fk_metadata |= FKM_UDP;
719                 fm_mask->fk_header_select |= FKH_UDP;
720                 fm_mask->l4.udp.fk_dest = 0xFFFF;
721         }
722
723         /* NIC does not support GTP tunnels. No Items are allowed after this.
724          * This prevents the specificaiton of further items.
725          */
726         arg->header_level = 0;
727
728         /* Match all if no spec */
729         if (!spec)
730                 return 0;
731         if (!mask)
732                 mask = &rte_flow_item_gtp_mask;
733
734         /*
735          * Use the raw L4 buffer to match GTP as fm_header_set does not have
736          * GTP header. UDP dst port must be specifiec. Using the raw buffer
737          * does not affect such UDP item, since we skip UDP in the raw buffer.
738          */
739         fm_data->fk_header_select |= FKH_L4RAW;
740         fm_mask->fk_header_select |= FKH_L4RAW;
741         off = sizeof(fm_data->l4.udp);
742         memcpy(&fm_data->l4.rawdata[off], spec, sizeof(*spec));
743         memcpy(&fm_mask->l4.rawdata[off], mask, sizeof(*mask));
744         return 0;
745 }
746
747 /*
748  * Currently, raw pattern match is very limited. It is intended for matching
749  * UDP tunnel header (e.g. vxlan or geneve).
750  */
751 static int
752 enic_fm_copy_item_raw(struct copy_item_args *arg)
753 {
754         const struct rte_flow_item *item = arg->item;
755         const struct rte_flow_item_raw *spec = item->spec;
756         const struct rte_flow_item_raw *mask = item->mask;
757         const uint8_t lvl = arg->header_level;
758         struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
759         struct fm_header_set *fm_data, *fm_mask;
760
761         ENICPMD_FUNC_TRACE();
762         /* Cannot be used for inner packet */
763         if (lvl > 0)
764                 return -EINVAL;
765         /* Need both spec and mask */
766         if (!spec || !mask)
767                 return -EINVAL;
768         /* Only supports relative with offset 0 */
769         if (!spec->relative || spec->offset != 0 || spec->search ||
770             spec->limit)
771                 return -EINVAL;
772         /* Need non-null pattern that fits within the NIC's filter pattern */
773         if (spec->length == 0 ||
774             spec->length + sizeof(struct rte_udp_hdr) > FM_LAYER_SIZE ||
775             !spec->pattern || !mask->pattern)
776                 return -EINVAL;
777         /*
778          * Mask fields, including length, are often set to zero. Assume that
779          * means "same as spec" to avoid breaking existing apps. If length
780          * is not zero, then it should be >= spec length.
781          *
782          * No more pattern follows this, so append to the L4 layer instead of
783          * L5 to work with both recent and older VICs.
784          */
785         if (mask->length != 0 && mask->length < spec->length)
786                 return -EINVAL;
787
788         fm_data = &entry->ftm_data.fk_hdrset[lvl];
789         fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
790         fm_data->fk_header_select |= FKH_L4RAW;
791         fm_mask->fk_header_select |= FKH_L4RAW;
792         fm_data->fk_header_select &= ~FKH_UDP;
793         fm_mask->fk_header_select &= ~FKH_UDP;
794         memcpy(fm_data->l4.rawdata + sizeof(struct rte_udp_hdr),
795                spec->pattern, spec->length);
796         memcpy(fm_mask->l4.rawdata + sizeof(struct rte_udp_hdr),
797                mask->pattern, spec->length);
798         return 0;
799 }
800
801 static int
802 flowman_cmd(struct enic_flowman *fm, uint64_t *args, int nargs)
803 {
804         return vnic_dev_flowman_cmd(fm->owner_enic->vdev, args, nargs);
805 }
806
807 static int
808 enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
809                struct fm_key_template *key, int entries,
810                struct enic_fm_fet **fet_out)
811 {
812         struct fm_exact_match_table *cmd;
813         struct fm_header_set *hdr;
814         struct enic_fm_fet *fet;
815         uint64_t args[3];
816         int ret;
817
818         ENICPMD_FUNC_TRACE();
819         fet = calloc(1, sizeof(struct enic_fm_fet));
820         if (fet == NULL)
821                 return -ENOMEM;
822         cmd = &fm->cmd.va->fm_exact_match_table;
823         memset(cmd, 0, sizeof(*cmd));
824         cmd->fet_direction = ingress ? FM_INGRESS : FM_EGRESS;
825         cmd->fet_stage = FM_STAGE_LAST;
826         cmd->fet_max_entries = entries ? entries : FM_MAX_EXACT_TABLE_SIZE;
827         if (key == NULL) {
828                 hdr = &cmd->fet_key.fk_hdrset[0];
829                 memset(hdr, 0, sizeof(*hdr));
830                 hdr->fk_header_select = FKH_IPV4 | FKH_UDP;
831                 hdr->l3.ip4.fk_saddr = 0xFFFFFFFF;
832                 hdr->l3.ip4.fk_daddr = 0xFFFFFFFF;
833                 hdr->l4.udp.fk_source = 0xFFFF;
834                 hdr->l4.udp.fk_dest = 0xFFFF;
835                 fet->default_key = 1;
836         } else {
837                 memcpy(&cmd->fet_key, key, sizeof(*key));
838                 memcpy(&fet->key, key, sizeof(*key));
839                 fet->default_key = 0;
840         }
841         cmd->fet_key.fk_packet_tag = 1;
842
843         args[0] = FM_EXACT_TABLE_ALLOC;
844         args[1] = fm->cmd.pa;
845         ret = flowman_cmd(fm, args, 2);
846         if (ret) {
847                 ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret);
848                 free(fet);
849                 return ret;
850         }
851         fet->handle = args[0];
852         fet->ingress = ingress;
853         ENICPMD_LOG(DEBUG, "allocated exact match table: handle=0x%" PRIx64,
854                     fet->handle);
855         *fet_out = fet;
856         return 0;
857 }
858
859 static void
860 enic_fet_free(struct enic_flowman *fm, struct enic_fm_fet *fet)
861 {
862         ENICPMD_FUNC_TRACE();
863         enic_fm_tbl_free(fm, fet->handle);
864         if (!fet->default_key)
865                 TAILQ_REMOVE(&fm->fet_list, fet, list);
866         free(fet);
867 }
868
869 /*
870  * Get the exact match table for the given combination of
871  * <group, ingress, key>. Allocate one on the fly as necessary.
872  */
873 static int
874 enic_fet_get(struct enic_flowman *fm,
875              uint32_t group,
876              uint8_t ingress,
877              struct fm_key_template *key,
878              struct enic_fm_fet **fet_out,
879              struct rte_flow_error *error)
880 {
881         struct enic_fm_fet *fet;
882
883         ENICPMD_FUNC_TRACE();
884         /* See if we already have this table open */
885         TAILQ_FOREACH(fet, &fm->fet_list, list) {
886                 if (fet->group == group && fet->ingress == ingress)
887                         break;
888         }
889         if (fet == NULL) {
890                 /* Jumping to a non-existing group? Use the default table */
891                 if (key == NULL) {
892                         fet = ingress ? fm->default_ig_fet : fm->default_eg_fet;
893                 } else if (enic_fet_alloc(fm, ingress, key, 0, &fet)) {
894                         return rte_flow_error_set(error, EINVAL,
895                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896                                 NULL, "enic: cannot get exact match table");
897                 }
898                 fet->group = group;
899                 /* Default table is never on the open table list */
900                 if (!fet->default_key)
901                         TAILQ_INSERT_HEAD(&fm->fet_list, fet, list);
902         }
903         fet->ref++;
904         *fet_out = fet;
905         ENICPMD_LOG(DEBUG, "fet_get: %s %s group=%u ref=%u",
906                     fet->default_key ? "default" : "",
907                     fet->ingress ? "ingress" : "egress",
908                     fet->group, fet->ref);
909         return 0;
910 }
911
912 static void
913 enic_fet_put(struct enic_flowman *fm, struct enic_fm_fet *fet)
914 {
915         ENICPMD_FUNC_TRACE();
916         RTE_ASSERT(fet->ref > 0);
917         fet->ref--;
918         ENICPMD_LOG(DEBUG, "fet_put: %s %s group=%u ref=%u",
919                     fet->default_key ? "default" : "",
920                     fet->ingress ? "ingress" : "egress",
921                     fet->group, fet->ref);
922         if (fet->ref == 0)
923                 enic_fet_free(fm, fet);
924 }
925
926 /* Return 1 if current item is valid on top of the previous one. */
927 static int
928 fm_item_stacking_valid(enum rte_flow_item_type prev_item,
929                        const struct enic_fm_items *item_info,
930                        uint8_t is_first_item)
931 {
932         enum rte_flow_item_type const *allowed_items = item_info->prev_items;
933
934         ENICPMD_FUNC_TRACE();
935         for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
936                 if (prev_item == *allowed_items)
937                         return 1;
938         }
939
940         /* This is the first item in the stack. Check if that's cool */
941         if (is_first_item && item_info->valid_start_item)
942                 return 1;
943         return 0;
944 }
945
946 /*
947  * Build the flow manager match entry structure from the provided pattern.
948  * The pattern is validated as the items are copied.
949  */
950 static int
951 enic_fm_copy_entry(struct enic_flowman *fm,
952                    const struct rte_flow_item pattern[],
953                    struct rte_flow_error *error)
954 {
955         const struct enic_fm_items *item_info;
956         enum rte_flow_item_type prev_item;
957         const struct rte_flow_item *item;
958         struct copy_item_args args;
959         uint8_t prev_header_level;
960         uint8_t is_first_item;
961         int ret;
962
963         ENICPMD_FUNC_TRACE();
964         item = pattern;
965         is_first_item = 1;
966         prev_item = RTE_FLOW_ITEM_TYPE_END;
967
968         args.fm_tcam_entry = &fm->tcam_entry;
969         args.header_level = 0;
970         prev_header_level = 0;
971         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
972                 /*
973                  * Get info about how to validate and copy the item. If NULL
974                  * is returned the nic does not support the item.
975                  */
976                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
977                         continue;
978
979                 item_info = &enic_fm_items[item->type];
980
981                 if (item->type >= RTE_DIM(enic_fm_items) ||
982                     item_info->copy_item == NULL) {
983                         return rte_flow_error_set(error, ENOTSUP,
984                                 RTE_FLOW_ERROR_TYPE_ITEM,
985                                 NULL, "enic: unsupported item");
986                 }
987
988                 /* check to see if item stacking is valid */
989                 if (!fm_item_stacking_valid(prev_item, item_info,
990                                             is_first_item))
991                         goto stacking_error;
992
993                 args.item = item;
994                 ret = item_info->copy_item(&args);
995                 if (ret)
996                         goto item_not_supported;
997                 /* Going from outer to inner? Treat it as a new packet start */
998                 if (prev_header_level != args.header_level) {
999                         prev_item = RTE_FLOW_ITEM_TYPE_END;
1000                         is_first_item = 1;
1001                 } else {
1002                         prev_item = item->type;
1003                         is_first_item = 0;
1004                 }
1005                 prev_header_level = args.header_level;
1006         }
1007         return 0;
1008
1009 item_not_supported:
1010         return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
1011                                   NULL, "enic: unsupported item type");
1012
1013 stacking_error:
1014         return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1015                                   item, "enic: unsupported item stack");
1016 }
1017
1018 static void
1019 flow_item_skip_void(const struct rte_flow_item **item)
1020 {
1021         for ( ; ; (*item)++)
1022                 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
1023                         return;
1024 }
1025
/*
 * Append len bytes of data to the encap header template, advancing both
 * the write pointer and the running byte offset.
 */
static void
append_template(void **template, uint8_t *off, const void *data, int len)
{
	char *dst = *template;

	memcpy(dst, data, len);
	*template = dst + len;
	*off = (uint8_t)(*off + len);
}
1033
1034 static int
1035 enic_fm_append_action_op(struct enic_flowman *fm,
1036                          struct fm_action_op *fm_op,
1037                          struct rte_flow_error *error)
1038 {
1039         int count;
1040
1041         count = fm->action_op_count;
1042         ENICPMD_LOG(DEBUG, "append action op: idx=%d op=%u",
1043                     count, fm_op->fa_op);
1044         if (count == FM_ACTION_OP_MAX) {
1045                 return rte_flow_error_set(error, EINVAL,
1046                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1047                         "too many action operations");
1048         }
1049         fm->action.fma_action_ops[count] = *fm_op;
1050         fm->action_op_count = count + 1;
1051         return 0;
1052 }
1053
1054 static struct fm_action_op *
1055 find_prev_action_op(struct enic_flowman *fm, uint32_t opcode)
1056 {
1057         struct fm_action_op *op;
1058         int i;
1059
1060         for (i = 0; i < fm->action_op_count; i++) {
1061                 op = &fm->action.fma_action_ops[i];
1062                 if (op->fa_op == opcode)
1063                         return op;
1064         }
1065         return NULL;
1066 }
1067
1068 /* NIC requires that 1st steer appear before decap.
1069  * Correct example: steer, decap, steer, steer, ...
1070  */
1071 static void
1072 enic_fm_reorder_action_op(struct enic_flowman *fm)
1073 {
1074         struct fm_action_op *op, *steer, *decap;
1075         struct fm_action_op tmp_op;
1076
1077         ENICPMD_FUNC_TRACE();
1078         /* Find 1st steer and decap */
1079         op = fm->action.fma_action_ops;
1080         steer = NULL;
1081         decap = NULL;
1082         while (op->fa_op != FMOP_END) {
1083                 if (!decap && (op->fa_op == FMOP_DECAP_NOSTRIP ||
1084                                op->fa_op == FMOP_DECAP_STRIP))
1085                         decap = op;
1086                 else if (!steer && op->fa_op == FMOP_RQ_STEER)
1087                         steer = op;
1088                 op++;
1089         }
1090         /* If decap is before steer, swap */
1091         if (steer && decap && decap < steer) {
1092                 op = fm->action.fma_action_ops;
1093                 ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
1094                             (long)(decap - op), (long)(steer - op));
1095                 tmp_op = *decap;
1096                 *decap = *steer;
1097                 *steer = tmp_op;
1098         }
1099 }
1100
1101 /* VXLAN decap is done via flowman compound action */
1102 static int
1103 enic_fm_copy_vxlan_decap(struct enic_flowman *fm,
1104                          struct fm_tcam_match_entry *fmt,
1105                          const struct rte_flow_action *action,
1106                          struct rte_flow_error *error)
1107 {
1108         struct fm_header_set *fm_data;
1109         struct fm_action_op fm_op;
1110
1111         ENICPMD_FUNC_TRACE();
1112         fm_data = &fmt->ftm_data.fk_hdrset[0];
1113         if (!(fm_data->fk_metadata & FKM_VXLAN)) {
1114                 return rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ACTION, action,
1116                         "vxlan-decap: vxlan must be in pattern");
1117         }
1118
1119         memset(&fm_op, 0, sizeof(fm_op));
1120         fm_op.fa_op = FMOP_DECAP_NOSTRIP;
1121         return enic_fm_append_action_op(fm, &fm_op, error);
1122 }
1123
/* Generate a reasonable source port number */
static uint16_t
gen_src_port(void)
{
	uint16_t port = rte_rand();

	/* Clamp into [32768, 61000], the defaults in OVS-DPDK and Linux */
	if (port < 32768)
		port = 32768;
	if (port > 61000)
		port = 61000;
	return rte_cpu_to_be_16(port);
}
1134
/* VXLAN encap is done via flowman compound action.
 *
 * Walks the encap item list (ETH [VLAN] IPV4|IPV6 UDP VXLAN) and copies
 * each item's spec into the action's header template buffer, filling the
 * FMOP_ENCAP op's length-patch offsets/deltas as it goes. On success the
 * op is appended to the in-progress action; on a malformed item list an
 * rte_flow error is set and a negative errno returned.
 *
 * NOTE(review): item specs are copied without checking item->spec for
 * NULL here — presumably validated by the caller; confirm before reuse.
 */
static int
enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
			 const struct rte_flow_item *item,
			 struct rte_flow_error *error)
{
	struct fm_action_op fm_op;
	struct rte_ether_hdr *eth;
	struct rte_udp_hdr *udp;
	uint16_t *ethertype;
	void *template;
	uint8_t off;

	ENICPMD_FUNC_TRACE();
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_ENCAP;
	/* template/off track the write position in the action's data area */
	template = fm->action.fma_data;
	off = 0;
	/*
	 * Copy flow items to the flowman template starting L2.
	 * L2 must be ethernet.
	 */
	flow_item_skip_void(&item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: first item should be ethernet");
	eth = (struct rte_ether_hdr *)template;
	/* Remember where ethertype lives; patched once L3 type is known */
	ethertype = &eth->ether_type;
	append_template(&template, &off, item->spec,
			sizeof(struct rte_ether_hdr));
	item++;
	flow_item_skip_void(&item);
	/* Optional VLAN */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		const struct rte_flow_item_vlan *spec;

		ENICPMD_LOG(DEBUG, "vxlan-encap: vlan");
		spec = item->spec;
		/* VLAN tag is carried in the op, not in the template */
		fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->tci);
		item++;
		flow_item_skip_void(&item);
	}
	/* L3 must be IPv4, IPv6 */
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		struct rte_ipv4_hdr *ip4;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv4");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		ip4 = (struct rte_ipv4_hdr *)template;
		/*
		 * Offset of IPv4 length field and its initial value
		 * (IP + UDP + VXLAN) are specified in the action. The NIC
		 * will add inner packet length.
		 */
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv4_hdr, total_length);
		fm_op.encap.len1_delta = sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv4_hdr));
		/* Normalize header fields the app may have left as zero */
		ip4->version_ihl = RTE_IPV4_VHL_DEF;
		if (ip4->time_to_live == 0)
			ip4->time_to_live = IP_DEFTTL;
		ip4->next_proto_id = IPPROTO_UDP;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_IPV6:
	{
		struct rte_ipv6_hdr *ip6;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv6");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		ip6 = (struct rte_ipv6_hdr *)template;
		/* IPv6 payload_len excludes the IPv6 header itself */
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv6_hdr, payload_len);
		fm_op.encap.len1_delta = sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv6_hdr));
		ip6->vtc_flow |= rte_cpu_to_be_32(IP6_VTC_FLOW);
		if (ip6->hop_limits == 0)
			ip6->hop_limits = IP_DEFTTL;
		ip6->proto = IPPROTO_UDP;
		break;
	}
	default:
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: L3 must be IPv4/IPv6");
	}
	item++;
	flow_item_skip_void(&item);

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: UDP must follow IPv4/IPv6");
	/* UDP length = UDP + VXLAN. NIC will add inner packet length. */
	fm_op.encap.len2_offset =
		off + offsetof(struct rte_udp_hdr, dgram_len);
	fm_op.encap.len2_delta =
		sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
	udp = (struct rte_udp_hdr *)template;
	append_template(&template, &off, item->spec,
			sizeof(struct rte_udp_hdr));
	/*
	 * Firmware does not hash/fill source port yet. Generate a
	 * random port, as there is *usually* one rte_flow for the
	 * given inner packet stream (i.e. a single stream has one
	 * random port).
	 */
	if (udp->src_port == 0)
		udp->src_port = gen_src_port();
	item++;
	flow_item_skip_void(&item);

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN)
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: VXLAN must follow UDP");
	append_template(&template, &off, item->spec,
			sizeof(struct rte_flow_item_vxlan));

	/*
	 * Fill in the rest of the action structure.
	 * Indicate that we want to encap with vxlan at packet start.
	 */
	fm_op.encap.template_offset = 0;
	fm_op.encap.template_len = off;
	return enic_fm_append_action_op(fm, &fm_op, error);
}
1272
1273 static int
1274 enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
1275                   uint64_t *handle)
1276 {
1277         uint32_t bdf;
1278         uint64_t args[2];
1279         int rc;
1280
1281         ENICPMD_FUNC_TRACE();
1282         ENICPMD_LOG(DEBUG, "bdf=%x:%x:%x", addr->bus, addr->devid,
1283                     addr->function);
1284         bdf = addr->bus << 8 | addr->devid << 3 | addr->function;
1285         args[0] = FM_VNIC_FIND;
1286         args[1] = bdf;
1287         rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
1288         if (rc != 0) {
1289                 /* Expected to fail if BDF is not on the adapter */
1290                 ENICPMD_LOG(DEBUG, "cannot find vnic handle: rc=%d", rc);
1291                 return rc;
1292         }
1293         *handle = args[0];
1294         ENICPMD_LOG(DEBUG, "found vnic: handle=0x%" PRIx64, *handle);
1295         return 0;
1296 }
1297
1298 /*
1299  * Egress: target port should be either PF uplink or VF.
1300  * Supported cases
1301  * 1. VF egress -> PF uplink
1302  *   PF may be this VF's PF, or another PF, as long as they are on the same VIC.
1303  * 2. VF egress -> VF
1304  *
1305  * Unsupported cases
1306  * 1. PF egress -> VF
1307  *   App should be using representor to pass packets to VF
1308  */
1309 static int
1310 vf_egress_port_id_action(struct enic_flowman *fm,
1311                          struct rte_eth_dev *dst_dev,
1312                          uint64_t dst_vnic_h,
1313                          struct fm_action_op *fm_op,
1314                          struct rte_flow_error *error)
1315 {
1316         struct enic *src_enic, *dst_enic;
1317         struct enic_vf_representor *vf;
1318         uint8_t uif;
1319         int ret;
1320
1321         ENICPMD_FUNC_TRACE();
1322         src_enic = fm->user_enic;
1323         dst_enic = pmd_priv(dst_dev);
1324         if (!(src_enic->rte_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
1325                 return rte_flow_error_set(error, EINVAL,
1326                         RTE_FLOW_ERROR_TYPE_ACTION,
1327                         NULL, "source port is not VF representor");
1328         }
1329
1330         /* VF -> PF uplink. dst is not VF representor */
1331         if (!(dst_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
1332                 /* PF is the VF's PF? Then nothing to do */
1333                 vf = VF_ENIC_TO_VF_REP(src_enic);
1334                 if (vf->pf == dst_enic) {
1335                         ENICPMD_LOG(DEBUG, "destination port is VF's PF");
1336                         return 0;
1337                 }
1338                 /* If not, steer to the remote PF's uplink */
1339                 uif = dst_enic->fm_vnic_uif;
1340                 ENICPMD_LOG(DEBUG, "steer to uplink %u", uif);
1341                 memset(fm_op, 0, sizeof(*fm_op));
1342                 fm_op->fa_op = FMOP_SET_EGPORT;
1343                 fm_op->set_egport.egport = uif;
1344                 ret = enic_fm_append_action_op(fm, fm_op, error);
1345                 return ret;
1346         }
1347
1348         /* VF -> VF loopback. Hairpin and steer to vnic */
1349         memset(fm_op, 0, sizeof(*fm_op));
1350         fm_op->fa_op = FMOP_EG_HAIRPIN;
1351         ret = enic_fm_append_action_op(fm, fm_op, error);
1352         if (ret)
1353                 return ret;
1354         ENICPMD_LOG(DEBUG, "egress hairpin");
1355         fm->hairpin_steer_vnic_h = dst_vnic_h;
1356         fm->need_hairpin_steer = 1;
1357         return 0;
1358 }
1359
1360 static int
1361 enic_fm_check_transfer_dst(struct enic *enic, uint16_t dst_port_id,
1362                            struct rte_eth_dev **dst_dev,
1363                            struct rte_flow_error *error)
1364 {
1365         struct rte_eth_dev *dev;
1366
1367         ENICPMD_LOG(DEBUG, "port id %u", dst_port_id);
1368         if (!rte_eth_dev_is_valid_port(dst_port_id)) {
1369                 return rte_flow_error_set(error, EINVAL,
1370                         RTE_FLOW_ERROR_TYPE_ACTION,
1371                         NULL, "invalid port_id");
1372         }
1373         dev = &rte_eth_devices[dst_port_id];
1374         if (!dev_is_enic(dev)) {
1375                 return rte_flow_error_set(error, EINVAL,
1376                         RTE_FLOW_ERROR_TYPE_ACTION,
1377                         NULL, "port_id is not enic");
1378         }
1379         if (enic->switch_domain_id != pmd_priv(dev)->switch_domain_id) {
1380                 return rte_flow_error_set(error, EINVAL,
1381                         RTE_FLOW_ERROR_TYPE_ACTION,
1382                         NULL, "destination and source ports are not in the same switch domain");
1383         }
1384
1385         *dst_dev = dev;
1386         return 0;
1387 }
1388
1389 /* Translate flow actions to flowman TCAM entry actions */
1390 static int
1391 enic_fm_copy_action(struct enic_flowman *fm,
1392                     const struct rte_flow_action actions[],
1393                     uint8_t ingress,
1394                     struct rte_flow_error *error)
1395 {
1396         enum {
1397                 FATE = 1 << 0,
1398                 DECAP = 1 << 1,
1399                 PASSTHRU = 1 << 2,
1400                 COUNT = 1 << 3,
1401                 ENCAP = 1 << 4,
1402                 PUSH_VLAN = 1 << 5,
1403                 PORT_ID = 1 << 6,
1404         };
1405         struct fm_tcam_match_entry *fmt;
1406         struct fm_action_op fm_op;
1407         bool need_ovlan_action;
1408         struct enic *enic;
1409         uint32_t overlap;
1410         uint64_t vnic_h;
1411         uint16_t ovlan;
1412         bool first_rq;
1413         bool steer;
1414         int ret;
1415
1416         ENICPMD_FUNC_TRACE();
1417         fmt = &fm->tcam_entry;
1418         need_ovlan_action = false;
1419         ovlan = 0;
1420         first_rq = true;
1421         steer = false;
1422         enic = fm->user_enic;
1423         overlap = 0;
1424         vnic_h = enic->fm_vnic_handle;
1425
1426         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1427                 switch (actions->type) {
1428                 case RTE_FLOW_ACTION_TYPE_VOID:
1429                         continue;
1430                 case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
1431                         if (overlap & PASSTHRU)
1432                                 goto unsupported;
1433                         overlap |= PASSTHRU;
1434                         break;
1435                 }
1436                 case RTE_FLOW_ACTION_TYPE_JUMP: {
1437                         const struct rte_flow_action_jump *jump =
1438                                 actions->conf;
1439                         struct enic_fm_fet *fet;
1440
1441                         if (overlap & FATE)
1442                                 goto unsupported;
1443                         ret = enic_fet_get(fm, jump->group, ingress, NULL,
1444                                            &fet, error);
1445                         if (ret)
1446                                 return ret;
1447                         overlap |= FATE;
1448                         memset(&fm_op, 0, sizeof(fm_op));
1449                         fm_op.fa_op = FMOP_EXACT_MATCH;
1450                         fm_op.exact.handle = fet->handle;
1451                         fm->fet = fet;
1452                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1453                         if (ret)
1454                                 return ret;
1455                         break;
1456                 }
1457                 case RTE_FLOW_ACTION_TYPE_MARK: {
1458                         const struct rte_flow_action_mark *mark =
1459                                 actions->conf;
1460
1461                         if (enic->use_noscatter_vec_rx_handler)
1462                                 goto unsupported;
1463                         if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
1464                                 return rte_flow_error_set(error, EINVAL,
1465                                         RTE_FLOW_ERROR_TYPE_ACTION,
1466                                         NULL, "invalid mark id");
1467                         memset(&fm_op, 0, sizeof(fm_op));
1468                         fm_op.fa_op = FMOP_MARK;
1469                         fm_op.mark.mark = mark->id + 1;
1470                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1471                         if (ret)
1472                                 return ret;
1473                         break;
1474                 }
1475                 case RTE_FLOW_ACTION_TYPE_FLAG: {
1476                         if (enic->use_noscatter_vec_rx_handler)
1477                                 goto unsupported;
1478                         /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
1479                         memset(&fm_op, 0, sizeof(fm_op));
1480                         fm_op.fa_op = FMOP_MARK;
1481                         fm_op.mark.mark = ENIC_MAGIC_FILTER_ID;
1482                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1483                         if (ret)
1484                                 return ret;
1485                         break;
1486                 }
1487                 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1488                         const struct rte_flow_action_queue *queue =
1489                                 actions->conf;
1490
1491                         /*
1492                          * If fate other than QUEUE or RSS, fail. Multiple
1493                          * rss and queue actions are ok.
1494                          */
1495                         if ((overlap & FATE) && first_rq)
1496                                 goto unsupported;
1497                         first_rq = false;
1498                         overlap |= FATE;
1499                         memset(&fm_op, 0, sizeof(fm_op));
1500                         fm_op.fa_op = FMOP_RQ_STEER;
1501                         fm_op.rq_steer.rq_index =
1502                                 enic_rte_rq_idx_to_sop_idx(queue->index);
1503                         fm_op.rq_steer.rq_count = 1;
1504                         fm_op.rq_steer.vnic_handle = vnic_h;
1505                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1506                         if (ret)
1507                                 return ret;
1508                         ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
1509                                     fm_op.rq_steer.rq_index);
1510                         steer = true;
1511                         break;
1512                 }
1513                 case RTE_FLOW_ACTION_TYPE_DROP: {
1514                         if (overlap & FATE)
1515                                 goto unsupported;
1516                         overlap |= FATE;
1517                         memset(&fm_op, 0, sizeof(fm_op));
1518                         fm_op.fa_op = FMOP_DROP;
1519                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1520                         if (ret)
1521                                 return ret;
1522                         ENICPMD_LOG(DEBUG, "create DROP action");
1523                         break;
1524                 }
1525                 case RTE_FLOW_ACTION_TYPE_COUNT: {
1526                         if (overlap & COUNT)
1527                                 goto unsupported;
1528                         overlap |= COUNT;
1529                         /* Count is associated with entry not action on VIC. */
1530                         fmt->ftm_flags |= FMEF_COUNTER;
1531                         break;
1532                 }
1533                 case RTE_FLOW_ACTION_TYPE_RSS: {
1534                         const struct rte_flow_action_rss *rss = actions->conf;
1535                         bool allow;
1536                         uint16_t i;
1537
1538                         /*
1539                          * If fate other than QUEUE or RSS, fail. Multiple
1540                          * rss and queue actions are ok.
1541                          */
1542                         if ((overlap & FATE) && first_rq)
1543                                 goto unsupported;
1544                         first_rq = false;
1545                         overlap |= FATE;
1546
1547                         /*
1548                          * Hardware only supports RSS actions on outer level
1549                          * with default type and function. Queues must be
1550                          * sequential.
1551                          */
1552                         allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
1553                                 rss->level == 0 && (rss->types == 0 ||
1554                                 rss->types == enic->rss_hf) &&
1555                                 rss->queue_num <= enic->rq_count &&
1556                                 rss->queue[rss->queue_num - 1] < enic->rq_count;
1557
1558
1559                         /* Identity queue map needs to be sequential */
1560                         for (i = 1; i < rss->queue_num; i++)
1561                                 allow = allow && (rss->queue[i] ==
1562                                         rss->queue[i - 1] + 1);
1563                         if (!allow)
1564                                 goto unsupported;
1565
1566                         memset(&fm_op, 0, sizeof(fm_op));
1567                         fm_op.fa_op = FMOP_RQ_STEER;
1568                         fm_op.rq_steer.rq_index =
1569                                 enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
1570                         fm_op.rq_steer.rq_count = rss->queue_num;
1571                         fm_op.rq_steer.vnic_handle = vnic_h;
1572                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1573                         if (ret)
1574                                 return ret;
1575                         ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
1576                                     fm_op.rq_steer.rq_index);
1577                         steer = true;
1578                         break;
1579                 }
1580                 case RTE_FLOW_ACTION_TYPE_PORT_ID: {
1581                         const struct rte_flow_action_port_id *port;
1582                         struct rte_eth_dev *dev = NULL;
1583
1584                         if (!ingress && (overlap & PORT_ID)) {
1585                                 ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
1586                                 goto unsupported;
1587                         }
1588                         port = actions->conf;
1589                         if (port->original) {
1590                                 vnic_h = enic->fm_vnic_handle; /* This port */
1591                                 break;
1592                         }
1593                         ret = enic_fm_check_transfer_dst(enic, port->id, &dev,
1594                                                          error);
1595                         if (ret)
1596                                 return ret;
1597                         vnic_h = pmd_priv(dev)->fm_vnic_handle;
1598                         overlap |= PORT_ID;
1599                         /*
1600                          * Ingress. Nothing more to do. We add an implicit
1601                          * steer at the end if needed.
1602                          */
1603                         if (ingress)
1604                                 break;
1605                         /* Egress */
1606                         ret = vf_egress_port_id_action(fm, dev, vnic_h, &fm_op,
1607                                 error);
1608                         if (ret)
1609                                 return ret;
1610                         break;
1611                 }
1612                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
1613                         if (overlap & DECAP)
1614                                 goto unsupported;
1615                         overlap |= DECAP;
1616
1617                         ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
1618                                 error);
1619                         if (ret != 0)
1620                                 return ret;
1621                         break;
1622                 }
1623                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: {
1624                         const struct rte_flow_action_vxlan_encap *encap;
1625
1626                         encap = actions->conf;
1627                         if (overlap & ENCAP)
1628                                 goto unsupported;
1629                         overlap |= ENCAP;
1630                         ret = enic_fm_copy_vxlan_encap(fm, encap->definition,
1631                                 error);
1632                         if (ret != 0)
1633                                 return ret;
1634                         break;
1635                 }
1636                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
1637                         struct fm_action_op *decap;
1638
1639                         /*
1640                          * If decap-nostrip appears before pop vlan, this pop
1641                          * applies to the inner packet vlan. Turn it into
1642                          * decap-strip.
1643                          */
1644                         decap = find_prev_action_op(fm, FMOP_DECAP_NOSTRIP);
1645                         if (decap) {
1646                                 ENICPMD_LOG(DEBUG, "pop-vlan inner: decap-nostrip => decap-strip");
1647                                 decap->fa_op = FMOP_DECAP_STRIP;
1648                                 break;
1649                         }
1650                         memset(&fm_op, 0, sizeof(fm_op));
1651                         fm_op.fa_op = FMOP_POP_VLAN;
1652                         ret = enic_fm_append_action_op(fm, &fm_op, error);
1653                         if (ret)
1654                                 return ret;
1655                         break;
1656                 }
1657                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
1658                         const struct rte_flow_action_of_push_vlan *vlan;
1659
1660                         if (overlap & PASSTHRU)
1661                                 goto unsupported;
1662                         vlan = actions->conf;
1663                         if (vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN)) {
1664                                 return rte_flow_error_set(error, EINVAL,
1665                                         RTE_FLOW_ERROR_TYPE_ACTION,
1666                                         NULL, "unexpected push_vlan ethertype");
1667                         }
1668                         overlap |= PUSH_VLAN;
1669                         need_ovlan_action = true;
1670                         break;
1671                 }
1672                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: {
1673                         const struct rte_flow_action_of_set_vlan_pcp *pcp;
1674
1675                         pcp = actions->conf;
1676                         if (pcp->vlan_pcp > 7) {
1677                                 return rte_flow_error_set(error, EINVAL,
1678                                         RTE_FLOW_ERROR_TYPE_ACTION,
1679                                         NULL, "invalid vlan_pcp");
1680                         }
1681                         need_ovlan_action = true;
1682                         ovlan |= ((uint16_t)pcp->vlan_pcp) << 13;
1683                         break;
1684                 }
1685                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: {
1686                         const struct rte_flow_action_of_set_vlan_vid *vid;
1687
1688                         vid = actions->conf;
1689                         need_ovlan_action = true;
1690                         ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
1691                         break;
1692                 }
1693                 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
1694                         const struct rte_flow_action_ethdev *ethdev;
1695                         struct rte_eth_dev *dev = NULL;
1696
1697                         ethdev = actions->conf;
1698                         ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
1699                                                          &dev, error);
1700                         if (ret)
1701                                 return ret;
1702                         vnic_h = pmd_priv(dev)->fm_vnic_handle;
1703                         overlap |= PORT_ID;
1704                         /*
1705                          * Action PORT_REPRESENTOR implies ingress destination.
			 * Nothing to do. We add an implicit steer at the
			 * end if needed.
1708                          */
1709                         ingress = 1;
1710                         break;
1711                 }
1712                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
1713                         const struct rte_flow_action_ethdev *ethdev;
1714                         struct rte_eth_dev *dev = NULL;
1715
1716                         if (overlap & PORT_ID) {
1717                                 ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
1718                                 goto unsupported;
1719                         }
1720                         ethdev = actions->conf;
1721                         ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
1722                                                          &dev, error);
1723                         if (ret)
1724                                 return ret;
1725                         vnic_h = pmd_priv(dev)->fm_vnic_handle;
1726                         overlap |= PORT_ID;
1727                         /* Action REPRESENTED_PORT: always egress destination */
1728                         ingress = 0;
1729                         ret = vf_egress_port_id_action(fm, dev, vnic_h, &fm_op,
1730                                 error);
1731                         if (ret)
1732                                 return ret;
1733                         break;
1734                 }
1735                 default:
1736                         goto unsupported;
1737                 }
1738         }
1739
1740         if (!(overlap & (FATE | PASSTHRU | COUNT | PORT_ID)))
1741                 goto unsupported;
1742         /* Egress from VF: need implicit WQ match */
1743         if (enic_is_vf_rep(enic) && !ingress) {
1744                 fmt->ftm_data.fk_wq_id = 0;
1745                 fmt->ftm_mask.fk_wq_id = 0xffff;
1746                 fmt->ftm_data.fk_wq_vnic = enic->fm_vnic_handle;
1747                 ENICPMD_LOG(DEBUG, "add implicit wq id match for vf %d",
1748                             VF_ENIC_TO_VF_REP(enic)->vf_id);
1749         }
1750         if (need_ovlan_action) {
1751                 memset(&fm_op, 0, sizeof(fm_op));
1752                 fm_op.fa_op = FMOP_SET_OVLAN;
1753                 fm_op.ovlan.vlan = ovlan;
1754                 ret = enic_fm_append_action_op(fm, &fm_op, error);
1755                 if (ret)
1756                         return ret;
1757         }
1758         /* Add steer op for PORT_ID without QUEUE */
1759         if ((overlap & PORT_ID) && !steer && ingress) {
1760                 memset(&fm_op, 0, sizeof(fm_op));
1761                 /* Always to queue 0 for now as generic RSS is not available */
1762                 fm_op.fa_op = FMOP_RQ_STEER;
1763                 fm_op.rq_steer.rq_index = 0;
1764                 fm_op.rq_steer.vnic_handle = vnic_h;
1765                 ret = enic_fm_append_action_op(fm, &fm_op, error);
1766                 if (ret)
1767                         return ret;
1768                 ENICPMD_LOG(DEBUG, "add implicit steer op");
1769         }
1770         /* Add required END */
1771         memset(&fm_op, 0, sizeof(fm_op));
1772         fm_op.fa_op = FMOP_END;
1773         ret = enic_fm_append_action_op(fm, &fm_op, error);
1774         if (ret)
1775                 return ret;
1776         enic_fm_reorder_action_op(fm);
1777         return 0;
1778
1779 unsupported:
1780         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1781                                   NULL, "enic: unsupported action");
1782 }
1783
1784 /** Check if the action is supported */
1785 static int
1786 enic_fm_match_action(const struct rte_flow_action *action,
1787                      const enum rte_flow_action_type *supported_actions)
1788 {
1789         for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
1790              supported_actions++) {
1791                 if (action->type == *supported_actions)
1792                         return 1;
1793         }
1794         return 0;
1795 }
1796
1797 /* Debug function to dump internal NIC action structure. */
1798 static void
1799 enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
1800 {
1801         /* Manually keep in sync with FMOP commands */
1802         const char *fmop_str[FMOP_OP_MAX] = {
1803                 [FMOP_END] = "end",
1804                 [FMOP_DROP] = "drop",
1805                 [FMOP_RQ_STEER] = "steer",
1806                 [FMOP_EXACT_MATCH] = "exmatch",
1807                 [FMOP_MARK] = "mark",
1808                 [FMOP_EXT_MARK] = "ext_mark",
1809                 [FMOP_TAG] = "tag",
1810                 [FMOP_EG_HAIRPIN] = "eg_hairpin",
1811                 [FMOP_IG_HAIRPIN] = "ig_hairpin",
1812                 [FMOP_ENCAP_IVLAN] = "encap_ivlan",
1813                 [FMOP_ENCAP_NOIVLAN] = "encap_noivlan",
1814                 [FMOP_ENCAP] = "encap",
1815                 [FMOP_SET_OVLAN] = "set_ovlan",
1816                 [FMOP_DECAP_NOSTRIP] = "decap_nostrip",
1817                 [FMOP_DECAP_STRIP] = "decap_strip",
1818                 [FMOP_POP_VLAN] = "pop_vlan",
1819                 [FMOP_SET_EGPORT] = "set_egport",
1820                 [FMOP_RQ_STEER_ONLY] = "rq_steer_only",
1821                 [FMOP_SET_ENCAP_VLAN] = "set_encap_vlan",
1822                 [FMOP_EMIT] = "emit",
1823                 [FMOP_MODIFY] = "modify",
1824         };
1825         const struct fm_action_op *op = &fm_action->fma_action_ops[0];
1826         char buf[128], *bp = buf;
1827         const char *op_str;
1828         int i, n, buf_len;
1829
1830         buf[0] = '\0';
1831         buf_len = sizeof(buf);
1832         for (i = 0; i < FM_ACTION_OP_MAX; i++) {
1833                 if (op->fa_op == FMOP_END)
1834                         break;
1835                 if (op->fa_op >= FMOP_OP_MAX)
1836                         op_str = "unknown";
1837                 else
1838                         op_str = fmop_str[op->fa_op];
1839                 n = snprintf(bp, buf_len, "%s,", op_str);
1840                 if (n > 0 && n < buf_len) {
1841                         bp += n;
1842                         buf_len -= n;
1843                 }
1844                 op++;
1845         }
1846         /* Remove trailing comma */
1847         if (buf[0])
1848                 *(bp - 1) = '\0';
1849         ENICPMD_LOG(DEBUG, "       Acions: %s", buf);
1850 }
1851
/*
 * Render the names of the set bits in @bits as a comma-separated list.
 *
 * @param bits     bitmask to decode
 * @param strings  per-bit names; strings[i] describes bit i
 * @param max      number of entries in @strings; higher bits are ignored
 * @param buf      output buffer; left untouched when no listed bit is set
 * @param buf_len  size of @buf in bytes
 * @return number of characters written, excluding the NUL terminator
 */
static int
bits_to_str(uint32_t bits, const char *strings[], int max,
	    char *buf, int buf_len)
{
	int i, n = 0, len = 0;

	for (i = 0; i < max; i++) {
		/* 1u: left-shifting signed 1 by 31 would be undefined */
		if (bits & (1u << i)) {
			n = snprintf(buf, buf_len, "%s,", strings[i]);
			/* Advance only on a complete (untruncated) write */
			if (n > 0 && n < buf_len) {
				buf += n;
				buf_len -= n;
				len += n;
			}
		}
	}
	/* Remove trailing comma */
	if (len) {
		*(buf - 1) = '\0';
		len--;
	}
	return len;
}
1875
1876 /* Debug function to dump internal NIC filter structure. */
1877 static void
1878 __enic_fm_dump_tcam_match(const struct fm_header_set *fk_hdrset, char *buf,
1879                           int buf_len)
1880 {
1881         /* Manually keep in sync with FKM_BITS */
1882         const char *fm_fkm_str[FKM_BIT_COUNT] = {
1883                 [FKM_QTAG_BIT] = "qtag",
1884                 [FKM_CMD_BIT] = "cmd",
1885                 [FKM_IPV4_BIT] = "ip4",
1886                 [FKM_IPV6_BIT] = "ip6",
1887                 [FKM_ROCE_BIT] = "roce",
1888                 [FKM_UDP_BIT] = "udp",
1889                 [FKM_TCP_BIT] = "tcp",
1890                 [FKM_TCPORUDP_BIT] = "tcpportudp",
1891                 [FKM_IPFRAG_BIT] = "ipfrag",
1892                 [FKM_NVGRE_BIT] = "nvgre",
1893                 [FKM_VXLAN_BIT] = "vxlan",
1894                 [FKM_GENEVE_BIT] = "geneve",
1895                 [FKM_NSH_BIT] = "nsh",
1896                 [FKM_ROCEV2_BIT] = "rocev2",
1897                 [FKM_VLAN_PRES_BIT] = "vlan_pres",
1898                 [FKM_IPOK_BIT] = "ipok",
1899                 [FKM_L4OK_BIT] = "l4ok",
1900                 [FKM_ROCEOK_BIT] = "roceok",
1901                 [FKM_FCSOK_BIT] = "fcsok",
1902                 [FKM_EG_SPAN_BIT] = "eg_span",
1903                 [FKM_IG_SPAN_BIT] = "ig_span",
1904                 [FKM_EG_HAIRPINNED_BIT] = "eg_hairpinned",
1905         };
1906         /* Manually keep in sync with FKH_BITS */
1907         const char *fm_fkh_str[FKH_BIT_COUNT] = {
1908                 [FKH_ETHER_BIT] = "eth",
1909                 [FKH_QTAG_BIT] = "qtag",
1910                 [FKH_L2RAW_BIT] = "l2raw",
1911                 [FKH_IPV4_BIT] = "ip4",
1912                 [FKH_IPV6_BIT] = "ip6",
1913                 [FKH_L3RAW_BIT] = "l3raw",
1914                 [FKH_UDP_BIT] = "udp",
1915                 [FKH_TCP_BIT] = "tcp",
1916                 [FKH_ICMP_BIT] = "icmp",
1917                 [FKH_VXLAN_BIT] = "vxlan",
1918                 [FKH_L4RAW_BIT] = "l4raw",
1919         };
1920         uint32_t fkh_bits = fk_hdrset->fk_header_select;
1921         uint32_t fkm_bits = fk_hdrset->fk_metadata;
1922         int n;
1923
1924         if (!fkm_bits && !fkh_bits)
1925                 return;
1926         n = snprintf(buf, buf_len, "metadata(");
1927         if (n > 0 && n < buf_len) {
1928                 buf += n;
1929                 buf_len -= n;
1930         }
1931         n = bits_to_str(fkm_bits, fm_fkm_str, FKM_BIT_COUNT, buf, buf_len);
1932         if (n > 0 && n < buf_len) {
1933                 buf += n;
1934                 buf_len -= n;
1935         }
1936         n = snprintf(buf, buf_len, ") valid hdr fields(");
1937         if (n > 0 && n < buf_len) {
1938                 buf += n;
1939                 buf_len -= n;
1940         }
1941         n = bits_to_str(fkh_bits, fm_fkh_str, FKH_BIT_COUNT, buf, buf_len);
1942         if (n > 0 && n < buf_len) {
1943                 buf += n;
1944                 buf_len -= n;
1945         }
1946         snprintf(buf, buf_len, ")");
1947 }
1948
1949 static void
1950 enic_fm_dump_tcam_match(const struct fm_tcam_match_entry *match,
1951                         uint8_t ingress)
1952 {
1953         char buf[256];
1954
1955         memset(buf, 0, sizeof(buf));
1956         __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[0],
1957                                   buf, sizeof(buf));
1958         ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter position %u",
1959                     (ingress) ? "IG" : "EG", buf,
1960                     (match->ftm_flags & FMEF_COUNTER) ? "" : "no ",
1961                     match->ftm_position);
1962         memset(buf, 0, sizeof(buf));
1963         __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[1],
1964                                   buf, sizeof(buf));
1965         if (buf[0])
1966                 ENICPMD_LOG(DEBUG, "         Inner: %s", buf);
1967 }
1968
1969 /* Debug function to dump internal NIC flow structures. */
1970 static void
1971 enic_fm_dump_tcam_entry(const struct fm_tcam_match_entry *fm_match,
1972                         const struct fm_action *fm_action,
1973                         uint8_t ingress)
1974 {
1975         if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG))
1976                 return;
1977         enic_fm_dump_tcam_match(fm_match, ingress);
1978         enic_fm_dump_tcam_actions(fm_action);
1979 }
1980
1981 static int
1982 enic_fm_flow_parse(struct enic_flowman *fm,
1983                    const struct rte_flow_attr *attrs,
1984                    const struct rte_flow_item pattern[],
1985                    const struct rte_flow_action actions[],
1986                    struct rte_flow_error *error)
1987 {
1988         const struct rte_flow_action *action;
1989         unsigned int ret;
1990         static const enum rte_flow_action_type *sa;
1991
1992         ENICPMD_FUNC_TRACE();
1993         ret = 0;
1994         if (!pattern) {
1995                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1996                                    NULL, "no pattern specified");
1997                 return -rte_errno;
1998         }
1999
2000         if (!actions) {
2001                 rte_flow_error_set(error, EINVAL,
2002                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2003                                    NULL, "no action specified");
2004                 return -rte_errno;
2005         }
2006
2007         if (attrs) {
2008                 if (attrs->group != FM_TCAM_RTE_GROUP && attrs->priority) {
2009                         rte_flow_error_set(error, ENOTSUP,
2010                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2011                                            NULL,
2012                                            "priorities are not supported for non-default (0) groups");
2013                         return -rte_errno;
2014                 } else if (!fm->owner_enic->switchdev_mode && attrs->transfer) {
2015                         rte_flow_error_set(error, ENOTSUP,
2016                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2017                                            NULL,
2018                                            "transfer is not supported");
2019                         return -rte_errno;
2020                 } else if (attrs->ingress && attrs->egress) {
2021                         rte_flow_error_set(error, ENOTSUP,
2022                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2023                                            NULL,
2024                                            "bidirectional rules not supported");
2025                         return -rte_errno;
2026                 }
2027
2028         } else {
2029                 rte_flow_error_set(error, EINVAL,
2030                                    RTE_FLOW_ERROR_TYPE_ATTR,
2031                                    NULL, "no attribute specified");
2032                 return -rte_errno;
2033         }
2034
2035         /* Verify Actions. */
2036         sa = (attrs->ingress) ? enic_fm_supported_ig_actions :
2037              enic_fm_supported_eg_actions;
2038         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
2039              action++) {
2040                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2041                         continue;
2042                 else if (!enic_fm_match_action(action, sa))
2043                         break;
2044         }
2045         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
2046                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
2047                                    action, "invalid action");
2048                 return -rte_errno;
2049         }
2050         ret = enic_fm_copy_entry(fm, pattern, error);
2051         if (ret)
2052                 return ret;
2053         ret = enic_fm_copy_action(fm, actions, attrs->ingress, error);
2054         return ret;
2055 }
2056
2057 static void
2058 enic_fm_counter_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
2059 {
2060         if (!fm_flow->counter_valid)
2061                 return;
2062         SLIST_INSERT_HEAD(&fm->counters, fm_flow->counter, next);
2063         fm_flow->counter_valid = false;
2064 }
2065
2066 static int
2067 enic_fm_more_counters(struct enic_flowman *fm)
2068 {
2069         struct enic_fm_counter *new_stack;
2070         struct enic_fm_counter *ctrs;
2071         int i, rc;
2072         uint64_t args[2];
2073
2074         ENICPMD_FUNC_TRACE();
2075         new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced +
2076                                 FM_COUNTERS_EXPAND) *
2077                                 sizeof(struct enic_fm_counter), 0);
2078         if (new_stack == NULL) {
2079                 ENICPMD_LOG(ERR, "cannot alloc counter memory");
2080                 return -ENOMEM;
2081         }
2082         fm->counter_stack = new_stack;
2083
2084         args[0] = FM_COUNTER_BRK;
2085         args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND;
2086         rc = flowman_cmd(fm, args, 2);
2087         if (rc != 0) {
2088                 ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc);
2089                 return rc;
2090         }
2091         ctrs = (struct enic_fm_counter *)fm->counter_stack +
2092                 fm->counters_alloced;
2093         for (i = 0; i < FM_COUNTERS_EXPAND; i++, ctrs++) {
2094                 ctrs->handle = fm->counters_alloced + i;
2095                 SLIST_INSERT_HEAD(&fm->counters, ctrs, next);
2096         }
2097         fm->counters_alloced += FM_COUNTERS_EXPAND;
2098         ENICPMD_LOG(DEBUG, "%u counters allocated, total: %u",
2099                     FM_COUNTERS_EXPAND, fm->counters_alloced);
2100         return 0;
2101 }
2102
2103 static int
2104 enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
2105 {
2106         uint64_t args[3];
2107         int ret;
2108
2109         ENICPMD_FUNC_TRACE();
2110         args[0] = FM_COUNTER_QUERY;
2111         args[1] = c->handle;
2112         args[2] = 1; /* clear */
2113         ret = flowman_cmd(fm, args, 3);
2114         if (ret) {
2115                 ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x",
2116                             ret, c->handle);
2117                 return ret;
2118         }
2119         return 0;
2120 }
2121
2122 static int
2123 enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error,
2124                       struct enic_fm_counter **ctr)
2125 {
2126         struct enic_fm_counter *c;
2127         int ret;
2128
2129         ENICPMD_FUNC_TRACE();
2130         *ctr = NULL;
2131         if (SLIST_EMPTY(&fm->counters)) {
2132                 ret = enic_fm_more_counters(fm);
2133                 if (ret)
2134                         return rte_flow_error_set(error, -ret,
2135                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2136                                 NULL, "enic: out of counters");
2137         }
2138         c = SLIST_FIRST(&fm->counters);
2139         SLIST_REMOVE_HEAD(&fm->counters, next);
2140         *ctr = c;
2141         return 0;
2142 }
2143
2144 static int
2145 enic_fm_action_free(struct enic_flowman *fm, struct enic_fm_action *ah)
2146 {
2147         uint64_t args[2];
2148         int ret = 0;
2149
2150         ENICPMD_FUNC_TRACE();
2151         RTE_ASSERT(ah->ref > 0);
2152         ah->ref--;
2153         if (ah->ref == 0) {
2154                 args[0] = FM_ACTION_FREE;
2155                 args[1] = ah->handle;
2156                 ret = flowman_cmd(fm, args, 2);
2157                 if (ret)
2158                         /* This is a "should never happen" error. */
2159                         ENICPMD_LOG(ERR, "freeing action rc=%d handle=0x%"
2160                                     PRIx64, ret, ah->handle);
2161                 rte_hash_del_key(fm->action_hash, (const void *)&ah->key);
2162                 free(ah);
2163         }
2164         return ret;
2165 }
2166
2167 static int
2168 enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
2169 {
2170         uint64_t args[2];
2171         int rc;
2172
2173         ENICPMD_FUNC_TRACE();
2174         args[0] = FM_MATCH_ENTRY_REMOVE;
2175         args[1] = handle;
2176         rc = flowman_cmd(fm, args, 2);
2177         if (rc)
2178                 ENICPMD_LOG(ERR, "cannot free match entry: rc=%d"
2179                             " handle=0x%" PRIx64, rc, handle);
2180         return rc;
2181 }
2182
2183 static struct enic_fm_jump_flow *
2184 find_jump_flow(struct enic_flowman *fm, uint32_t group)
2185 {
2186         struct enic_fm_jump_flow *j;
2187
2188         ENICPMD_FUNC_TRACE();
2189         TAILQ_FOREACH(j, &fm->jump_list, list) {
2190                 if (j->group == group)
2191                         return j;
2192         }
2193         return NULL;
2194 }
2195
2196 static void
2197 remove_jump_flow(struct enic_flowman *fm, struct rte_flow *flow)
2198 {
2199         struct enic_fm_jump_flow *j;
2200
2201         ENICPMD_FUNC_TRACE();
2202         TAILQ_FOREACH(j, &fm->jump_list, list) {
2203                 if (j->flow == flow) {
2204                         TAILQ_REMOVE(&fm->jump_list, j, list);
2205                         free(j);
2206                         return;
2207                 }
2208         }
2209 }
2210
2211 static int
2212 save_jump_flow(struct enic_flowman *fm,
2213                struct rte_flow *flow,
2214                uint32_t group,
2215                struct fm_tcam_match_entry *match,
2216                struct fm_action *action)
2217 {
2218         struct enic_fm_jump_flow *j;
2219
2220         ENICPMD_FUNC_TRACE();
2221         j = calloc(1, sizeof(struct enic_fm_jump_flow));
2222         if (j == NULL)
2223                 return -ENOMEM;
2224         j->flow = flow;
2225         j->group = group;
2226         j->match = *match;
2227         j->action = *action;
2228         TAILQ_INSERT_HEAD(&fm->jump_list, j, list);
2229         ENICPMD_LOG(DEBUG, "saved jump flow: flow=%p group=%u", flow, group);
2230         return 0;
2231 }
2232
2233 static void
2234 __enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
2235 {
2236         if (fm_flow->entry_handle != FM_INVALID_HANDLE) {
2237                 enic_fm_entry_free(fm, fm_flow->entry_handle);
2238                 fm_flow->entry_handle = FM_INVALID_HANDLE;
2239         }
2240         if (fm_flow->action != NULL) {
2241                 enic_fm_action_free(fm, fm_flow->action);
2242                 fm_flow->action = NULL;
2243         }
2244         enic_fm_counter_free(fm, fm_flow);
2245         if (fm_flow->fet) {
2246                 enic_fet_put(fm, fm_flow->fet);
2247                 fm_flow->fet = NULL;
2248         }
2249 }
2250
/*
 * Free an rte_flow and its driver-private state, including the implicit
 * hairpin-steer flow created alongside some egress flows.
 */
static void
enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow)
{
	/* Grab the hairpin-steer flow now, before flow->fm is freed below */
	struct enic_fm_flow *steer = flow->fm->hairpin_steer_flow;

	/* Flows that jump via a default-key FET were saved on the jump list */
	if (flow->fm->fet && flow->fm->fet->default_key)
		remove_jump_flow(fm, flow);
	__enic_fm_flow_free(fm, flow->fm);
	if (steer) {
		__enic_fm_flow_free(fm, steer);
		free(steer);
	}
	free(flow->fm);
	free(flow);
}
2266
/*
 * Install a TCAM match entry on the device.
 *
 * @param match_in      entry to install; copied into the DMA command buffer
 * @param entry_handle  out: device handle of the installed entry
 * @param ingress       selects the ingress vs egress TCAM table
 * @return 0 on success, negative devcmd error (error is populated) otherwise
 */
static int
enic_fm_add_tcam_entry(struct enic_flowman *fm,
		       struct fm_tcam_match_entry *match_in,
		       uint64_t *entry_handle,
		       uint8_t ingress,
		       struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *ftm;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Copy entry to the command buffer */
	ftm = &fm->cmd.va->fm_tcam_match_entry;
	memcpy(ftm, match_in, sizeof(*ftm));
	/* Add TCAM entry */
	args[0] = FM_TCAM_ENTRY_INSTALL;
	args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl;
	args[2] = fm->cmd.pa;
	ret = flowman_cmd(fm, args, 3);
	if (ret != 0) {
		ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d",
			    ingress ? "ingress" : "egress", ret);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "enic: devcmd(tcam-entry-install)");
		return ret;
	}
	/* On success the devcmd returns the new entry handle in args[0] */
	ENICPMD_LOG(DEBUG, "installed %s TCAM entry: handle=0x%" PRIx64,
		    ingress ? "ingress" : "egress", (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}
2299
/*
 * Install one entry into an exact match table (fet). The caller supplies
 * a TCAM-style entry; it is translated to the exact-match form here.
 * On success the firmware handle is stored in *entry_handle.
 */
static int
enic_fm_add_exact_entry(struct enic_flowman *fm,
			struct fm_tcam_match_entry *match_in,
			uint64_t *entry_handle,
			struct enic_fm_fet *fet,
			struct rte_flow_error *error)
{
	struct fm_exact_match_entry *fem;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* The new entry must have the table's key */
	if (memcmp(fet->key.fk_hdrset, match_in->ftm_mask.fk_hdrset,
		   sizeof(struct fm_header_set) * FM_HDRSET_MAX)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"enic: key does not match group's key");
	}

	/* Copy entry to the command buffer */
	fem = &fm->cmd.va->fm_exact_match_entry;
	/*
	 * Translate TCAM entry to exact entry. As is only need to drop
	 * position and mask. The mask is part of the exact match table.
	 * Position (aka priority) is not supported in the exact match table.
	 */
	fem->fem_data = match_in->ftm_data;
	fem->fem_flags = match_in->ftm_flags;
	fem->fem_action = match_in->ftm_action;
	fem->fem_counter = match_in->ftm_counter;

	/* Add exact entry */
	args[0] = FM_EXACT_ENTRY_INSTALL;
	args[1] = fet->handle;
	args[2] = fm->cmd.pa;
	ret = flowman_cmd(fm, args, 3);
	if (ret != 0) {
		ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u",
			    fet->ingress ? "ingress" : "egress", fet->group);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "enic: devcmd(exact-entry-install)");
		return ret;
	}
	/* On success args[0] holds the new entry's handle */
	ENICPMD_LOG(DEBUG, "installed %s exact entry: group=%u"
		    " handle=0x%" PRIx64,
		    fet->ingress ? "ingress" : "egress", fet->group,
		    (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}
2351
2352 static int
2353 enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
2354                        struct rte_flow_error *error,
2355                        struct enic_fm_action **ah_o)
2356 {
2357         struct enic_fm_action *ah;
2358         struct fm_action *fma;
2359         uint64_t args[2];
2360         int ret = 0;
2361
2362         ret = rte_hash_lookup_data(fm->action_hash, action_in,
2363                                    (void **)&ah);
2364         if (ret < 0 && ret != -ENOENT)
2365                 return rte_flow_error_set(error, -ret,
2366                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2367                                    NULL, "enic: rte_hash_lookup(aciton)");
2368
2369         if (ret == -ENOENT) {
2370                 /* Allocate a new action on the NIC. */
2371                 fma = &fm->cmd.va->fm_action;
2372                 memcpy(fma, action_in, sizeof(*fma));
2373
2374                 ah = calloc(1, sizeof(*ah));
2375                 memcpy(&ah->key, action_in, sizeof(struct fm_action));
2376                 if (ah == NULL)
2377                         return rte_flow_error_set(error, ENOMEM,
2378                                            RTE_FLOW_ERROR_TYPE_HANDLE,
2379                                            NULL, "enic: calloc(fm-action)");
2380                 args[0] = FM_ACTION_ALLOC;
2381                 args[1] = fm->cmd.pa;
2382                 ret = flowman_cmd(fm, args, 2);
2383                 if (ret != 0) {
2384                         rte_flow_error_set(error, -ret,
2385                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2386                                            NULL, "enic: devcmd(action-alloc)");
2387                         goto error_with_ah;
2388                 }
2389                 ah->handle = args[0];
2390                 ret = rte_hash_add_key_data(fm->action_hash,
2391                                             (const void *)action_in,
2392                                             (void *)ah);
2393                 if (ret != 0) {
2394                         rte_flow_error_set(error, -ret,
2395                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2396                                            NULL,
2397                                            "enic: rte_hash_add_key_data(actn)");
2398                         goto error_with_action_handle;
2399                 }
2400                 ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64,
2401                             ah->handle);
2402         }
2403
2404         /* Action handle struct is valid, increment reference count. */
2405         ah->ref++;
2406         *ah_o = ah;
2407         return 0;
2408 error_with_action_handle:
2409         args[0] = FM_ACTION_FREE;
2410         args[1] = ah->handle;
2411         ret = flowman_cmd(fm, args, 2);
2412         if (ret != 0)
2413                 rte_flow_error_set(error, -ret,
2414                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2415                                    NULL, "enic: devcmd(action-free)");
2416 error_with_ah:
2417         free(ah);
2418         return ret;
2419 }
2420
/*
 * Push match-action to the NIC.
 *
 * Resolves the action handle, optionally allocates a counter, then
 * installs the match entry into either the TCAM table (group 0) or the
 * group's exact match table. On error, resources already attached to
 * fm_flow (action ref, counter, fet ref) are NOT released here; the
 * caller is expected to free them via enic_fm_flow_free() — confirm
 * against callers before changing this contract.
 */
static int
__enic_fm_flow_add_entry(struct enic_flowman *fm,
			 struct enic_fm_flow *fm_flow,
			 struct fm_tcam_match_entry *match_in,
			 struct fm_action *action_in,
			 uint32_t group,
			 uint8_t ingress,
			 struct rte_flow_error *error)
{
	struct enic_fm_counter *ctr;
	struct enic_fm_action *ah = NULL;
	uint64_t entry_h;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Get or create an action handle. */
	ret = enic_action_handle_get(fm, action_in, error, &ah);
	if (ret)
		return ret;
	match_in->ftm_action = ah->handle;
	fm_flow->action = ah;

	/* Allocate counter if requested. */
	if (match_in->ftm_flags & FMEF_COUNTER) {
		ret = enic_fm_counter_alloc(fm, error, &ctr);
		if (ret) /* error has been filled in */
			return ret;
		fm_flow->counter_valid = true;
		fm_flow->counter = ctr;
		match_in->ftm_counter = ctr->handle;
	}

	/*
	 * Get the group's table (either TCAM or exact match table) and
	 * add entry to it. If we use the exact match table, the handler
	 * will translate the TCAM entry (match_in) to the appropriate
	 * exact match entry and use that instead.
	 */
	entry_h = FM_INVALID_HANDLE;
	if (group == FM_TCAM_RTE_GROUP) {
		ret = enic_fm_add_tcam_entry(fm, match_in, &entry_h, ingress,
					     error);
		if (ret)
			return ret;
		/* Jump action might have a ref to fet */
		fm_flow->fet = fm->fet;
		fm->fet = NULL;
	} else {
		struct enic_fm_fet *fet = NULL;

		/* Takes a reference on the group's table (fet) */
		ret = enic_fet_get(fm, group, ingress,
				   &match_in->ftm_mask, &fet, error);
		if (ret)
			return ret;
		fm_flow->fet = fet;
		ret = enic_fm_add_exact_entry(fm, match_in, &entry_h, fet,
					      error);
		if (ret)
			return ret;
	}
	/* Clear counter after adding entry, as it requires in-use counter */
	if (fm_flow->counter_valid) {
		ret = enic_fm_counter_zero(fm, fm_flow->counter);
		if (ret)
			return ret;
	}
	fm_flow->entry_handle = entry_h;
	return 0;
}
2492
2493 /* Push match-action to the NIC. */
2494 static struct rte_flow *
2495 enic_fm_flow_add_entry(struct enic_flowman *fm,
2496                        struct fm_tcam_match_entry *match_in,
2497                        struct fm_action *action_in,
2498                        const struct rte_flow_attr *attrs,
2499                        struct rte_flow_error *error)
2500 {
2501         struct enic_fm_flow *fm_flow;
2502         struct rte_flow *flow;
2503
2504         ENICPMD_FUNC_TRACE();
2505         match_in->ftm_position = attrs->priority;
2506         enic_fm_dump_tcam_entry(match_in, action_in, attrs->ingress);
2507         flow = calloc(1, sizeof(*flow));
2508         fm_flow = calloc(1, sizeof(*fm_flow));
2509         if (flow == NULL || fm_flow == NULL) {
2510                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2511                         NULL, "enic: cannot allocate rte_flow");
2512                 free(flow);
2513                 free(fm_flow);
2514                 return NULL;
2515         }
2516         flow->fm = fm_flow;
2517         fm_flow->action = NULL;
2518         fm_flow->entry_handle = FM_INVALID_HANDLE;
2519         if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in,
2520                                      attrs->group, attrs->ingress, error)) {
2521                 enic_fm_flow_free(fm, flow);
2522                 return NULL;
2523         }
2524         return flow;
2525 }
2526
/*
 * A new exact match table (fet) has materialized for a group. Re-target
 * every saved jump flow that referenced that group: delete its old TCAM
 * entry (which jumped to the default table) and install a new one that
 * jumps to the real table. Errors are logged only; there is no rollback.
 */
static void
convert_jump_flows(struct enic_flowman *fm, struct enic_fm_fet *fet,
		   struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct enic_fm_jump_flow *j;
	struct fm_action *fma;
	uint32_t group;

	ENICPMD_FUNC_TRACE();
	/*
	 * Find the saved flows that should jump to the new table (fet).
	 * Then delete the old TCAM entry that jumps to the default table,
	 * and add a new one that jumps to the new table.
	 */
	group = fet->group;
	j = find_jump_flow(fm, group);
	while (j) {
		ENICPMD_LOG(DEBUG, "convert jump flow: flow=%p group=%u",
			    j->flow, group);
		/* Delete old entry */
		fm_flow = j->flow->fm;
		/* Frees NIC resources but the fm_flow struct is reused below */
		__enic_fm_flow_free(fm, fm_flow);

		/* Add new entry */
		fma = &j->action;
		/* Patch the jump target to the newly created table */
		fma->fma_action_ops[0].exact.handle = fet->handle;
		if (__enic_fm_flow_add_entry(fm, fm_flow, &j->match, fma,
			FM_TCAM_RTE_GROUP, fet->ingress, error)) {
			/* Cannot roll back changes at the moment */
			ENICPMD_LOG(ERR, "cannot convert jump flow: flow=%p",
				    j->flow);
		} else {
			fm_flow->fet = fet;
			fet->ref++;
			ENICPMD_LOG(DEBUG, "convert ok: group=%u ref=%u",
				    fet->group, fet->ref);
		}

		/* The saved entry is consumed either way */
		TAILQ_REMOVE(&fm->jump_list, j, list);
		free(j);
		j = find_jump_flow(fm, group);
	}
}
2571
/*
 * Create the implicit ingress flow that pairs with an egress hairpin
 * flow: same match pattern (minus counters), single action steering to
 * RQ 0 of the hairpin vNIC. On success the new flow is recorded as the
 * egress flow's hairpin_steer_flow.
 *
 * NOTE: reuses fm->tcam_entry / fm->action scratch state left by the
 * just-parsed egress flow, so this must run before the scratch area is
 * reset — confirm call order against enic_fm_flow_create().
 */
static int
add_hairpin_steer(struct enic_flowman *fm, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct enic_fm_flow *fm_flow;
	struct fm_action *fm_action;
	struct fm_action_op fm_op;
	int ret;

	ENICPMD_FUNC_TRACE();
	fm_flow = calloc(1, sizeof(*fm_flow));
	if (fm_flow == NULL) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
			NULL, "enic: cannot allocate rte_flow");
		return -ENOMEM;
	}
	/* Original egress hairpin flow */
	fm_tcam_entry = &fm->tcam_entry;
	fm_action = &fm->action;
	/* Use the match pattern of the egress flow as is, without counters */
	fm_tcam_entry->ftm_flags &= ~FMEF_COUNTER;
	/* The only action is steer to vnic */
	fm->action_op_count = 0;
	memset(fm_action, 0, sizeof(*fm_action));
	memset(&fm_op, 0, sizeof(fm_op));
	/* Always to queue 0 for now */
	fm_op.fa_op = FMOP_RQ_STEER;
	fm_op.rq_steer.rq_index = 0;
	fm_op.rq_steer.vnic_handle = fm->hairpin_steer_vnic_h;
	ret = enic_fm_append_action_op(fm, &fm_op, error);
	if (ret)
		goto error_with_flow;
	ENICPMD_LOG(DEBUG, "add steer op");
	/* Add required END */
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_END;
	ret = enic_fm_append_action_op(fm, &fm_op, error);
	if (ret)
		goto error_with_flow;
	/* Add the ingress flow */
	fm_flow->action = NULL;
	fm_flow->entry_handle = FM_INVALID_HANDLE;
	ret = __enic_fm_flow_add_entry(fm, fm_flow, fm_tcam_entry, fm_action,
				       FM_TCAM_RTE_GROUP, 1 /* ingress */, error);
	if (ret) {
		ENICPMD_LOG(ERR, "cannot add hairpin-steer flow");
		goto error_with_flow;
	}
	/* The new flow is now the egress flow's paired flow */
	flow->fm->hairpin_steer_flow = fm_flow;
	return 0;

error_with_flow:
	free(fm_flow);
	return ret;
}
2629
2630 static void
2631 enic_fm_open_scratch(struct enic_flowman *fm)
2632 {
2633         fm->action_op_count = 0;
2634         fm->fet = NULL;
2635         fm->need_hairpin_steer = 0;
2636         fm->hairpin_steer_vnic_h = 0;
2637         memset(&fm->tcam_entry, 0, sizeof(fm->tcam_entry));
2638         memset(&fm->action, 0, sizeof(fm->action));
2639 }
2640
2641 static void
2642 enic_fm_close_scratch(struct enic_flowman *fm)
2643 {
2644         if (fm->fet) {
2645                 enic_fet_put(fm, fm->fet);
2646                 fm->fet = NULL;
2647         }
2648         fm->action_op_count = 0;
2649 }
2650
2651 static int
2652 enic_fm_flow_validate(struct rte_eth_dev *dev,
2653                       const struct rte_flow_attr *attrs,
2654                       const struct rte_flow_item pattern[],
2655                       const struct rte_flow_action actions[],
2656                       struct rte_flow_error *error)
2657 {
2658         struct fm_tcam_match_entry *fm_tcam_entry;
2659         struct fm_action *fm_action;
2660         struct enic_flowman *fm;
2661         int ret;
2662
2663         ENICPMD_FUNC_TRACE();
2664         fm = begin_fm(pmd_priv(dev));
2665         if (fm == NULL)
2666                 return -ENOTSUP;
2667         enic_fm_open_scratch(fm);
2668         ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
2669         if (!ret) {
2670                 fm_tcam_entry = &fm->tcam_entry;
2671                 fm_action = &fm->action;
2672                 enic_fm_dump_tcam_entry(fm_tcam_entry, fm_action,
2673                                         attrs->ingress);
2674         }
2675         enic_fm_close_scratch(fm);
2676         end_fm(fm);
2677         return ret;
2678 }
2679
2680 static int
2681 enic_fm_flow_query_count(struct rte_eth_dev *dev,
2682                          struct rte_flow *flow, void *data,
2683                          struct rte_flow_error *error)
2684 {
2685         struct rte_flow_query_count *query;
2686         struct enic_fm_flow *fm_flow;
2687         struct enic_flowman *fm;
2688         uint64_t args[3];
2689         int rc;
2690
2691         ENICPMD_FUNC_TRACE();
2692         fm = begin_fm(pmd_priv(dev));
2693         query = data;
2694         fm_flow = flow->fm;
2695         if (!fm_flow->counter_valid) {
2696                 rc = rte_flow_error_set(error, ENOTSUP,
2697                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2698                         "enic: flow does not have counter");
2699                 goto exit;
2700         }
2701
2702         args[0] = FM_COUNTER_QUERY;
2703         args[1] = fm_flow->counter->handle;
2704         args[2] = query->reset;
2705         rc = flowman_cmd(fm, args, 3);
2706         if (rc) {
2707                 ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x",
2708                             rc, fm_flow->counter->handle);
2709                 goto exit;
2710         }
2711         query->hits_set = 1;
2712         query->hits = args[0];
2713         query->bytes_set = 1;
2714         query->bytes = args[1];
2715         rc = 0;
2716 exit:
2717         end_fm(fm);
2718         return rc;
2719 }
2720
2721 static int
2722 enic_fm_flow_query(struct rte_eth_dev *dev,
2723                    struct rte_flow *flow,
2724                    const struct rte_flow_action *actions,
2725                    void *data,
2726                    struct rte_flow_error *error)
2727 {
2728         int ret = 0;
2729
2730         ENICPMD_FUNC_TRACE();
2731         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2732                 switch (actions->type) {
2733                 case RTE_FLOW_ACTION_TYPE_VOID:
2734                         break;
2735                 case RTE_FLOW_ACTION_TYPE_COUNT:
2736                         ret = enic_fm_flow_query_count(dev, flow, data, error);
2737                         break;
2738                 default:
2739                         return rte_flow_error_set(error, ENOTSUP,
2740                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2741                                                   actions,
2742                                                   "action not supported");
2743                 }
2744                 if (ret < 0)
2745                         return ret;
2746         }
2747         return 0;
2748 }
2749
/*
 * rte_flow create op. Parses the pattern/actions into the scratch state,
 * installs the flow, adds the implicit hairpin-steer ingress flow if one
 * is needed, and maintains the jump-flow bookkeeping for groups whose
 * exact match tables do not exist yet. Returns the new flow or NULL with
 * *error set.
 */
static struct rte_flow *
enic_fm_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct fm_action *fm_action;
	struct enic_flowman *fm;
	struct enic_fm_fet *fet;
	struct rte_flow *flow;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	enic = pmd_priv(dev);
	fm = begin_fm(enic);
	if (fm == NULL) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"flowman is not initialized");
		return NULL;
	}
	enic_fm_open_scratch(fm);
	flow = NULL;
	ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
	if (ret < 0)
		goto error_with_scratch;
	fm_tcam_entry = &fm->tcam_entry;
	fm_action = &fm->action;
	flow = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
				      attrs, error);
	if (flow) {
		/* Add ingress rule that pairs with hairpin rule */
		if (fm->need_hairpin_steer) {
			ret = add_hairpin_steer(fm, flow, error);
			if (ret) {
				enic_fm_flow_free(fm, flow);
				flow = NULL;
				goto error_with_scratch;
			}
		}
		LIST_INSERT_HEAD(&enic->flows, flow, next);
		fet = flow->fm->fet;
		if (fet && fet->default_key) {
			/*
			 * Jump to non-existent group? Save the relevant info
			 * so we can convert this flow when that group
			 * materializes.
			 */
			save_jump_flow(fm, flow, fet->group,
				       fm_tcam_entry, fm_action);
		} else if (fet && fet->ref == 1) {
			/*
			 * A new table is created. Convert the saved flows
			 * that should jump to this group.
			 */
			convert_jump_flows(fm, fet, error);
		}
	}

error_with_scratch:
	/* Shared exit path: also reached on success to release scratch */
	enic_fm_close_scratch(fm);
	end_fm(fm);
	return flow;
}
2817
2818 static int
2819 enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2820                      __rte_unused struct rte_flow_error *error)
2821 {
2822         struct enic *enic = pmd_priv(dev);
2823         struct enic_flowman *fm;
2824
2825         ENICPMD_FUNC_TRACE();
2826         fm = begin_fm(enic);
2827         if (fm == NULL)
2828                 return 0;
2829         LIST_REMOVE(flow, next);
2830         enic_fm_flow_free(fm, flow);
2831         end_fm(fm);
2832         return 0;
2833 }
2834
/*
 * rte_flow flush op: destroy every non-internal flow on the device.
 * Internal flows (e.g. implicit VF<->representor flows) are preserved by
 * parking them on a temporary list and re-inserting them afterwards.
 */
static int
enic_fm_flow_flush(struct rte_eth_dev *dev,
		   __rte_unused struct rte_flow_error *error)
{
	LIST_HEAD(enic_flows, rte_flow) internal;
	struct enic_fm_flow *fm_flow;
	struct enic_flowman *fm;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	fm = begin_fm(enic);
	if (fm == NULL)
		return 0;
	/* Destroy all non-internal flows */
	LIST_INIT(&internal);
	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		fm_flow = flow->fm;
		LIST_REMOVE(flow, next);
		if (flow->internal) {
			/* Park internal flows; restored after the loop */
			LIST_INSERT_HEAD(&internal, flow, next);
			continue;
		}
		/*
		 * If tables are null, then vNIC is closing, and the firmware
		 * has already cleaned up flowman state. So do not try to free
		 * resources, as it only causes errors.
		 */
		if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) {
			fm_flow->entry_handle = FM_INVALID_HANDLE;
			fm_flow->action = NULL;
			fm_flow->fet = NULL;
		}
		enic_fm_flow_free(fm, flow);
	}
	/* Put the internal flows back on the device list */
	while (!LIST_EMPTY(&internal)) {
		flow = LIST_FIRST(&internal);
		LIST_REMOVE(flow, next);
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	}
	end_fm(fm);
	return 0;
}
2880
2881 static int
2882 enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
2883 {
2884         uint64_t args[2];
2885         int rc;
2886
2887         args[0] = FM_MATCH_TABLE_FREE;
2888         args[1] = handle;
2889         rc = flowman_cmd(fm, args, 2);
2890         if (rc)
2891                 ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64,
2892                             rc, handle);
2893         return rc;
2894 }
2895
2896 static int
2897 enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
2898                         uint32_t max_entries, uint64_t *handle)
2899 {
2900         struct fm_tcam_match_table *tcam_tbl;
2901         uint64_t args[2];
2902         int rc;
2903
2904         ENICPMD_FUNC_TRACE();
2905         tcam_tbl = &fm->cmd.va->fm_tcam_match_table;
2906         tcam_tbl->ftt_direction = direction;
2907         tcam_tbl->ftt_stage = FM_STAGE_LAST;
2908         tcam_tbl->ftt_max_entries = max_entries;
2909         args[0] = FM_TCAM_TABLE_ALLOC;
2910         args[1] = fm->cmd.pa;
2911         rc = flowman_cmd(fm, args, 2);
2912         if (rc) {
2913                 ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d",
2914                             (direction == FM_INGRESS) ? "IG" : "EG", rc);
2915                 return rc;
2916         }
2917         *handle = args[0];
2918         ENICPMD_LOG(DEBUG, "%s TCAM table allocated, handle=0x%" PRIx64,
2919                     (direction == FM_INGRESS) ? "IG" : "EG", *handle);
2920         return 0;
2921 }
2922
2923 static int
2924 enic_fm_init_actions(struct enic_flowman *fm)
2925 {
2926         struct rte_hash *a_hash;
2927         char name[RTE_HASH_NAMESIZE];
2928         struct rte_hash_parameters params = {
2929                 .entries = FM_MAX_ACTION_TABLE_SIZE,
2930                 .key_len = sizeof(struct fm_action),
2931                 .hash_func = rte_jhash,
2932                 .hash_func_init_val = 0,
2933                 .socket_id = rte_socket_id(),
2934         };
2935
2936         ENICPMD_FUNC_TRACE();
2937         snprintf((char *)name, sizeof(name), "fm-ah-%s",
2938                  fm->owner_enic->bdf_name);
2939         params.name = name;
2940
2941         a_hash = rte_hash_create(&params);
2942         if (a_hash == NULL)
2943                 return -rte_errno;
2944         fm->action_hash = a_hash;
2945         return 0;
2946 }
2947
2948 static int
2949 enic_fm_init_counters(struct enic_flowman *fm)
2950 {
2951         ENICPMD_FUNC_TRACE();
2952         SLIST_INIT(&fm->counters);
2953         return enic_fm_more_counters(fm);
2954 }
2955
2956 static void
2957 enic_fm_free_all_counters(struct enic_flowman *fm)
2958 {
2959         uint64_t args[2];
2960         int rc;
2961
2962         args[0] = FM_COUNTER_BRK;
2963         args[1] = 0;
2964         rc = flowman_cmd(fm, args, 2);
2965         if (rc != 0)
2966                 ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc);
2967         rte_free(fm->counter_stack);
2968 }
2969
2970 static int
2971 enic_fm_alloc_tcam_tables(struct enic_flowman *fm)
2972 {
2973         int rc;
2974
2975         ENICPMD_FUNC_TRACE();
2976         rc = enic_fm_tcam_tbl_alloc(fm, FM_INGRESS, FM_MAX_TCAM_TABLE_SIZE,
2977                                     &fm->ig_tcam_hndl);
2978         if (rc)
2979                 return rc;
2980         rc = enic_fm_tcam_tbl_alloc(fm, FM_EGRESS, FM_MAX_TCAM_TABLE_SIZE,
2981                                     &fm->eg_tcam_hndl);
2982         return rc;
2983 }
2984
2985 static void
2986 enic_fm_free_tcam_tables(struct enic_flowman *fm)
2987 {
2988         ENICPMD_FUNC_TRACE();
2989         if (fm->ig_tcam_hndl) {
2990                 ENICPMD_LOG(DEBUG, "free IG TCAM table handle=0x%" PRIx64,
2991                             fm->ig_tcam_hndl);
2992                 enic_fm_tbl_free(fm, fm->ig_tcam_hndl);
2993                 fm->ig_tcam_hndl = FM_INVALID_HANDLE;
2994         }
2995         if (fm->eg_tcam_hndl) {
2996                 ENICPMD_LOG(DEBUG, "free EG TCAM table handle=0x%" PRIx64,
2997                             fm->eg_tcam_hndl);
2998                 enic_fm_tbl_free(fm, fm->eg_tcam_hndl);
2999                 fm->eg_tcam_hndl = FM_INVALID_HANDLE;
3000         }
3001 }
3002
3003 int
3004 enic_fm_init(struct enic *enic)
3005 {
3006         const struct rte_pci_addr *addr;
3007         struct enic_flowman *fm;
3008         uint8_t name[RTE_MEMZONE_NAMESIZE];
3009         int rc;
3010
3011         if (enic->flow_filter_mode != FILTER_FLOWMAN)
3012                 return 0;
3013         ENICPMD_FUNC_TRACE();
3014         /* Get vnic handle and save for port-id action */
3015         if (enic_is_vf_rep(enic))
3016                 addr = &VF_ENIC_TO_VF_REP(enic)->bdf;
3017         else
3018                 addr = &RTE_ETH_DEV_TO_PCI(enic->rte_dev)->addr;
3019         rc = enic_fm_find_vnic(enic, addr, &enic->fm_vnic_handle);
3020         if (rc) {
3021                 ENICPMD_LOG(ERR, "cannot find vnic handle for %x:%x:%x",
3022                             addr->bus, addr->devid, addr->function);
3023                 return rc;
3024         }
3025         /* Save UIF for egport action */
3026         enic->fm_vnic_uif = vnic_dev_uif(enic->vdev);
3027         ENICPMD_LOG(DEBUG, "uif %u", enic->fm_vnic_uif);
3028         /* Nothing else to do for representor. It will share the PF flowman */
3029         if (enic_is_vf_rep(enic))
3030                 return 0;
3031         fm = calloc(1, sizeof(*fm));
3032         if (fm == NULL) {
3033                 ENICPMD_LOG(ERR, "cannot alloc flowman struct");
3034                 return -ENOMEM;
3035         }
3036         fm->owner_enic = enic;
3037         rte_spinlock_init(&fm->lock);
3038         TAILQ_INIT(&fm->fet_list);
3039         TAILQ_INIT(&fm->jump_list);
3040         /* Allocate host memory for flowman commands */
3041         snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name);
3042         fm->cmd.va = enic_alloc_consistent(enic,
3043                 sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
3044         if (!fm->cmd.va) {
3045                 ENICPMD_LOG(ERR, "cannot allocate flowman command memory");
3046                 rc = -ENOMEM;
3047                 goto error_fm;
3048         }
3049         /* Allocate TCAM tables upfront as they are the main tables */
3050         rc = enic_fm_alloc_tcam_tables(fm);
3051         if (rc) {
3052                 ENICPMD_LOG(ERR, "cannot alloc TCAM tables");
3053                 goto error_cmd;
3054         }
3055         /* Then a number of counters */
3056         rc = enic_fm_init_counters(fm);
3057         if (rc) {
3058                 ENICPMD_LOG(ERR, "cannot alloc counters");
3059                 goto error_tables;
3060         }
3061         /* set up action handle hash */
3062         rc = enic_fm_init_actions(fm);
3063         if (rc) {
3064                 ENICPMD_LOG(ERR, "cannot create action hash, error:%d", rc);
3065                 goto error_counters;
3066         }
3067         /*
3068          * One default exact match table for each direction. We hold onto
3069          * it until close.
3070          */
3071         rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet);
3072         if (rc) {
3073                 ENICPMD_LOG(ERR, "cannot alloc default IG exact match table");
3074                 goto error_actions;
3075         }
3076         fm->default_ig_fet->ref = 1;
3077         rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet);
3078         if (rc) {
3079                 ENICPMD_LOG(ERR, "cannot alloc default EG exact match table");
3080                 goto error_ig_fet;
3081         }
3082         fm->default_eg_fet->ref = 1;
3083         fm->vf_rep_tag = FM_VF_REP_TAG;
3084         enic->fm = fm;
3085         return 0;
3086
3087 error_ig_fet:
3088         enic_fet_free(fm, fm->default_ig_fet);
3089 error_actions:
3090         rte_hash_free(fm->action_hash);
3091 error_counters:
3092         enic_fm_free_all_counters(fm);
3093 error_tables:
3094         enic_fm_free_tcam_tables(fm);
3095 error_cmd:
3096         enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
3097                 fm->cmd.va, fm->cmd.pa);
3098 error_fm:
3099         free(fm);
3100         return rc;
3101 }
3102
3103 void
3104 enic_fm_destroy(struct enic *enic)
3105 {
3106         struct enic_flowman *fm;
3107         struct enic_fm_fet *fet;
3108
3109         ENICPMD_FUNC_TRACE();
3110         if (enic_is_vf_rep(enic)) {
3111                 delete_rep_flows(enic);
3112                 return;
3113         }
3114         if (enic->fm == NULL)
3115                 return;
3116         fm = enic->fm;
3117         enic_fm_flow_flush(enic->rte_dev, NULL);
3118         enic_fet_free(fm, fm->default_eg_fet);
3119         enic_fet_free(fm, fm->default_ig_fet);
3120         /* Free all exact match tables still open */
3121         while (!TAILQ_EMPTY(&fm->fet_list)) {
3122                 fet = TAILQ_FIRST(&fm->fet_list);
3123                 enic_fet_free(fm, fet);
3124         }
3125         enic_fm_free_tcam_tables(fm);
3126         enic_fm_free_all_counters(fm);
3127         rte_hash_free(fm->action_hash);
3128         enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
3129                 fm->cmd.va, fm->cmd.pa);
3130         fm->cmd.va = NULL;
3131         free(fm);
3132         enic->fm = NULL;
3133 }
3134
3135 int
3136 enic_fm_allocate_switch_domain(struct enic *pf)
3137 {
3138         const struct rte_pci_addr *cur_a, *prev_a;
3139         struct rte_eth_dev *dev;
3140         struct enic *cur, *prev;
3141         uint16_t domain_id;
3142         uint64_t vnic_h;
3143         uint16_t pid;
3144         int ret;
3145
3146         ENICPMD_FUNC_TRACE();
3147         if (enic_is_vf_rep(pf))
3148                 return -EINVAL;
3149         cur = pf;
3150         cur_a = &RTE_ETH_DEV_TO_PCI(cur->rte_dev)->addr;
3151         /* Go through ports and find another PF that is on the same adapter */
3152         RTE_ETH_FOREACH_DEV(pid) {
3153                 dev = &rte_eth_devices[pid];
3154                 if (!dev_is_enic(dev))
3155                         continue;
3156                 if (dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
3157                         continue;
3158                 if (dev == cur->rte_dev)
3159                         continue;
3160                 /* dev is another PF. Is it on the same adapter? */
3161                 prev = pmd_priv(dev);
3162                 prev_a = &RTE_ETH_DEV_TO_PCI(dev)->addr;
3163                 if (!enic_fm_find_vnic(cur, prev_a, &vnic_h)) {
3164                         ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) and port %u (PF BDF %x:%x:%x domain %u) are on the same VIC",
3165                                 cur->rte_dev->data->port_id,
3166                                 cur_a->bus, cur_a->devid, cur_a->function,
3167                                 dev->data->port_id,
3168                                 prev_a->bus, prev_a->devid, prev_a->function,
3169                                 prev->switch_domain_id);
3170                         cur->switch_domain_id = prev->switch_domain_id;
3171                         return 0;
3172                 }
3173         }
3174         ret = rte_eth_switch_domain_alloc(&domain_id);
3175         if (ret) {
3176                 ENICPMD_LOG(WARNING, "failed to allocate switch domain for device %d",
3177                             ret);
3178         }
3179         cur->switch_domain_id = domain_id;
3180         ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) is the 1st PF on the VIC. Allocated switch domain id %u",
3181                     cur->rte_dev->data->port_id,
3182                     cur_a->bus, cur_a->devid, cur_a->function,
3183                     domain_id);
3184         return ret;
3185 }
3186
/*
 * rte_flow driver ops implemented on top of the flowman (FM) firmware API.
 * Returned to ethdev so applications can program flows on FM-capable VICs.
 */
const struct rte_flow_ops enic_fm_flow_ops = {
	.validate = enic_fm_flow_validate,
	.create = enic_fm_flow_create,
	.destroy = enic_fm_flow_destroy,
	.flush = enic_fm_flow_flush,
	.query = enic_fm_flow_query,
};
3194
3195 /* Add a high priority flow that loops representor packets to VF */
3196 int
3197 enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf)
3198 {
3199         struct fm_tcam_match_entry *fm_tcam_entry;
3200         struct rte_flow *flow0, *flow1;
3201         struct fm_action *fm_action;
3202         struct rte_flow_error error;
3203         struct rte_flow_attr attrs;
3204         struct fm_action_op fm_op;
3205         struct enic_flowman *fm;
3206         struct enic *pf;
3207         uint8_t tag;
3208
3209         pf = vf->pf;
3210         fm = pf->fm;
3211         tag = fm->vf_rep_tag;
3212         enic_fm_open_scratch(fm);
3213         fm_tcam_entry = &fm->tcam_entry;
3214         fm_action = &fm->action;
3215         /* Egress rule: match WQ ID and tag+hairpin */
3216         fm_tcam_entry->ftm_data.fk_wq_id = vf->pf_wq_idx;
3217         fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
3218         fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
3219         memset(&fm_op, 0, sizeof(fm_op));
3220         fm_op.fa_op = FMOP_TAG;
3221         fm_op.tag.tag = tag;
3222         enic_fm_append_action_op(fm, &fm_op, &error);
3223         memset(&fm_op, 0, sizeof(fm_op));
3224         fm_op.fa_op = FMOP_EG_HAIRPIN;
3225         enic_fm_append_action_op(fm, &fm_op, &error);
3226         memset(&fm_op, 0, sizeof(fm_op));
3227         fm_op.fa_op = FMOP_END;
3228         enic_fm_append_action_op(fm, &fm_op, &error);
3229         attrs.group = 0;
3230         attrs.ingress = 0;
3231         attrs.egress = 1;
3232         attrs.priority = FM_HIGHEST_PRIORITY;
3233         flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
3234                                        &attrs, &error);
3235         enic_fm_close_scratch(fm);
3236         if (flow0 == NULL) {
3237                 ENICPMD_LOG(ERR, "Cannot create flow 0 for representor->VF");
3238                 return -EINVAL;
3239         }
3240         LIST_INSERT_HEAD(&pf->flows, flow0, next);
3241         /* Make this flow internal, so the user app cannot delete it */
3242         flow0->internal = 1;
3243         ENICPMD_LOG(DEBUG, "representor->VF %d flow created: wq %d -> tag %d hairpin",
3244                     vf->vf_id, vf->pf_wq_idx, tag);
3245
3246         /* Ingress: steer hairpinned to VF RQ 0 */
3247         enic_fm_open_scratch(fm);
3248         fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
3249         fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
3250         fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
3251         fm_tcam_entry->ftm_data.fk_packet_tag = tag;
3252         fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
3253         memset(&fm_op, 0, sizeof(fm_op));
3254         fm_op.fa_op = FMOP_RQ_STEER;
3255         fm_op.rq_steer.rq_index = 0;
3256         fm_op.rq_steer.vnic_handle = vf->enic.fm_vnic_handle;
3257         enic_fm_append_action_op(fm, &fm_op, &error);
3258         memset(&fm_op, 0, sizeof(fm_op));
3259         fm_op.fa_op = FMOP_END;
3260         enic_fm_append_action_op(fm, &fm_op, &error);
3261         attrs.group = 0;
3262         attrs.ingress = 1;
3263         attrs.egress = 0;
3264         attrs.priority = FM_HIGHEST_PRIORITY;
3265         flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
3266                                        &attrs, &error);
3267         enic_fm_close_scratch(fm);
3268         if (flow1 == NULL) {
3269                 ENICPMD_LOG(ERR, "Cannot create flow 1 for representor->VF");
3270                 enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
3271                 return -EINVAL;
3272         }
3273         LIST_INSERT_HEAD(&pf->flows, flow1, next);
3274         flow1->internal = 1;
3275         ENICPMD_LOG(DEBUG, "representor->VF %d flow created: tag %d hairpinned -> VF RQ %d",
3276                     vf->vf_id, tag, fm_op.rq_steer.rq_index);
3277         vf->rep2vf_flow[0] = flow0;
3278         vf->rep2vf_flow[1] = flow1;
3279         /* Done with this tag, use a different one next time */
3280         fm->vf_rep_tag++;
3281         return 0;
3282 }
3283
3284 /*
3285  * Add a low priority flow that matches all packets from VF and loops them
3286  * back to the representor.
3287  */
3288 int
3289 enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf)
3290 {
3291         struct fm_tcam_match_entry *fm_tcam_entry;
3292         struct rte_flow *flow0, *flow1;
3293         struct fm_action *fm_action;
3294         struct rte_flow_error error;
3295         struct rte_flow_attr attrs;
3296         struct fm_action_op fm_op;
3297         struct enic_flowman *fm;
3298         struct enic *pf;
3299         uint8_t tag;
3300
3301         pf = vf->pf;
3302         fm = pf->fm;
3303         tag = fm->vf_rep_tag;
3304         enic_fm_open_scratch(fm);
3305         fm_tcam_entry = &fm->tcam_entry;
3306         fm_action = &fm->action;
3307         /* Egress rule: match-any and tag+hairpin */
3308         fm_tcam_entry->ftm_data.fk_wq_id = 0;
3309         fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
3310         fm_tcam_entry->ftm_data.fk_wq_vnic = vf->enic.fm_vnic_handle;
3311         fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
3312         memset(&fm_op, 0, sizeof(fm_op));
3313         fm_op.fa_op = FMOP_TAG;
3314         fm_op.tag.tag = tag;
3315         enic_fm_append_action_op(fm, &fm_op, &error);
3316         memset(&fm_op, 0, sizeof(fm_op));
3317         fm_op.fa_op = FMOP_EG_HAIRPIN;
3318         enic_fm_append_action_op(fm, &fm_op, &error);
3319         memset(&fm_op, 0, sizeof(fm_op));
3320         fm_op.fa_op = FMOP_END;
3321         enic_fm_append_action_op(fm, &fm_op, &error);
3322         attrs.group = 0;
3323         attrs.ingress = 0;
3324         attrs.egress = 1;
3325         attrs.priority = FM_LOWEST_PRIORITY;
3326         flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
3327                                        &attrs, &error);
3328         enic_fm_close_scratch(fm);
3329         if (flow0 == NULL) {
3330                 ENICPMD_LOG(ERR, "Cannot create flow 0 for VF->representor");
3331                 return -EINVAL;
3332         }
3333         LIST_INSERT_HEAD(&pf->flows, flow0, next);
3334         /* Make this flow internal, so the user app cannot delete it */
3335         flow0->internal = 1;
3336         ENICPMD_LOG(DEBUG, "VF %d->representor flow created: wq %d (low prio) -> tag %d hairpin",
3337                     vf->vf_id, fm_tcam_entry->ftm_data.fk_wq_id, tag);
3338
3339         /* Ingress: steer hairpinned to VF rep RQ */
3340         enic_fm_open_scratch(fm);
3341         fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
3342         fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
3343         fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
3344         fm_tcam_entry->ftm_data.fk_packet_tag = tag;
3345         fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
3346         memset(&fm_op, 0, sizeof(fm_op));
3347         fm_op.fa_op = FMOP_RQ_STEER;
3348         fm_op.rq_steer.rq_index = vf->pf_rq_sop_idx;
3349         fm_op.rq_steer.vnic_handle = pf->fm_vnic_handle;
3350         enic_fm_append_action_op(fm, &fm_op, &error);
3351         memset(&fm_op, 0, sizeof(fm_op));
3352         fm_op.fa_op = FMOP_END;
3353         enic_fm_append_action_op(fm, &fm_op, &error);
3354         attrs.group = 0;
3355         attrs.ingress = 1;
3356         attrs.egress = 0;
3357         attrs.priority = FM_HIGHEST_PRIORITY;
3358         flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
3359                                        &attrs, &error);
3360         enic_fm_close_scratch(fm);
3361         if (flow1 == NULL) {
3362                 ENICPMD_LOG(ERR, "Cannot create flow 1 for VF->representor");
3363                 enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
3364                 return -EINVAL;
3365         }
3366         LIST_INSERT_HEAD(&pf->flows, flow1, next);
3367         flow1->internal = 1;
3368         ENICPMD_LOG(DEBUG, "VF %d->representor flow created: tag %d hairpinned -> PF RQ %d",
3369                     vf->vf_id, tag, vf->pf_rq_sop_idx);
3370         vf->vf2rep_flow[0] = flow0;
3371         vf->vf2rep_flow[1] = flow1;
3372         /* Done with this tag, use a different one next time */
3373         fm->vf_rep_tag++;
3374         return 0;
3375 }
3376
3377 /* Destroy representor flows created by enic_fm_add_{rep2vf,vf2rep}_flow */
3378 static void
3379 delete_rep_flows(struct enic *enic)
3380 {
3381         struct enic_vf_representor *vf;
3382         struct rte_flow_error error;
3383         struct rte_eth_dev *dev;
3384         uint32_t i;
3385
3386         RTE_ASSERT(enic_is_vf_rep(enic));
3387         vf = VF_ENIC_TO_VF_REP(enic);
3388         dev = vf->pf->rte_dev;
3389         for (i = 0; i < ARRAY_SIZE(vf->vf2rep_flow); i++) {
3390                 if (vf->vf2rep_flow[i])
3391                         enic_fm_flow_destroy(dev, vf->vf2rep_flow[i], &error);
3392         }
3393         for (i = 0; i < ARRAY_SIZE(vf->rep2vf_flow); i++) {
3394                 if (vf->rep2vf_flow[i])
3395                         enic_fm_flow_destroy(dev, vf->rep2vf_flow[i], &error);
3396         }
3397 }
3398
3399 static struct enic_flowman *
3400 begin_fm(struct enic *enic)
3401 {
3402         struct enic_vf_representor *vf;
3403         struct enic_flowman *fm;
3404
3405         /* Representor uses PF flowman */
3406         if (enic_is_vf_rep(enic)) {
3407                 vf = VF_ENIC_TO_VF_REP(enic);
3408                 fm = vf->pf->fm;
3409         } else {
3410                 fm = enic->fm;
3411         }
3412         /* Save the API caller and lock if representors exist */
3413         if (fm) {
3414                 if (fm->owner_enic->switchdev_mode)
3415                         rte_spinlock_lock(&fm->lock);
3416                 fm->user_enic = enic;
3417         }
3418         return fm;
3419 }
3420
/*
 * End an API call started with begin_fm(): clear the recorded caller and,
 * in switchdev mode (shared flowman), drop the lock begin_fm() took.
 */
static void
end_fm(struct enic_flowman *fm)
{
	/* Clear user_enic while still holding the lock (if any) */
	fm->user_enic = NULL;
	if (fm->owner_enic->switchdev_mode)
		rte_spinlock_unlock(&fm->lock);
}