/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)
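
/*
 * Illustrative usage: FLOW_LOG(ERR, "bad mask") expands to
 * rte_log(RTE_LOG_ERR, enicpmd_logtype_flow, "bad mask\n"),
 * so callers do not supply a trailing newline.
 */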

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
        uint8_t l2_proto_off;
        uint8_t l3_proto_off;
        struct enic *enic;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
        /* Inner packet version of copy_item. */
        enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types */
        enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * In this mode, layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
                  const void *val, const void *mask, uint8_t val_size,
                  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
        uint8_t *l5_mask, *l5_val;
        uint8_t start_off;

        /* No space left in the L5 pattern buffer. */
        start_off = *inner_ofst;
        if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
                return ENOTSUP;
        l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
        l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
        /* Copy the pattern into the L5 buffer. */
        if (val) {
                memcpy(l5_mask + start_off, mask, val_size);
                memcpy(l5_val + start_off, val, val_size);
        }
        /* Set the protocol field in the previous header. */
        if (proto_off) {
                void *m, *v;

                m = l5_mask + proto_off;
                v = l5_val + proto_off;
                if (proto_size == 1) {
                        *(uint8_t *)m = 0xff;
                        *(uint8_t *)v = (uint8_t)proto_val;
                } else if (proto_size == 2) {
                        *(uint16_t *)m = 0xffff;
                        *(uint16_t *)v = proto_val;
                }
        }
        /* All inner headers land in L5 buffer even if their spec is null. */
        *inner_ofst += val_size;
        return 0;
}
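
/*
 * Illustrative L5 layout: for the inner part of a pattern like
 * "... udp / vxlan / eth / ipv4 / udp", successive calls append to the
 * L5 key buffer roughly as
 *
 *   offset 0:  vxlan header (8 bytes, copied by enic_copy_item_vxlan_v2
 *              below)
 *   offset 8:  inner ethernet header (14 bytes)
 *   offset 22: inner ipv4 header (20 bytes)
 *   offset 42: inner udp header (8 bytes)
 *
 * while each call also patches the EtherType/IP-proto field of the
 * header appended before it.
 */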

static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ether_hdr),
                0 /* no previous protocol */, 0, 0);
}

static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;
        uint8_t eth_type_off;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        /* Append vlan header to L5 and set ether type = TPID */
        eth_type_off = arg->l2_proto_off;
        arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
                eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
}

static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        /* Append ipv4 header to L5 and set ether type = ipv4 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2);
}

static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        /* Append ipv6 header to L5 and set ether type = ipv6 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2);
}

static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        /* Append udp header to L5 and set ip proto = udp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_udp_hdr),
                arg->l3_proto_off, IPPROTO_UDP, 1);
}

static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        /* Append tcp header to L5 and set ip proto = tcp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
                arg->l3_proto_off, IPPROTO_TCP, 1);
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        struct rte_ether_hdr enic_spec;
        struct rte_ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        /* outer header */
        memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
               sizeof(struct rte_ether_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
               sizeof(struct rte_ether_hdr));
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_ether_hdr *eth_mask;
        struct rte_ether_hdr *eth_val;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
        eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
        /* Outer TPID cannot be matched */
        if (eth_mask->ether_type)
                return ENOTSUP;
        /*
         * For recent models:
         * When matching packets, the VIC always compares the vlan-stripped
         * L2 header, regardless of the vlan stripping setting. So, the
         * inner type from the vlan item becomes the ether type of the eth
         * header.
         *
         * Older models without the hardware vxlan parser behave differently
         * when vlan stripping is disabled: in that case, the vlan tag
         * remains in the L2 buffer.
         */
        if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
                struct rte_vlan_hdr *vlan;

                vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
                vlan->eth_proto = mask->inner_type;
                vlan = (struct rte_vlan_hdr *)(eth_val + 1);
                vlan->eth_proto = spec->inner_type;
        } else {
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;
        }
        /* For TCI, use the vlan mask/val fields (little endian). */
        gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
        gp->val_vlan = rte_be_to_cpu_16(spec->tci);
        return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv4 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV4;
        gp->val_flags |= FILTER_GENERIC_1_IPV4;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv4_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv4_hdr));
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv6_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv6_hdr));
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_udp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_udp_hdr));
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        /* Unlike UDP, a NULL mask is not defaulted; it is rejected. */
        if (!mask)
                return ENOTSUP;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_tcp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_tcp_hdr));
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct rte_ipv4_hdr *ip;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct rte_ipv6_hdr *ip;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_udp_hdr *udp;

        FLOW_TRACE();

        /*
         * The NIC filter API has no flags for "match vxlan". Set the UDP
         * destination port to the IANA-assigned VXLAN port (4789) to avoid
         * false positives.
         */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
        udp->dst_port = 0xffff;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
        udp->dst_port = RTE_BE16(4789);
        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct rte_vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct rte_vxlan_hdr));

        *inner_ofst = sizeof(struct rte_vxlan_hdr);
        return 0;
}
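
/*
 * Illustrative example: a typical pattern handled by this function is
 *
 *   eth / ipv4 / udp / vxlan / eth / ipv4 / udp
 *
 * The vxlan item pins the outer UDP destination port to 4789 and starts
 * the inner (L5) portion of the match; the items after it are handled by
 * the inner_copy_item functions above.
 */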

/*
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
 * or geneve).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_raw *spec = item->spec;
        const struct rte_flow_item_raw *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Cannot be used for inner packet */
        if (*inner_ofst)
                return EINVAL;
        /* Need both spec and mask */
        if (!spec || !mask)
                return EINVAL;
        /* Only supports relative with offset 0 */
        if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
                return EINVAL;
        /* Need non-null pattern that fits within the NIC's filter pattern */
        if (spec->length == 0 ||
            spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
            !spec->pattern || !mask->pattern)
                return EINVAL;
        /*
         * Mask fields, including length, are often set to zero. Assume that
         * means "same as spec" to avoid breaking existing apps. If length
         * is not zero, then it should be >= spec length.
         *
         * No more pattern follows this, so append to the L4 layer instead of
         * L5 to work with both recent and older VICs.
         */
        if (mask->length != 0 && mask->length < spec->length)
                return EINVAL;
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               mask->pattern, spec->length);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               spec->pattern, spec->length);

        return 0;
}
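
/*
 * Illustrative example: a raw item can match a geneve header, which the
 * item tables above have no dedicated entry for, e.g.
 *
 *   eth / ipv4 / udp / raw (relative=1, offset=0, pattern=<geneve header>)
 *
 * The pattern bytes are appended right after the UDP header in the L4 key.
 */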

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's allowed */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
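
/*
 * Illustrative example: with enic_items_v2, UDP lists IPV4 and IPV6 as
 * valid previous items and has valid_start_item = 0, so "ipv4 / udp"
 * passes this check while a pattern starting directly with "udp" fails.
 */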

/*
 * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5
 * and leaves it in L4, following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
               uint8_t inner_ofst)
{
        uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
        uint8_t inner;
        uint8_t vxlan;

        if (!(inner_ofst > 0 && enic->vxlan))
                return;
        FLOW_TRACE();
        vxlan = sizeof(struct rte_vxlan_hdr);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
        inner = inner_ofst - vxlan;
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
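
/*
 * Illustrative before/after, continuing the L5 layout example above:
 *
 *   before: L4 = [udp], L5 = [vxlan][inner eth][inner ipv4][inner udp]
 *   after:  L4 = [udp][vxlan], L5 = [inner eth][inner ipv4][inner udp]
 */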

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct enic *enic,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        enic_copy_item_fn *copy_fn;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        args.enic = enic;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If the
                 * item's copy_item handler is NULL, the NIC does not
                 * support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL ||
                    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
                        item_info->copy_item;
                ret = copy_fn(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
        bool passthru = false;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
                        /*
                         * Like RSS above, PASSTHRU + MARK may be used to
                         * "mark and then receive normally". MARK usually comes
                         * after PASSTHRU, so remember we have seen passthru
                         * and check for mark later.
                         */
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        passthru = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        /* Only PASSTHRU + MARK is allowed */
        if (passthru && !(overlap & MARK))
                return ENOTSUP;
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
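
/*
 * Illustrative action combinations, given the checks above:
 *
 *   queue 3 / mark 5  -> RQ steering + filter_id 6 (mark ID + 1)
 *   passthru / mark 5 -> "mark and then receive normally"
 *   flag              -> rejected: no fate action (queue/drop/rss/passthru)
 *   queue 1 / drop    -> rejected: two fate actions overlap
 */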

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

1336 /** Get the actions for this NIC version. */
1337 static const struct enic_action_cap *
1338 enic_get_action_cap(struct enic *enic)
1339 {
1340         const struct enic_action_cap *ea;
1341         uint8_t actions;
1342
1343         actions = enic->filter_actions;
1344         if (actions & FILTER_ACTION_DROP_FLAG)
1345                 ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1346         else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1347                 ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1348         else
1349                 ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1350         return ea;
1351 }
1352
1353 /* Debug function to dump internal NIC action structure. */
1354 static void
1355 enic_dump_actions(const struct filter_action_v2 *ea)
1356 {
1357         if (ea->type == FILTER_ACTION_RQ_STEERING) {
1358                 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1359         } else if (ea->type == FILTER_ACTION_V2) {
1360                 FLOW_LOG(INFO, "Actions(V2)\n");
1361                 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
1362                         FLOW_LOG(INFO, "\tqueue: %u\n",
1363                                enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1364                 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1365                         FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1366         }
1367 }
1368
1369 /* Debug function to dump internal NIC filter structure. */
1370 static void
1371 enic_dump_filter(const struct filter_v2 *filt)
1372 {
1373         const struct filter_generic_1 *gp;
1374         int i, j, mbyte;
1375         char buf[128], *bp;
1376         char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1377         char l4csum[16], ipfrag[16];
1378
1379         switch (filt->type) {
1380         case FILTER_IPV4_5TUPLE:
1381                 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1382                 break;
1383         case FILTER_USNIC_IP:
1384         case FILTER_DPDK_1:
1385                 /* FIXME: this should be a loop */
1386                 gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x",
                       gp->val_vlan, gp->mask_vlan);
1389
1390                 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1391                         sprintf(ip4, "%s ",
1392                                 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1393                                  ? "ip4(y)" : "ip4(n)");
1394                 else
1395                         sprintf(ip4, "%s ", "ip4(x)");
1396
                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");
1403
1404                 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1405                         sprintf(udp, "%s ",
1406                                 (gp->val_flags & FILTER_GENERIC_1_UDP)
1407                                  ? "udp(y)" : "udp(n)");
1408                 else
1409                         sprintf(udp, "%s ", "udp(x)");
1410
1411                 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1412                         sprintf(tcp, "%s ",
1413                                 (gp->val_flags & FILTER_GENERIC_1_TCP)
1414                                  ? "tcp(y)" : "tcp(n)");
1415                 else
1416                         sprintf(tcp, "%s ", "tcp(x)");
1417
1418                 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1419                         sprintf(tcpudp, "%s ",
1420                                 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1421                                  ? "tcpudp(y)" : "tcpudp(n)");
1422                 else
1423                         sprintf(tcpudp, "%s ", "tcpudp(x)");
1424
1425                 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1426                         sprintf(ip4csum, "%s ",
1427                                 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1428                                  ? "ip4csum(y)" : "ip4csum(n)");
1429                 else
1430                         sprintf(ip4csum, "%s ", "ip4csum(x)");
1431
1432                 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1433                         sprintf(l4csum, "%s ",
1434                                 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1435                                  ? "l4csum(y)" : "l4csum(n)");
1436                 else
1437                         sprintf(l4csum, "%s ", "l4csum(x)");
1438
1439                 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1440                         sprintf(ipfrag, "%s ",
1441                                 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1442                                  ? "ipfrag(y)" : "ipfrag(n)");
1443                 else
1444                         sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);
1447
1448                 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1449                         mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1450                         while (mbyte && !gp->layer[i].mask[mbyte])
1451                                 mbyte--;
                        if (mbyte == 0 && !gp->layer[i].mask[0])
                                continue;
1454
1455                         bp = buf;
1456                         for (j = 0; j <= mbyte; j++) {
1457                                 sprintf(bp, "%02x",
1458                                         gp->layer[i].mask[j]);
1459                                 bp += 2;
1460                         }
1461                         *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s", i + 2, buf);
1463                         bp = buf;
1464                         for (j = 0; j <= mbyte; j++) {
1465                                 sprintf(bp, "%02x",
1466                                         gp->layer[i].val[j]);
1467                                 bp += 2;
1468                         }
1469                         *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s", i + 2, buf);
1471                 }
1472                 break;
1473         default:
                FLOW_LOG(INFO, "FILTER UNKNOWN");
1475                 break;
1476         }
1477 }
1478
1479 /* Debug function to dump internal NIC flow structures. */
1480 static void
1481 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1482 {
1483         enic_dump_filter(filt);
1484         enic_dump_actions(ea);
1485 }
1486
1487
/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes; only ingress without group/priority is supported.
 * @param pattern[in]
 *   Flow item pattern to match.
 * @param actions[in]
 *   Actions to take on matching packets.
 * @param error[out]
 *   rte_flow error structure, filled in on failure.
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
1501 static int
1502 enic_flow_parse(struct rte_eth_dev *dev,
1503                 const struct rte_flow_attr *attrs,
1504                 const struct rte_flow_item pattern[],
1505                 const struct rte_flow_action actions[],
1506                 struct rte_flow_error *error,
1507                 struct filter_v2 *enic_filter,
1508                 struct filter_action_v2 *enic_action)
1509 {
        int ret = 0;
1511         struct enic *enic = pmd_priv(dev);
1512         const struct enic_filter_cap *enic_filter_cap;
1513         const struct enic_action_cap *enic_action_cap;
1514         const struct rte_flow_action *action;
1515
1516         FLOW_TRACE();
1517
1518         memset(enic_filter, 0, sizeof(*enic_filter));
1519         memset(enic_action, 0, sizeof(*enic_action));
1520
1521         if (!pattern) {
1522                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1523                                    NULL, "No pattern specified");
1524                 return -rte_errno;
1525         }
1526
1527         if (!actions) {
1528                 rte_flow_error_set(error, EINVAL,
1529                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1530                                    NULL, "No action specified");
1531                 return -rte_errno;
1532         }
1533
1534         if (attrs) {
1535                 if (attrs->group) {
1536                         rte_flow_error_set(error, ENOTSUP,
1537                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1538                                            NULL,
1539                                            "priority groups are not supported");
1540                         return -rte_errno;
1541                 } else if (attrs->priority) {
1542                         rte_flow_error_set(error, ENOTSUP,
1543                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1544                                            NULL,
1545                                            "priorities are not supported");
1546                         return -rte_errno;
1547                 } else if (attrs->egress) {
1548                         rte_flow_error_set(error, ENOTSUP,
1549                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1550                                            NULL,
1551                                            "egress is not supported");
1552                         return -rte_errno;
1553                 } else if (attrs->transfer) {
1554                         rte_flow_error_set(error, ENOTSUP,
1555                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1556                                            NULL,
1557                                            "transfer is not supported");
1558                         return -rte_errno;
1559                 } else if (!attrs->ingress) {
1560                         rte_flow_error_set(error, ENOTSUP,
1561                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1562                                            NULL,
1563                                            "only ingress is supported");
1564                         return -rte_errno;
1565                 }
1567         } else {
1568                 rte_flow_error_set(error, EINVAL,
1569                                    RTE_FLOW_ERROR_TYPE_ATTR,
1570                                    NULL, "No attribute specified");
1571                 return -rte_errno;
1572         }
1573
1574         /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
1576         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1577              action++) {
1578                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1579                         continue;
1580                 else if (!enic_match_action(action, enic_action_cap->actions))
1581                         break;
1582         }
1583         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1584                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1585                                    action, "Invalid action.");
1586                 return -rte_errno;
1587         }
1588         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1589         if (ret) {
1590                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1591                            NULL, "Unsupported action.");
1592                 return -rte_errno;
1593         }
1594
        /* Verify flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported.
         */
        enic_filter_cap = enic_get_filter_cap(enic);
1599         if (enic_filter_cap == NULL) {
1600                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1601                            NULL, "Flow API not available");
1602                 return -rte_errno;
1603         }
1604         enic_filter->type = enic->flow_filter_mode;
1605         ret = enic_copy_filter(pattern, enic_filter_cap, enic,
1606                                        enic_filter, error);
1607         return ret;
1608 }
1609
1610 /**
1611  * Push filter/action to the NIC.
1612  *
1613  * @param enic[in]
1614  *   Device structure pointer.
1615  * @param enic_filter[in]
1616  *   Internal NIC filter structure pointer.
1617  * @param enic_action[in]
1618  *   Internal NIC action structure pointer.
1619  * @param error[out]
1620  */
1621 static struct rte_flow *
1622 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1623                    struct filter_action_v2 *enic_action,
1624                    struct rte_flow_error *error)
1625 {
1626         struct rte_flow *flow;
1627         int err;
1628         u16 entry;
1629
1630         FLOW_TRACE();
1631
1632         flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1633         if (!flow) {
1634                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1635                                    NULL, "cannot allocate flow memory");
1636                 return NULL;
1637         }
1638
1639         /* entry[in] is the queue id, entry[out] is the filter Id for delete */
1640         entry = enic_action->rq_idx;
1641         err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1642                                   enic_action);
1643         if (err) {
1644                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1645                                    NULL, "vnic_dev_classifier error");
1646                 rte_free(flow);
1647                 return NULL;
1648         }
1649
1650         flow->enic_filter_id = entry;
1651         flow->enic_filter = *enic_filter;
1652         return flow;
1653 }
1654
/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter entry (enic_filter_id) is to be removed.
 * @param error[out]
 *   rte_flow error structure, filled in on failure.
 */
1666 static int
1667 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1668                    struct rte_flow_error *error)
1669 {
1670         u16 filter_id;
1671         int err;
1672
1673         FLOW_TRACE();
1674
1675         filter_id = flow->enic_filter_id;
1676         err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1677         if (err) {
1678                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1679                                    NULL, "vnic_dev_classifier failed");
1680                 return -err;
1681         }
1682         return 0;
1683 }
1684
1685 /*
1686  * The following functions are callbacks for Generic flow API.
1687  */
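
/*
 * Example: how an application might reach these callbacks through the
 * generic flow API. This is an illustrative sketch only (not compiled
 * into the driver); port_id and the match values are hypothetical.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0x0a000001), // 10.0.0.1
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * rte_flow_validate() lands in enic_flow_validate() below, and
 * rte_flow_create() in enic_flow_create().
 */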
1688
1689 /**
1690  * Validate a flow supported by the NIC.
1691  *
1692  * @see rte_flow_validate()
1693  * @see rte_flow_ops
1694  */
1695 static int
1696 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1697                    const struct rte_flow_item pattern[],
1698                    const struct rte_flow_action actions[],
1699                    struct rte_flow_error *error)
1700 {
1701         struct filter_v2 enic_filter;
1702         struct filter_action_v2 enic_action;
1703         int ret;
1704
1705         FLOW_TRACE();
1706
1707         ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1708                                &enic_filter, &enic_action);
1709         if (!ret)
1710                 enic_dump_flow(&enic_action, &enic_filter);
1711         return ret;
1712 }
1713
1714 /**
1715  * Create a flow supported by the NIC.
1716  *
1717  * @see rte_flow_create()
1718  * @see rte_flow_ops
1719  */
1720 static struct rte_flow *
1721 enic_flow_create(struct rte_eth_dev *dev,
1722                  const struct rte_flow_attr *attrs,
1723                  const struct rte_flow_item pattern[],
1724                  const struct rte_flow_action actions[],
1725                  struct rte_flow_error *error)
1726 {
1727         int ret;
1728         struct filter_v2 enic_filter;
1729         struct filter_action_v2 enic_action;
1730         struct rte_flow *flow;
1731         struct enic *enic = pmd_priv(dev);
1732
1733         FLOW_TRACE();
1734
1735         ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1736                               &enic_action);
1737         if (ret < 0)
1738                 return NULL;
1739
1740         rte_spinlock_lock(&enic->flows_lock);
1741         flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1742                                     error);
1743         if (flow)
1744                 LIST_INSERT_HEAD(&enic->flows, flow, next);
1745         rte_spinlock_unlock(&enic->flows_lock);
1746
1747         return flow;
1748 }
1749
1750 /**
1751  * Destroy a flow supported by the NIC.
1752  *
1753  * @see rte_flow_destroy()
1754  * @see rte_flow_ops
1755  */
1756 static int
1757 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
1759 {
1760         struct enic *enic = pmd_priv(dev);
1761
1762         FLOW_TRACE();
1763
1764         rte_spinlock_lock(&enic->flows_lock);
1765         enic_flow_del_filter(enic, flow, error);
1766         LIST_REMOVE(flow, next);
1767         rte_spinlock_unlock(&enic->flows_lock);
1768         rte_free(flow);
1769         return 0;
1770 }
1771
1772 /**
1773  * Flush all flows on the device.
1774  *
1775  * @see rte_flow_flush()
1776  * @see rte_flow_ops
1777  */
1778 static int
1779 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1780 {
1781         struct rte_flow *flow;
1782         struct enic *enic = pmd_priv(dev);
1783
1784         FLOW_TRACE();
1785
1786         rte_spinlock_lock(&enic->flows_lock);
1787
1788         while (!LIST_EMPTY(&enic->flows)) {
1789                 flow = LIST_FIRST(&enic->flows);
1790                 enic_flow_del_filter(enic, flow, error);
1791                 LIST_REMOVE(flow, next);
1792                 rte_free(flow);
1793         }
1794         rte_spinlock_unlock(&enic->flows_lock);
1795         return 0;
1796 }
1797
1798 /**
1799  * Flow callback registration.
1800  *
1801  * @see rte_flow_ops
1802  */
1803 const struct rte_flow_ops enic_flow_ops = {
1804         .validate = enic_flow_validate,
1805         .create = enic_flow_create,
1806         .destroy = enic_flow_destroy,
1807         .flush = enic_flow_flush,
1808 };
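
/*
 * Applications do not reference this table directly. rte_flow_validate()
 * and friends look it up through the PMD's filter_ctrl handler; a sketch
 * of that dispatch (the real handler lives in enic_ethdev.c):
 *
 *	if (filter_type == RTE_ETH_FILTER_GENERIC &&
 *	    filter_op == RTE_ETH_FILTER_GET)
 *		*(const void **)arg = &enic_flow_ops;
 */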