/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
        uint8_t l2_proto_off;
        uint8_t l3_proto_off;
        struct enic *enic;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
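
/*
 * A minimal sketch of how one of these copy functions is driven, mirroring
 * the enic_copy_filter() loop further below (the surrounding variables are
 * hypothetical, for illustration only):
 *
 *	struct copy_item_args args = {
 *		.item = item,                   // current rte_flow item
 *		.filter = enic_filter,          // NIC filter being built
 *		.inner_ofst = &inner_ofst,      // 0 while copying outer headers
 *		.enic = enic,
 *	};
 *	ret = item_info->copy_item(&args);      // 0 on success, else an errno
 */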

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const uint8_t valid_start_item;
        /* Inner packet version of copy_item. */
        enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types */
        enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};
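
/*
 * For illustration, a pattern that the v1 table above accepts, assuming
 * hypothetical application-side spec/mask variables: IPv4 followed by UDP,
 * with fully-masked src/dst fields as required by the v1 copy functions.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */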

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
};
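
/*
 * For illustration, an action list accepted when the NIC reports
 * FILTER_ACTION_FILTER_ID_FLAG: mark matching packets with ID 42 and steer
 * them to queue 0 (assumed application code, not part of the driver):
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */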

static int
mask_exact_match(const uint8_t *supported, const uint8_t *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                ENICPMD_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                              (const uint8_t *)item->mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                ENICPMD_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                              (const uint8_t *)item->mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct rte_tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        ENICPMD_FUNC_TRACE();

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                ENICPMD_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const uint8_t *)&supported_mask,
                             (const uint8_t *)item->mask, sizeof(*mask))) {
                ENICPMD_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
                  const void *val, const void *mask, uint8_t val_size,
                  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
        uint8_t *l5_mask, *l5_val;
        uint8_t start_off;

        /* No space left in the L5 pattern buffer. */
        start_off = *inner_ofst;
        if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
                return ENOTSUP;
        l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
        l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
        /* Copy the pattern into the L5 buffer. */
        if (val) {
                memcpy(l5_mask + start_off, mask, val_size);
                memcpy(l5_val + start_off, val, val_size);
        }
        /* Set the protocol field in the previous header. */
        if (proto_off) {
                void *m, *v;

                m = l5_mask + proto_off;
                v = l5_val + proto_off;
                if (proto_size == 1) {
                        *(uint8_t *)m = 0xff;
                        *(uint8_t *)v = (uint8_t)proto_val;
                } else if (proto_size == 2) {
                        *(uint16_t *)m = 0xffff;
                        *(uint16_t *)v = proto_val;
                }
        }
        /* All inner headers land in L5 buffer even if their spec is null. */
        *inner_ofst += val_size;
        return 0;
}
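
/*
 * For illustration, how two inner items accumulate in the L5 buffer under
 * the scheme above, assuming a preceding VXLAN item set *inner_ofst to
 * sizeof(struct rte_vxlan_hdr) == 8:
 *
 *	inner eth:  copied at L5 offset 8; l2_proto_off becomes
 *	            8 + offsetof(struct rte_ether_hdr, ether_type);
 *	            *inner_ofst advances to 22
 *	inner ipv4: copied at L5 offset 22; the EtherType at l2_proto_off is
 *	            forced to IPv4 with an all-ones mask; *inner_ofst -> 42
 */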

static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ether_hdr),
                0 /* no previous protocol */, 0, 0);
}

static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;
        uint8_t eth_type_off;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        /* Append vlan header to L5 and set ether type = TPID */
        eth_type_off = arg->l2_proto_off;
        arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
                eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
}

static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        /* Append ipv4 header to L5 and set ether type = ipv4 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2);
}

static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        /* Append ipv6 header to L5 and set ether type = ipv6 */
        arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2);
}

static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        /* Append udp header to L5 and set ip proto = udp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_udp_hdr),
                arg->l3_proto_off, IPPROTO_UDP, 1);
}

static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        ENICPMD_FUNC_TRACE();
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        /* Append tcp header to L5 and set ip proto = tcp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
                arg->l3_proto_off, IPPROTO_TCP, 1);
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        struct rte_ether_hdr enic_spec;
        struct rte_ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        /* outer header */
        memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
               sizeof(struct rte_ether_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
               sizeof(struct rte_ether_hdr));
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_ether_hdr *eth_mask;
        struct rte_ether_hdr *eth_val;

        ENICPMD_FUNC_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
        eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
        /* Outer TPID cannot be matched */
        if (eth_mask->ether_type)
                return ENOTSUP;
        /*
         * For recent models:
         * When packet matching, the VIC always compares vlan-stripped
         * L2, regardless of vlan stripping settings. So, the inner type
         * from vlan becomes the ether type of the eth header.
         *
         * Older models w/o hardware vxlan parser have a different
         * behavior when vlan stripping is disabled. In this case,
         * vlan tag remains in the L2 buffer.
         */
        if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
                struct rte_vlan_hdr *vlan;

                vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
                vlan->eth_proto = mask->inner_type;
                vlan = (struct rte_vlan_hdr *)(eth_val + 1);
                vlan->eth_proto = spec->inner_type;
        } else {
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;
        }
        /* For TCI, use the vlan mask/val fields (little endian). */
        gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
        gp->val_vlan = rte_be_to_cpu_16(spec->tci);
        return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match IPv4 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV4;
        gp->val_flags |= FILTER_GENERIC_1_IPV4;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv4_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv4_hdr));
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct rte_ipv6_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct rte_ipv6_hdr));
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_udp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_udp_hdr));
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_tcp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_tcp_hdr));
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        ENICPMD_FUNC_TRACE();

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct rte_ipv4_hdr *ip;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct rte_ipv6_hdr *ip;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct rte_sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct rte_sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct rte_udp_hdr *udp;

        ENICPMD_FUNC_TRACE();

        /*
         * The NIC filter API has no flags for "match vxlan". Set UDP port to
         * avoid false positives.
         */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
        udp->dst_port = 0xffff;
        udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
        udp->dst_port = RTE_BE16(4789);
        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct rte_vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct rte_vxlan_hdr));

        *inner_ofst = sizeof(struct rte_vxlan_hdr);
        return 0;
}
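
/*
 * For illustration, an outer + inner pattern that exercises the VXLAN path,
 * assuming hypothetical application-side spec/mask variables:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// outer L2, match all
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	// outer L3, match all
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	// outer L4, match all
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,	// inner L2, lands in L5
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */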

/*
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
 * or geneve).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_raw *spec = item->spec;
        const struct rte_flow_item_raw *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        ENICPMD_FUNC_TRACE();

        /* Cannot be used for inner packet */
        if (*inner_ofst)
                return EINVAL;
        /* Need both spec and mask */
        if (!spec || !mask)
                return EINVAL;
        /* Only supports relative with offset 0 */
        if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
                return EINVAL;
        /* Need non-null pattern that fits within the NIC's filter pattern */
        if (spec->length == 0 ||
            spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
            !spec->pattern || !mask->pattern)
                return EINVAL;
        /*
         * Mask fields, including length, are often set to zero. Assume that
         * means "same as spec" to avoid breaking existing apps. If length
         * is not zero, then it should be >= spec length.
         *
         * No more pattern follows this, so append to the L4 layer instead of
         * L5 to work with both recent and older VICs.
         */
        if (mask->length != 0 && mask->length < spec->length)
                return EINVAL;
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               mask->pattern, spec->length);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               spec->pattern, spec->length);

        return 0;
}
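
/*
 * For illustration, a raw item this function accepts, assuming a
 * hypothetical 8-byte tunnel header to be matched right after the outer
 * UDP header (relative match at offset 0; a zero mask length means
 * "same as spec"):
 *
 *	static const uint8_t hdr[8] = {
 *		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
 *	static const uint8_t hdr_mask[8] = {
 *		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 1, .offset = 0,
 *		.length = sizeof(hdr), .pattern = hdr,
 *	};
 *	struct rte_flow_item_raw raw_mask = { .pattern = hdr_mask };
 */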

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, uint8_t is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        ENICPMD_FUNC_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's cool */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}

/*
 * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5.
 * Instead it is in L4 following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
               uint8_t inner_ofst)
{
        uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
        uint8_t inner;
        uint8_t vxlan;

        if (!(inner_ofst > 0 && enic->vxlan))
                return;
        ENICPMD_FUNC_TRACE();
        vxlan = sizeof(struct rte_vxlan_hdr);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
        inner = inner_ofst - vxlan;
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
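
/*
 * For illustration, the key layout before and after the fixup above, with
 * a VXLAN + inner pattern present:
 *
 *	before: L4 = [udp hdr]            L5 = [vxlan hdr][inner pattern]
 *	after:  L4 = [udp hdr][vxlan hdr] L5 = [inner pattern][zero pad]
 */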

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct enic *enic,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        uint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        enic_copy_item_fn *copy_fn;
        uint8_t is_first_item = 1;

        ENICPMD_FUNC_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        args.enic = enic;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned the nic does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL ||
                    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
                        item_info->copy_item;
                ret = copy_fn(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        ENICPMD_FUNC_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
        bool passthru = false;

        ENICPMD_FUNC_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
                        /*
                         * Like RSS above, PASSTHRU + MARK may be used to
                         * "mark and then receive normally". MARK usually comes
                         * after PASSTHRU, so remember we have seen passthru
                         * and check for mark later.
                         */
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        passthru = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        /* Only PASSTHRU + MARK is allowed */
        if (passthru && !(overlap & MARK))
                return ENOTSUP;
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
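
/*
 * For illustration, the PASSTHRU + MARK combination the function above
 * allows, i.e. "mark and then receive normally" (assumed application code):
 *
 *	struct rte_flow_action_mark mark = { .id = 7 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PASSTHRU },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */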

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                ENICPMD_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                ENICPMD_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        ENICPMD_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        ENICPMD_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	const struct filter_generic_1 *gp;
	int i, j, mbyte;
	char buf[128], *bp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
		       gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		ENICPMD_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			/* Skip the layer only if its mask is all zeroes */
			if (!gp->layer[i].mask[mbyte])
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			ENICPMD_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			ENICPMD_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
		}
		break;
	default:
		ENICPMD_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}
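
/*
 * Example output from enic_dump_filter() for a FILTER_DPDK_1 filter
 * (values hypothetical; format taken from the log calls above). In the
 * flag legend, (y) means the flag is masked in with its value bit set,
 * (n) masked in with the value bit clear, (x) not masked (don't care):
 *
 *   Filter: vlan: 0x0000, mask: 0x0000
 *           Flags: ip4(y) ip6(x) udp(y) tcp(x) tcpudp(x) ip4csum(x) l4csum(x) ipfrag(x)
 *           L3 mask: ffff0000...
 *           L3  val: c0a80000...
 */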

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes (only ingress is supported).
 * @param pattern[in]
 *   Item specifications to match.
 * @param actions[in]
 *   Actions to perform on matching traffic.
 * @param error[out]
 *   Details of any error that occurs.
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	ENICPMD_FUNC_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
			   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
			   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
	return ret;
}
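
/*
 * Illustrative sketch (not part of the driver): the kind of flow this
 * parser accepts. Attribute, item, and action names are the real
 * rte_flow ones; the specific pattern and queue index are hypothetical.
 * Only ingress flows with no group, priority, egress, or transfer
 * attributes get past the checks above.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */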

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		   struct filter_action_v2 *enic_action,
		   struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;

	ENICPMD_FUNC_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		rte_free(flow);
		return NULL;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;
	return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter is removed.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	uint16_t filter_id;
	int err;

	ENICPMD_FUNC_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}
	return 0;
}

/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	ENICPMD_FUNC_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);

	return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_free(flow);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
};
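
/*
 * Illustrative usage sketch (application side, not part of the driver):
 * the callbacks above are reached through the generic rte_flow API.
 * port_id, attr, pattern, and actions are assumed to be set up as in
 * the sketch after enic_flow_parse():
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *		...
 *		rte_flow_destroy(port_id, flow, &err);
 *	}
 */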