net/enic: fix endianness in VLAN match
[dpdk.git] drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
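/*
 * Each copy_item function returns 0 on success, or a positive errno
 * (ENOTSUP/EINVAL) on failure. enic_copy_filter() below converts that
 * into -rte_errno via rte_flow_error_set().
 */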

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types */
        enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
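
/*
 * With the v1 table above, the only patterns a legacy NIC accepts are
 * IPV4, IPV4+UDP and IPV4+TCP, i.e. 5-tuple perfect match.
 */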

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_RAW] = {
                .copy_item = enic_copy_item_raw_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
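
/*
 * Note: the v3 table matches v2 except that UDP, TCP and VXLAN may start
 * the pattern (valid_start_item = 1), since Advanced Filters remove the
 * requirement to specify layer 3 first.
 */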

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};
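
/*
 * enic_get_action_cap() below selects the most capable entry the NIC
 * reports: counter, then drop, then filter id, then plain RQ steering.
 */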

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}
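
/*
 * The v1 copy functions below use mask_exact_match() because 5-tuple
 * filters cannot express partial masks: anything other than the all-ones
 * mask over the supported fields is rejected with ENOTSUP.
 */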

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                /*
                 * When matching packets, the VIC always compares the
                 * vlan-stripped L2, regardless of vlan stripping settings.
                 * So the inner type from the vlan item becomes the ether
                 * type of the eth header.
                 */
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;
                /*
                 * The NIC's mask_vlan/val_vlan fields are in CPU byte
                 * order, while the TCI in the pattern is big endian, so
                 * convert it here.
                 */
                gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
                gp->val_vlan = rte_be_to_cpu_16(spec->tci);
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}
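
/*
 * Example (illustrative only): an application matching VLAN ID 100
 * passes a big-endian TCI, which the code above converts to CPU order:
 *
 *   struct rte_flow_item_vlan spec = { .tci = RTE_BE16(100) };
 *   struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 */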

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct udp_hdr *udp;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /*
         * The NIC filter API has no flags for "match vxlan". Set UDP port to
         * avoid false positives.
         */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;
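        /* 4789 is the IANA-assigned VXLAN UDP port (RFC 7348) */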
        udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
        udp->dst_port = 0xffff;
        udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
        udp->dst_port = RTE_BE16(4789);
        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}

/*
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
 * or geneve).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_raw *spec = item->spec;
        const struct rte_flow_item_raw *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Cannot be used for inner packet */
        if (*inner_ofst)
                return EINVAL;
        /* Need both spec and mask */
        if (!spec || !mask)
                return EINVAL;
        /* Only supports relative with offset 0 */
        if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
                return EINVAL;
        /* Need non-null pattern that fits within the NIC's filter pattern */
        if (spec->length == 0 || spec->length > FILTER_GENERIC_1_KEY_LEN ||
            !spec->pattern || !mask->pattern)
                return EINVAL;
        /*
         * Mask fields, including length, are often set to zero. Assume that
         * means "same as spec" to avoid breaking existing apps. If length
         * is not zero, then it should be >= spec length.
         *
         * No more pattern follows this, so append to the L4 layer instead of
         * L5 to work with both recent and older VICs.
         */
        if (mask->length != 0 && mask->length < spec->length)
                return EINVAL;
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
               mask->pattern, spec->length);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
               spec->pattern, spec->length);

        return 0;
}
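
/*
 * Example (illustrative only): to match two bytes immediately after the
 * UDP header, e.g. the start of a Geneve header, an application could
 * pass a raw item like:
 *
 *   static const uint8_t pat[2] = { 0x00, 0x00 };
 *   static const uint8_t pat_mask[2] = { 0xff, 0xff };
 *   struct rte_flow_item_raw raw_spec = {
 *           .relative = 1, .offset = 0, .length = 2, .pattern = pat,
 *   };
 *   struct rte_flow_item_raw raw_mask = {
 *           .relative = 1, .length = 2, .pattern = pat_mask,
 *   };
 */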

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check whether it is allowed
         * to start the pattern.
         */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}

/*
 * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5.
 * Instead it is in L4, following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
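/*
 * Key layout before fixup:  L4 = [udp],        L5 = [vxlan][inner pkt]
 * Key layout after fixup:   L4 = [udp][vxlan], L5 = [inner pkt]
 */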
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
               uint8_t inner_ofst)
{
        uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
        uint8_t inner;
        uint8_t vxlan;

        if (!(inner_ofst > 0 && enic->vxlan))
                return;
        FLOW_TRACE();
        vxlan = sizeof(struct vxlan_hdr);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
        inner = inner_ofst - vxlan;
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic[in]
 * @param enic_filter[out]
 *   NIC-specific filter structure derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct enic *enic,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned, the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                ret = item_info->copy_item(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}
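
/*
 * Example (illustrative only): for a pattern such as
 * ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / END, the loop above copies the
 * outer items into L2-L4, the VXLAN item sets inner_ofst, and the
 * remaining (inner) items are appended to the L5 key at that offset.
 */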

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
        bool passthru = false;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
                        /*
                         * Like RSS above, PASSTHRU + MARK may be used to
                         * "mark and then receive normally". MARK usually comes
                         * after PASSTHRU, so remember we have seen passthru
                         * and check for mark later.
                         */
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        passthru = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        /* Only PASSTHRU + MARK is allowed */
        if (passthru && !(overlap & MARK))
                return ENOTSUP;
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
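
/*
 * Example (illustrative only): "mark and receive normally" combines a
 * MARK action with PASSTHRU as the fate action, which the checks above
 * accept:
 *
 *   actions = { PASSTHRU, MARK(id), END }
 */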

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}
1295
1296 /* Debug function to dump internal NIC filter structure. */
1297 static void
1298 enic_dump_filter(const struct filter_v2 *filt)
1299 {
1300         const struct filter_generic_1 *gp;
1301         int i, j, mbyte;
1302         char buf[128], *bp;
1303         char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1304         char l4csum[16], ipfrag[16];
1305
	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x",
			 gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

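		/*
		 * For each pattern layer (layer[0] is L2), find the last
		 * byte with a non-zero mask and dump mask/val bytes up to
		 * it; a layer is skipped when no mask byte above index 0
		 * is set.
		 */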
		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u mask: %s", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u  val: %s", i + 2, buf);
		}
		break;
	default:
		FLOW_LOG(INFO, "FILTER UNKNOWN");
		break;
	}
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param[in] dev
 *   This device pointer.
 * @param[in] attrs
 *   Flow attributes.
 * @param[in] pattern
 *   Flow item pattern.
 * @param[in] actions
 *   Flow actions.
 * @param[out] error
 *   Details of any error encountered.
 * @param[out] enic_filter
 *   Internal NIC filter structure pointer.
 * @param[out] enic_action
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	FLOW_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

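	/*
	 * Only basic ingress flows are supported: group, priority,
	 * egress, and transfer attributes are all rejected.
	 */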
	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
	return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param[in] enic
 *   Device structure pointer.
 * @param[in] enic_filter
 *   Internal NIC filter structure pointer.
 * @param[in] enic_action
 *   Internal NIC action structure pointer.
 * @param[out] error
 *   Details of any error encountered.
 * @return
 *   Flow handle on success, NULL (with error set) otherwise.
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;
	int ctr_idx;
	int last_max_flow_ctr;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
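	/*
	 * If the flow has a COUNT action, allocate a counter and, when its
	 * index exceeds the current counter DMA area, grow the area. The
	 * previous size is remembered in last_max_flow_ctr so it can be
	 * restored if a later step fails.
	 */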
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If index is the largest, increase the counter DMA size */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						 VNIC_FLOW_COUNTER_UPDATE_MSECS,
						 ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param[in] enic
 *   Device structure pointer.
 * @param[in] flow
 *   Flow whose NIC filter (flow->enic_filter_id) and counter, if any,
 *   are to be removed.
 * @param[out] error
 *   Details of any error encountered.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	FLOW_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}

/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

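	/*
	 * Hold flows_lock across all deletions so no flow can be added
	 * to or removed from the list concurrently.
	 */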
	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}

static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	FLOW_TRACE();

	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
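	/*
	 * A non-zero query->reset asks for the counter to be cleared as
	 * part of the read; it is passed through as the clear flag of
	 * the counter query below.
	 */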
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}

static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};
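
/*
 * Illustrative sketch (not part of the driver): how an application might
 * exercise the callbacks above through the generic rte_flow API, steering
 * ingress IPv4/UDP packets to a queue and attaching a COUNT action so that
 * enic_flow_query_count() has something to read. The port_id argument and
 * queue index 1 are example assumptions, and the function name is
 * hypothetical; error handling is minimal.
 */
#ifdef ENIC_FLOW_USAGE_EXAMPLE
static struct rte_flow *
example_udp_steer_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* rte_flow_validate() lands in enic_flow_validate(). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	/* rte_flow_create() runs enic_flow_parse() + enic_flow_add_filter(). */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif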