net/enic: fix raw item length check

drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
		"%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
		fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct filter_v2 *filter;
	uint8_t *inner_ofst;
	uint8_t l2_proto_off;
	uint8_t l3_proto_off;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	enic_copy_item_fn *copy_item;
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
	/* Inner packet version of copy_item. */
	enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** list of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	},
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
	},
};

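/* Return 1 if the supplied mask is byte-for-byte identical to the supported
 * mask, 0 otherwise. The v1 5-tuple filter only does exact matching, so any
 * other mask is rejected.
 */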
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
		 unsigned int size)
{
	unsigned int i;
	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
			return 0;
	}
	return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;

	return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;

	return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;

	return 0;
}

/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
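/*
 * Illustrative example (offsets derived from the copy functions below): for
 * the pattern vxlan / eth / ipv4 / udp, the inner headers accumulate in the
 * L5 buffer back to back:
 *   bytes  0..7   vxlan header (copied by enic_copy_item_vxlan_v2)
 *   bytes  8..21  inner ethernet header
 *   bytes 22..41  inner ipv4 header
 *   bytes 42..49  inner udp header
 * l2_proto_off/l3_proto_off then point at the EtherType and next-protocol
 * fields of headers already in the buffer.
 */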
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
		  const void *val, const void *mask, uint8_t val_size,
		  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
	uint8_t *l5_mask, *l5_val;
	uint8_t start_off;

	/* No space left in the L5 pattern buffer. */
	start_off = *inner_ofst;
	if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
		return ENOTSUP;
	l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
	l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
	/* Copy the pattern into the L5 buffer. */
	if (val) {
		memcpy(l5_mask + start_off, mask, val_size);
		memcpy(l5_val + start_off, val, val_size);
	}
	/* Set the protocol field in the previous header. */
	if (proto_off) {
		void *m, *v;

		m = l5_mask + proto_off;
		v = l5_val + proto_off;
		if (proto_size == 1) {
			*(uint8_t *)m = 0xff;
			*(uint8_t *)v = (uint8_t)proto_val;
		} else if (proto_size == 2) {
			*(uint16_t *)m = 0xffff;
			*(uint16_t *)v = proto_val;
		}
	}
	/* All inner headers land in L5 buffer even if their spec is null. */
	*inner_ofst += val_size;
	return 0;
}

static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ether_hdr),
		0 /* no previous protocol */, 0, 0);
}

static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
	uint8_t eth_type_off;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	/* Append vlan header to L5 and set ether type = TPID */
	eth_type_off = arg->l2_proto_off;
	arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct vlan_hdr),
		eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
}

static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	/* Append ipv4 header to L5 and set ether type = ipv4 */
	arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv4_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
}

static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	/* Append ipv6 header to L5 and set ether type = ipv6 */
	arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv6_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
}

static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	/* Append udp header to L5 and set ip proto = udp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct udp_hdr),
		arg->l3_proto_off, IPPROTO_UDP, 1);
}

static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	/* Append tcp header to L5 and set ip proto = tcp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct tcp_hdr),
		arg->l3_proto_off, IPPROTO_TCP, 1);
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	/* outer header */
	memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
	       sizeof(struct ether_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
	       sizeof(struct ether_hdr));
	return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct ether_hdr *eth_mask;
	struct ether_hdr *eth_val;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
	eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
	/* Outer TPID cannot be matched */
	if (eth_mask->ether_type)
		return ENOTSUP;
	/*
	 * When packet matching, the VIC always compares vlan-stripped
	 * L2, regardless of vlan stripping settings. So, the inner type
	 * from vlan becomes the ether type of the eth header.
	 */
	eth_mask->ether_type = mask->inner_type;
	eth_val->ether_type = spec->inner_type;
	/* For TCI, use the vlan mask/val fields (little endian). */
	gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
	gp->val_vlan = rte_be_to_cpu_16(spec->tci);
	return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match IPv4 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV4;
	gp->val_flags |= FILTER_GENERIC_1_IPV4;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv4_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv4_hdr));
	return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match IPv6 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv6_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv6_hdr));
	return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match UDP */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct udp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct udp_hdr));
	return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match TCP */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		return ENOTSUP;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct tcp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct tcp_hdr));
	return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	FLOW_TRACE();

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		struct ipv4_hdr *ip;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		struct ipv6_hdr *ip;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct udp_hdr *udp;

	FLOW_TRACE();

	/*
	 * The NIC filter API has no flags for "match vxlan". Set UDP port to
	 * avoid false positives.
	 */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
	udp->dst_port = 0xffff;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
	udp->dst_port = RTE_BE16(4789);
	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}

/*
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
 * or geneve).
 */
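/*
 * Note: the pattern is stored at L4 + sizeof(struct udp_hdr), so the length
 * check below accounts for the UDP header when comparing against
 * FILTER_GENERIC_1_KEY_LEN; spec->length alone fitting in the key is not
 * sufficient.
 *
 * Hypothetical testpmd-style rule this path could serve (syntax shown for
 * illustration only, not exact CLI grammar):
 *   flow create 0 ingress pattern eth / ipv4 / udp dst is 6081 /
 *        raw relative is 1 offset is 0 pattern is <tunnel header bytes> /
 *        end actions queue index 1 / end
 */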
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Cannot be used for inner packet */
	if (*inner_ofst)
		return EINVAL;
	/* Need both spec and mask */
	if (!spec || !mask)
		return EINVAL;
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
		return EINVAL;
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 ||
	    spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
	    !spec->pattern || !mask->pattern)
		return EINVAL;
	/*
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 *
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	 */
	if (mask->length != 0 && mask->length < spec->length)
		return EINVAL;
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       mask->pattern, spec->length);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       spec->pattern, spec->length);

	return 0;
}

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	FLOW_TRACE();

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check if that's cool */
	if (is_first_item && item_info->valid_start_item)
		return 1;

	return 0;
}

/*
 * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5.
 * Instead it is in L4 following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
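/*
 * Before: L4 = [udp],        L5 = [vxlan][inner headers]
 * After:  L4 = [udp][vxlan], L5 = [inner headers]
 */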
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
	       uint8_t inner_ofst)
{
	uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
	uint8_t inner;
	uint8_t vxlan;

	if (!(inner_ofst > 0 && enic->vxlan))
		return;
	FLOW_TRACE();
	vxlan = sizeof(struct vxlan_hdr);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
	inner = inner_ofst - vxlan;
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
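/*
 * Example walk (derived from the loop below): for the pattern
 * eth / ipv4 / udp / vxlan / eth / end, the first four items use the outer
 * copy functions; once the vxlan handler sets inner_ofst > 0, remaining
 * items are dispatched to their inner_copy_item handlers instead.
 */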
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct enic *enic,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	struct copy_item_args args;
	enic_copy_item_fn *copy_fn;
	u8 is_first_item = 1;

	FLOW_TRACE();

	prev_item = 0;

	args.filter = enic_filter;
	args.inner_ofst = &inner_ofst;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If the
		 * copy function is NULL, the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL ||
		    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
			rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Unsupported item.");
			return -rte_errno;
		}

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		args.item = item;
		copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
			item_info->copy_item;
		ret = copy_fn(&args);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, };
	uint32_t overlap = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   using 0.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			 */
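			/*
			 * For example, mark ID 5 is stored as filter ID 6;
			 * mark IDs 0xfffe and above are rejected by the
			 * check below.
			 */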
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return EINVAL;
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 &&
				(rss->types == 0 ||
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
				rss->key_len == 0;
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
			if (!allow)
				return ENOTSUP;
			if (overlap & FATE)
				return ENOTSUP;
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
				return ENOTSUP;
			overlap |= FATE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
			 */
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			passthru = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
		return ENOTSUP;
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];

	return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;
	uint8_t actions;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
			       enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
	}
}
1373
1374 /* Debug function to dump internal NIC filter structure. */
1375 static void
1376 enic_dump_filter(const struct filter_v2 *filt)
1377 {
1378         const struct filter_generic_1 *gp;
1379         int i, j, mbyte;
1380         char buf[128], *bp;
1381         char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1382         char l4csum[16], ipfrag[16];
1383
1384         switch (filt->type) {
1385         case FILTER_IPV4_5TUPLE:
1386                 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1387                 break;
1388         case FILTER_USNIC_IP:
1389         case FILTER_DPDK_1:
1390                 /* FIXME: this should be a loop */
1391                 gp = &filt->u.generic_1;
1392                 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x",
1393                          gp->val_vlan, gp->mask_vlan);
1394
1395                 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1396                         sprintf(ip4, "%s ",
1397                                 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1398                                  ? "ip4(y)" : "ip4(n)");
1399                 else
1400                         sprintf(ip4, "%s ", "ip4(x)");
1401
1402                 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
1403                         sprintf(ip6, "%s ",
1404                                 (gp->val_flags & FILTER_GENERIC_1_IPV6)
1405                                  ? "ip6(y)" : "ip6(n)");
1406                 else
1407                         sprintf(ip6, "%s ", "ip6(x)");
1408
1409                 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1410                         sprintf(udp, "%s ",
1411                                 (gp->val_flags & FILTER_GENERIC_1_UDP)
1412                                  ? "udp(y)" : "udp(n)");
1413                 else
1414                         sprintf(udp, "%s ", "udp(x)");
1415
1416                 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1417                         sprintf(tcp, "%s ",
1418                                 (gp->val_flags & FILTER_GENERIC_1_TCP)
1419                                  ? "tcp(y)" : "tcp(n)");
1420                 else
1421                         sprintf(tcp, "%s ", "tcp(x)");
1422
1423                 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1424                         sprintf(tcpudp, "%s ",
1425                                 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1426                                  ? "tcpudp(y)" : "tcpudp(n)");
1427                 else
1428                         sprintf(tcpudp, "%s ", "tcpudp(x)");
1429
1430                 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1431                         sprintf(ip4csum, "%s ",
1432                                 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1433                                  ? "ip4csum(y)" : "ip4csum(n)");
1434                 else
1435                         sprintf(ip4csum, "%s ", "ip4csum(x)");
1436
1437                 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1438                         sprintf(l4csum, "%s ",
1439                                 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1440                                  ? "l4csum(y)" : "l4csum(n)");
1441                 else
1442                         sprintf(l4csum, "%s ", "l4csum(x)");
1443
1444                 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1445                         sprintf(ipfrag, "%s ",
1446                                 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1447                                  ? "ipfrag(y)" : "ipfrag(n)");
1448                 else
1449                         sprintf(ipfrag, "%s ", "ipfrag(x)");
1450                 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s", ip4, ip6, udp,
1451                          tcp, tcpudp, ip4csum, l4csum, ipfrag);
1452
1453                 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1454                         mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1455                         while (mbyte >= 0 && !gp->layer[i].mask[mbyte])
1456                                 mbyte--;
1457                         if (mbyte < 0) /* skip layers with an all-zero mask */
1458                                 continue;
1459
1460                         bp = buf;
1461                         for (j = 0; j <= mbyte; j++) {
1462                                 sprintf(bp, "%02x",
1463                                         gp->layer[i].mask[j]);
1464                                 bp += 2;
1465                         }
1466                         *bp = '\0';
1467                         FLOW_LOG(INFO, "\tL%u mask: %s", i + 2, buf);
1468                         bp = buf;
1469                         for (j = 0; j <= mbyte; j++) {
1470                                 sprintf(bp, "%02x",
1471                                         gp->layer[i].val[j]);
1472                                 bp += 2;
1473                         }
1474                         *bp = '\0';
1475                         FLOW_LOG(INFO, "\tL%u  val: %s", i + 2, buf);
1476                 }
1477                 break;
1478         default:
1479                 FLOW_LOG(INFO, "FILTER UNKNOWN");
1480                 break;
1481         }
1482 }
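
/*
 * Illustrative output of the dump above (values are made up) for a
 * generic filter matching outer UDP plus a 4-byte field in the L3 header:
 *
 *   Filter: vlan: 0x0000, mask: 0x0000
 *       Flags: ip4(y) ip6(x) udp(y) tcp(x) tcpudp(x) ip4csum(x) l4csum(x) ipfrag(x)
 *       L3 mask: ffffffff
 *       L3  val: c0a80101
 */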
1483
1484 /* Debug function to dump internal NIC flow structures. */
1485 static void
1486 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1487 {
1488         enic_dump_filter(filt);
1489         enic_dump_actions(ea);
1490 }
1491
1493 /**
1494  * Internal flow parse/validate function.
1495  *
1496  * @param dev[in] This device pointer.
1497  * @param attrs[in] Flow attributes; only plain ingress rules are accepted.
1498  * @param pattern[in] Flow item pattern to validate and copy.
1499  * @param actions[in] Flow actions to validate and copy.
1500  * @param error[out] Filled in if the flow is rejected.
1501  * @param enic_filter[out]
1502  *   Internal NIC filter structure pointer.
1503  * @param enic_action[out]
1504  *   Internal NIC action structure pointer.
1505  */
1506 static int
1507 enic_flow_parse(struct rte_eth_dev *dev,
1508                 const struct rte_flow_attr *attrs,
1509                 const struct rte_flow_item pattern[],
1510                 const struct rte_flow_action actions[],
1511                 struct rte_flow_error *error,
1512                 struct filter_v2 *enic_filter,
1513                 struct filter_action_v2 *enic_action)
1514 {
1515         int ret = 0;
1516         struct enic *enic = pmd_priv(dev);
1517         const struct enic_filter_cap *enic_filter_cap;
1518         const struct enic_action_cap *enic_action_cap;
1519         const struct rte_flow_action *action;
1520
1521         FLOW_TRACE();
1522
1523         memset(enic_filter, 0, sizeof(*enic_filter));
1524         memset(enic_action, 0, sizeof(*enic_action));
1525
1526         if (!pattern) {
1527                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1528                                    NULL, "No pattern specified");
1529                 return -rte_errno;
1530         }
1531
1532         if (!actions) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1535                                    NULL, "No action specified");
1536                 return -rte_errno;
1537         }
1538
1539         if (attrs) {
1540                 if (attrs->group) {
1541                         rte_flow_error_set(error, ENOTSUP,
1542                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1543                                            NULL,
1544                                            "priority groups are not supported");
1545                         return -rte_errno;
1546                 } else if (attrs->priority) {
1547                         rte_flow_error_set(error, ENOTSUP,
1548                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1549                                            NULL,
1550                                            "priorities are not supported");
1551                         return -rte_errno;
1552                 } else if (attrs->egress) {
1553                         rte_flow_error_set(error, ENOTSUP,
1554                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1555                                            NULL,
1556                                            "egress is not supported");
1557                         return -rte_errno;
1558                 } else if (attrs->transfer) {
1559                         rte_flow_error_set(error, ENOTSUP,
1560                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1561                                            NULL,
1562                                            "transfer is not supported");
1563                         return -rte_errno;
1564                 } else if (!attrs->ingress) {
1565                         rte_flow_error_set(error, ENOTSUP,
1566                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1567                                            NULL,
1568                                            "only ingress is supported");
1569                         return -rte_errno;
1570                 }
1571
1572         } else {
1573                 rte_flow_error_set(error, EINVAL,
1574                                    RTE_FLOW_ERROR_TYPE_ATTR,
1575                                    NULL, "No attribute specified");
1576                 return -rte_errno;
1577         }
1578
1579         /* Verify Actions. */
1580         enic_action_cap = enic_get_action_cap(enic);
1581         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1582              action++) {
1583                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1584                         continue;
1585                 else if (!enic_match_action(action, enic_action_cap->actions))
1586                         break;
1587         }
1588         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1589                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1590                                    action, "Invalid action.");
1591                 return -rte_errno;
1592         }
1593         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1594         if (ret) {
1595                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1596                            NULL, "Unsupported action.");
1597                 return -rte_errno;
1598         }
1599
1600         /* Verify flow items. If copying the filter from flow format to enic
1601          * format fails, the flow is not supported.
1602          */
1603         enic_filter_cap = enic_get_filter_cap(enic);
1604         if (enic_filter_cap == NULL) {
1605                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1606                            NULL, "Flow API not available");
1607                 return -rte_errno;
1608         }
1609         enic_filter->type = enic->flow_filter_mode;
1610         ret = enic_copy_filter(pattern, enic_filter_cap, enic,
1611                                enic_filter, error);
1612         return ret;
1613 }
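
/*
 * For reference, the only attribute combination enic_flow_parse() accepts
 * is a plain ingress rule:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *
 * Non-zero group, priority, egress, or transfer fields are all rejected
 * with ENOTSUP, and ingress must be set.
 */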
1614
1615 /**
1616  * Push filter/action to the NIC.
1617  *
1618  * @param enic[in]
1619  *   Device structure pointer.
1620  * @param enic_filter[in]
1621  *   Internal NIC filter structure pointer.
1622  * @param enic_action[in]
1623  *   Internal NIC action structure pointer.
1624  * @param error[out]
1625  */
1626 static struct rte_flow *
1627 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1628                    struct filter_action_v2 *enic_action,
1629                    struct rte_flow_error *error)
1630 {
1631         struct rte_flow *flow;
1632         int err;
1633         uint16_t entry;
1634         int ctr_idx;
1635         int last_max_flow_ctr;
1636
1637         FLOW_TRACE();
1638
1639         flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1640         if (!flow) {
1641                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1642                                    NULL, "cannot allocate flow memory");
1643                 return NULL;
1644         }
1645
1646         flow->counter_idx = -1;
1647         last_max_flow_ctr = -1;
1648         if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1649                 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1650                         rte_flow_error_set(error, ENOMEM,
1651                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1652                                            NULL, "cannot allocate counter");
1653                         goto unwind_flow_alloc;
1654                 }
1655                 flow->counter_idx = ctr_idx;
1656                 enic_action->counter_index = ctr_idx;
1657
1658                 /* If index is the largest, increase the counter DMA size */
1659                 if (ctr_idx > enic->max_flow_counter) {
1660                         err = vnic_dev_counter_dma_cfg(enic->vdev,
1661                                                  VNIC_FLOW_COUNTER_UPDATE_MSECS,
1662                                                  ctr_idx + 1);
1663                         if (err) {
1664                                 rte_flow_error_set(error, -err,
1665                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1666                                            NULL, "counter DMA config failed");
1667                                 goto unwind_ctr_alloc;
1668                         }
1669                         last_max_flow_ctr = enic->max_flow_counter;
1670                         enic->max_flow_counter = ctr_idx;
1671                 }
1672         }
1673
1674         /* entry[in] is the queue id, entry[out] is the filter id for delete */
1675         entry = enic_action->rq_idx;
1676         err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1677                                   enic_action);
1678         if (err) {
1679                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1680                                    NULL, "vnic_dev_classifier error");
1681                 goto unwind_ctr_dma_cfg;
1682         }
1683
1684         flow->enic_filter_id = entry;
1685         flow->enic_filter = *enic_filter;
1686
1687         return flow;
1688
1689 /* unwind if there are errors */
1690 unwind_ctr_dma_cfg:
1691         if (last_max_flow_ctr != -1) {
1692                 /* reduce counter DMA size */
1693                 vnic_dev_counter_dma_cfg(enic->vdev,
1694                                          VNIC_FLOW_COUNTER_UPDATE_MSECS,
1695                                          last_max_flow_ctr + 1);
1696                 enic->max_flow_counter = last_max_flow_ctr;
1697         }
1698 unwind_ctr_alloc:
1699         if (flow->counter_idx != -1)
1700                 vnic_dev_counter_free(enic->vdev, ctr_idx);
1701 unwind_flow_alloc:
1702         rte_free(flow);
1703         return NULL;
1704 }
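
/*
 * Worked example of the counter DMA sizing in enic_flow_add_filter()
 * (values are made up): if enic->max_flow_counter is 7 and the newly
 * allocated ctr_idx is 8, the DMA area is grown to ctr_idx + 1 = 9
 * counters. If the classifier add then fails, the unwind path shrinks it
 * back to last_max_flow_ctr + 1 = 8 counters and frees the counter.
 */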
1705
1706 /**
1707  * Remove filter/action from the NIC.
1708  *
1709  * @param enic[in]
1710  *   Device structure pointer.
1711  * @param flow[in]
1712  *   Flow structure holding the NIC filter id and the flow counter
1713  *   index to remove.
1714  * @param error[out]
1715  *   Filled in if an error occurs.
1716  */
1717 static int
1718 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1719                    struct rte_flow_error *error)
1720 {
1721         u16 filter_id;
1722         int err;
1723
1724         FLOW_TRACE();
1725
1726         filter_id = flow->enic_filter_id;
1727         err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1728         if (err) {
1729                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1730                                    NULL, "vnic_dev_classifier failed");
1731                 return -err;
1732         }
1733
1734         if (flow->counter_idx != -1) {
1735                 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1736                         dev_err(enic, "counter free failed, idx: %d\n",
1737                                 flow->counter_idx);
1738                 flow->counter_idx = -1;
1739         }
1740         return 0;
1741 }
1742
1743 /*
1744  * The following functions are callbacks for Generic flow API.
1745  */
1746
1747 /**
1748  * Validate a flow supported by the NIC.
1749  *
1750  * @see rte_flow_validate()
1751  * @see rte_flow_ops
1752  */
1753 static int
1754 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1755                    const struct rte_flow_item pattern[],
1756                    const struct rte_flow_action actions[],
1757                    struct rte_flow_error *error)
1758 {
1759         struct filter_v2 enic_filter;
1760         struct filter_action_v2 enic_action;
1761         int ret;
1762
1763         FLOW_TRACE();
1764
1765         ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1766                                &enic_filter, &enic_action);
1767         if (!ret)
1768                 enic_dump_flow(&enic_action, &enic_filter);
1769         return ret;
1770 }
1771
1772 /**
1773  * Create a flow supported by the NIC.
1774  *
1775  * @see rte_flow_create()
1776  * @see rte_flow_ops
1777  */
1778 static struct rte_flow *
1779 enic_flow_create(struct rte_eth_dev *dev,
1780                  const struct rte_flow_attr *attrs,
1781                  const struct rte_flow_item pattern[],
1782                  const struct rte_flow_action actions[],
1783                  struct rte_flow_error *error)
1784 {
1785         int ret;
1786         struct filter_v2 enic_filter;
1787         struct filter_action_v2 enic_action;
1788         struct rte_flow *flow;
1789         struct enic *enic = pmd_priv(dev);
1790
1791         FLOW_TRACE();
1792
1793         ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1794                               &enic_action);
1795         if (ret < 0)
1796                 return NULL;
1797
1798         rte_spinlock_lock(&enic->flows_lock);
1799         flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1800                                     error);
1801         if (flow)
1802                 LIST_INSERT_HEAD(&enic->flows, flow, next);
1803         rte_spinlock_unlock(&enic->flows_lock);
1804
1805         return flow;
1806 }
1807
1808 /**
1809  * Destroy a flow supported by the NIC.
1810  *
1811  * @see rte_flow_destroy()
1812  * @see rte_flow_ops
1813  */
1814 static int
1815 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1816                   struct rte_flow_error *error)
1817 {
1818         struct enic *enic = pmd_priv(dev);
1819
1820         FLOW_TRACE();
1821
1822         rte_spinlock_lock(&enic->flows_lock);
1823         enic_flow_del_filter(enic, flow, error);
1824         LIST_REMOVE(flow, next);
1825         rte_spinlock_unlock(&enic->flows_lock);
1826         rte_free(flow);
1827         return 0;
1828 }
1829
1830 /**
1831  * Flush all flows on the device.
1832  *
1833  * @see rte_flow_flush()
1834  * @see rte_flow_ops
1835  */
1836 static int
1837 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1838 {
1839         struct rte_flow *flow;
1840         struct enic *enic = pmd_priv(dev);
1841
1842         FLOW_TRACE();
1843
1844         rte_spinlock_lock(&enic->flows_lock);
1845
1846         while (!LIST_EMPTY(&enic->flows)) {
1847                 flow = LIST_FIRST(&enic->flows);
1848                 enic_flow_del_filter(enic, flow, error);
1849                 LIST_REMOVE(flow, next);
1850                 rte_free(flow);
1851         }
1852         rte_spinlock_unlock(&enic->flows_lock);
1853         return 0;
1854 }
1855
1856 static int
1857 enic_flow_query_count(struct rte_eth_dev *dev,
1858                       struct rte_flow *flow, void *data,
1859                       struct rte_flow_error *error)
1860 {
1861         struct enic *enic = pmd_priv(dev);
1862         struct rte_flow_query_count *query;
1863         uint64_t packets, bytes;
1864
1865         FLOW_TRACE();
1866
1867         if (flow->counter_idx == -1) {
1868                 return rte_flow_error_set(error, ENOTSUP,
1869                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1870                                           NULL,
1871                                           "flow does not have counter");
1872         }
1873         query = (struct rte_flow_query_count *)data;
1874         if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1875                                     !!query->reset, &packets, &bytes)) {
1876                 return rte_flow_error_set
1877                         (error, EINVAL,
1878                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1879                          NULL,
1880                          "cannot read counter");
1881         }
1882         query->hits_set = 1;
1883         query->bytes_set = 1;
1884         query->hits = packets;
1885         query->bytes = bytes;
1886         return 0;
1887 }
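
/*
 * Sketch of the application-side call that reaches enic_flow_query_count()
 * via rte_flow_query(); port_id and flow are hypothetical here:
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	const struct rte_flow_action count_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, count_actions, &count, &err) == 0)
 *		printf("hits: %" PRIu64 ", bytes: %" PRIu64 "\n",
 *		       count.hits, count.bytes);
 */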
1888
1889 static int
1890 enic_flow_query(struct rte_eth_dev *dev,
1891                 struct rte_flow *flow,
1892                 const struct rte_flow_action *actions,
1893                 void *data,
1894                 struct rte_flow_error *error)
1895 {
1896         int ret = 0;
1897
1898         FLOW_TRACE();
1899
1900         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1901                 switch (actions->type) {
1902                 case RTE_FLOW_ACTION_TYPE_VOID:
1903                         break;
1904                 case RTE_FLOW_ACTION_TYPE_COUNT:
1905                         ret = enic_flow_query_count(dev, flow, data, error);
1906                         break;
1907                 default:
1908                         return rte_flow_error_set(error, ENOTSUP,
1909                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1910                                                   actions,
1911                                                   "action not supported");
1912                 }
1913                 if (ret < 0)
1914                         return ret;
1915         }
1916         return 0;
1917 }
1918
1919 /**
1920  * Flow callback registration.
1921  *
1922  * @see rte_flow_ops
1923  */
1924 const struct rte_flow_ops enic_flow_ops = {
1925         .validate = enic_flow_validate,
1926         .create = enic_flow_create,
1927         .destroy = enic_flow_destroy,
1928         .flush = enic_flow_flush,
1929         .query = enic_flow_query,
1930 };
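
/*
 * Minimal usage sketch, assuming port_id is an initialized enic port and
 * RX queue 0 exists; ip_spec/ip_mask stand in for real rte_flow_item_ipv4
 * spec and mask structures:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * rte_flow_create() dispatches to enic_flow_create() above through this
 * ops table.
 */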