net: add rte prefix to ether structures
dpdk.git: drivers/net/enic/enic_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3  */
4
5 #include <errno.h>
6 #include <stdint.h>
7 #include <rte_log.h>
8 #include <rte_ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
11 #include <rte_ip.h>
12 #include <rte_udp.h>
13
14 #include "enic_compat.h"
15 #include "enic.h"
16 #include "vnic_dev.h"
17 #include "vnic_nic.h"
18
19 #define FLOW_TRACE() \
20         rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
21                 "%s()\n", __func__)
22 #define FLOW_LOG(level, fmt, args...) \
23         rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
24                 fmt "\n", ##args)
25
26 /*
27  * Common arguments passed to copy_item functions. Use this structure
28  * so we can easily add new arguments.
29  * item: Item specification.
30  * filter: Partially filled in NIC filter structure.
31  * inner_ofst: If zero, this is an outer header. If non-zero, this is
32  *   the offset into L5 where the header begins.
33  * l2_proto_off: offset to the EtherType field in the eth or vlan header.
34  * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
35  */
36 struct copy_item_args {
37         const struct rte_flow_item *item;
38         struct filter_v2 *filter;
39         uint8_t *inner_ofst;
40         uint8_t l2_proto_off;
41         uint8_t l3_proto_off;
42         struct enic *enic;
43 };
44
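/*
 * Illustrative sketch (not driver code): how the offsets above evolve while
 * the inner headers of a pattern such as "... / vxlan / eth / ipv4 / udp"
 * are copied, assuming the usual header sizes (vxlan 8B, eth 14B, ipv4 20B,
 * udp 8B):
 *
 *   after vxlan item:      inner_ofst = 8
 *   after inner eth item:  l2_proto_off = 8 + 12 = 20 (EtherType), inner_ofst = 22
 *   after inner ipv4 item: l3_proto_off = 22 + 9 = 31 (protocol),  inner_ofst = 42
 *   after inner udp item:  inner_ofst = 50
 *
 * Each copy_item function reads and updates only the fields it needs; see
 * copy_inner_common() below for how l2_proto_off/l3_proto_off are consumed.
 */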
45 /* functions for copying items into enic filters */
46 typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
47
48 /** Info about how to copy items into enic filters. */
49 struct enic_items {
50         /** Function for copying and validating an item. */
51         enic_copy_item_fn *copy_item;
52         /** List of valid previous items. */
53         const enum rte_flow_item_type * const prev_items;
54         /** True if it's OK for this item to be the first item. For some NIC
55          * versions, it's invalid to start the stack above layer 3.
56          */
57         const u8 valid_start_item;
58         /* Inner packet version of copy_item. */
59         enic_copy_item_fn *inner_copy_item;
60 };
61
62 /** Filtering capabilities for various NIC and firmware versions. */
63 struct enic_filter_cap {
64         /** list of valid items and their handlers and attributes. */
65         const struct enic_items *item_info;
66         /* Max type in the above list, used to detect unsupported types */
67         enum rte_flow_item_type max_item_type;
68 };
69
70 /* functions for copying flow actions into enic actions */
71 typedef int (copy_action_fn)(struct enic *enic,
72                              const struct rte_flow_action actions[],
73                              struct filter_action_v2 *enic_action);
74
75 /** Action capabilities for various NICs. */
76 struct enic_action_cap {
77         /** list of valid actions */
78         const enum rte_flow_action_type *actions;
79         /** copy function for a particular NIC */
80         copy_action_fn *copy_fn;
81 };
82
83 /* Forward declarations */
84 static enic_copy_item_fn enic_copy_item_ipv4_v1;
85 static enic_copy_item_fn enic_copy_item_udp_v1;
86 static enic_copy_item_fn enic_copy_item_tcp_v1;
87 static enic_copy_item_fn enic_copy_item_raw_v2;
88 static enic_copy_item_fn enic_copy_item_eth_v2;
89 static enic_copy_item_fn enic_copy_item_vlan_v2;
90 static enic_copy_item_fn enic_copy_item_ipv4_v2;
91 static enic_copy_item_fn enic_copy_item_ipv6_v2;
92 static enic_copy_item_fn enic_copy_item_udp_v2;
93 static enic_copy_item_fn enic_copy_item_tcp_v2;
94 static enic_copy_item_fn enic_copy_item_sctp_v2;
95 static enic_copy_item_fn enic_copy_item_vxlan_v2;
96 static enic_copy_item_fn enic_copy_item_inner_eth_v2;
97 static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
98 static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
99 static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
100 static enic_copy_item_fn enic_copy_item_inner_udp_v2;
101 static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
102 static copy_action_fn enic_copy_action_v1;
103 static copy_action_fn enic_copy_action_v2;
104
105 /**
106  * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
107  * is supported.
108  */
109 static const struct enic_items enic_items_v1[] = {
110         [RTE_FLOW_ITEM_TYPE_IPV4] = {
111                 .copy_item = enic_copy_item_ipv4_v1,
112                 .valid_start_item = 1,
113                 .prev_items = (const enum rte_flow_item_type[]) {
114                                RTE_FLOW_ITEM_TYPE_END,
115                 },
116                 .inner_copy_item = NULL,
117         },
118         [RTE_FLOW_ITEM_TYPE_UDP] = {
119                 .copy_item = enic_copy_item_udp_v1,
120                 .valid_start_item = 0,
121                 .prev_items = (const enum rte_flow_item_type[]) {
122                                RTE_FLOW_ITEM_TYPE_IPV4,
123                                RTE_FLOW_ITEM_TYPE_END,
124                 },
125                 .inner_copy_item = NULL,
126         },
127         [RTE_FLOW_ITEM_TYPE_TCP] = {
128                 .copy_item = enic_copy_item_tcp_v1,
129                 .valid_start_item = 0,
130                 .prev_items = (const enum rte_flow_item_type[]) {
131                                RTE_FLOW_ITEM_TYPE_IPV4,
132                                RTE_FLOW_ITEM_TYPE_END,
133                 },
134                 .inner_copy_item = NULL,
135         },
136 };
137
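/*
 * Illustrative sketch (not driver code): a flow that the v1 table above
 * accepts. Both IPv4 addresses and both L4 ports must be fully specified,
 * since only 5-tuple exact match is supported, and UDP/TCP may only follow
 * IPV4 per .prev_items. In testpmd-style notation:
 *
 *   pattern: ipv4 src is 10.0.0.1 dst is 10.0.0.2 / udp src is 100 dst is 200
 *   actions: queue index 1
 */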
138 /**
139  * NICs that have the Advanced Filters capability, but it is disabled. This
140  * means that layer 3 must be specified.
141  */
142 static const struct enic_items enic_items_v2[] = {
143         [RTE_FLOW_ITEM_TYPE_RAW] = {
144                 .copy_item = enic_copy_item_raw_v2,
145                 .valid_start_item = 0,
146                 .prev_items = (const enum rte_flow_item_type[]) {
147                                RTE_FLOW_ITEM_TYPE_UDP,
148                                RTE_FLOW_ITEM_TYPE_END,
149                 },
150                 .inner_copy_item = NULL,
151         },
152         [RTE_FLOW_ITEM_TYPE_ETH] = {
153                 .copy_item = enic_copy_item_eth_v2,
154                 .valid_start_item = 1,
155                 .prev_items = (const enum rte_flow_item_type[]) {
156                                RTE_FLOW_ITEM_TYPE_VXLAN,
157                                RTE_FLOW_ITEM_TYPE_END,
158                 },
159                 .inner_copy_item = enic_copy_item_inner_eth_v2,
160         },
161         [RTE_FLOW_ITEM_TYPE_VLAN] = {
162                 .copy_item = enic_copy_item_vlan_v2,
163                 .valid_start_item = 1,
164                 .prev_items = (const enum rte_flow_item_type[]) {
165                                RTE_FLOW_ITEM_TYPE_ETH,
166                                RTE_FLOW_ITEM_TYPE_END,
167                 },
168                 .inner_copy_item = enic_copy_item_inner_vlan_v2,
169         },
170         [RTE_FLOW_ITEM_TYPE_IPV4] = {
171                 .copy_item = enic_copy_item_ipv4_v2,
172                 .valid_start_item = 1,
173                 .prev_items = (const enum rte_flow_item_type[]) {
174                                RTE_FLOW_ITEM_TYPE_ETH,
175                                RTE_FLOW_ITEM_TYPE_VLAN,
176                                RTE_FLOW_ITEM_TYPE_END,
177                 },
178                 .inner_copy_item = enic_copy_item_inner_ipv4_v2,
179         },
180         [RTE_FLOW_ITEM_TYPE_IPV6] = {
181                 .copy_item = enic_copy_item_ipv6_v2,
182                 .valid_start_item = 1,
183                 .prev_items = (const enum rte_flow_item_type[]) {
184                                RTE_FLOW_ITEM_TYPE_ETH,
185                                RTE_FLOW_ITEM_TYPE_VLAN,
186                                RTE_FLOW_ITEM_TYPE_END,
187                 },
188                 .inner_copy_item = enic_copy_item_inner_ipv6_v2,
189         },
190         [RTE_FLOW_ITEM_TYPE_UDP] = {
191                 .copy_item = enic_copy_item_udp_v2,
192                 .valid_start_item = 0,
193                 .prev_items = (const enum rte_flow_item_type[]) {
194                                RTE_FLOW_ITEM_TYPE_IPV4,
195                                RTE_FLOW_ITEM_TYPE_IPV6,
196                                RTE_FLOW_ITEM_TYPE_END,
197                 },
198                 .inner_copy_item = enic_copy_item_inner_udp_v2,
199         },
200         [RTE_FLOW_ITEM_TYPE_TCP] = {
201                 .copy_item = enic_copy_item_tcp_v2,
202                 .valid_start_item = 0,
203                 .prev_items = (const enum rte_flow_item_type[]) {
204                                RTE_FLOW_ITEM_TYPE_IPV4,
205                                RTE_FLOW_ITEM_TYPE_IPV6,
206                                RTE_FLOW_ITEM_TYPE_END,
207                 },
208                 .inner_copy_item = enic_copy_item_inner_tcp_v2,
209         },
210         [RTE_FLOW_ITEM_TYPE_SCTP] = {
211                 .copy_item = enic_copy_item_sctp_v2,
212                 .valid_start_item = 0,
213                 .prev_items = (const enum rte_flow_item_type[]) {
214                                RTE_FLOW_ITEM_TYPE_IPV4,
215                                RTE_FLOW_ITEM_TYPE_IPV6,
216                                RTE_FLOW_ITEM_TYPE_END,
217                 },
218                 .inner_copy_item = NULL,
219         },
220         [RTE_FLOW_ITEM_TYPE_VXLAN] = {
221                 .copy_item = enic_copy_item_vxlan_v2,
222                 .valid_start_item = 0,
223                 .prev_items = (const enum rte_flow_item_type[]) {
224                                RTE_FLOW_ITEM_TYPE_UDP,
225                                RTE_FLOW_ITEM_TYPE_END,
226                 },
227                 .inner_copy_item = NULL,
228         },
229 };
230
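/*
 * Illustrative difference from enic_items_v3 below (sketch, not driver code):
 * with Advanced Filters disabled, L4 items cannot start a pattern, so
 *
 *   pattern: udp dst is 4789 / vxlan          -> rejected (stacking error)
 *   pattern: ipv4 / udp dst is 4789 / vxlan   -> accepted
 *
 * whereas the v3 table marks UDP, TCP and VXLAN as valid start items.
 */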
231 /** NICs with Advanced filters enabled */
232 static const struct enic_items enic_items_v3[] = {
233         [RTE_FLOW_ITEM_TYPE_RAW] = {
234                 .copy_item = enic_copy_item_raw_v2,
235                 .valid_start_item = 0,
236                 .prev_items = (const enum rte_flow_item_type[]) {
237                                RTE_FLOW_ITEM_TYPE_UDP,
238                                RTE_FLOW_ITEM_TYPE_END,
239                 },
240                 .inner_copy_item = NULL,
241         },
242         [RTE_FLOW_ITEM_TYPE_ETH] = {
243                 .copy_item = enic_copy_item_eth_v2,
244                 .valid_start_item = 1,
245                 .prev_items = (const enum rte_flow_item_type[]) {
246                                RTE_FLOW_ITEM_TYPE_VXLAN,
247                                RTE_FLOW_ITEM_TYPE_END,
248                 },
249                 .inner_copy_item = enic_copy_item_inner_eth_v2,
250         },
251         [RTE_FLOW_ITEM_TYPE_VLAN] = {
252                 .copy_item = enic_copy_item_vlan_v2,
253                 .valid_start_item = 1,
254                 .prev_items = (const enum rte_flow_item_type[]) {
255                                RTE_FLOW_ITEM_TYPE_ETH,
256                                RTE_FLOW_ITEM_TYPE_END,
257                 },
258                 .inner_copy_item = enic_copy_item_inner_vlan_v2,
259         },
260         [RTE_FLOW_ITEM_TYPE_IPV4] = {
261                 .copy_item = enic_copy_item_ipv4_v2,
262                 .valid_start_item = 1,
263                 .prev_items = (const enum rte_flow_item_type[]) {
264                                RTE_FLOW_ITEM_TYPE_ETH,
265                                RTE_FLOW_ITEM_TYPE_VLAN,
266                                RTE_FLOW_ITEM_TYPE_END,
267                 },
268                 .inner_copy_item = enic_copy_item_inner_ipv4_v2,
269         },
270         [RTE_FLOW_ITEM_TYPE_IPV6] = {
271                 .copy_item = enic_copy_item_ipv6_v2,
272                 .valid_start_item = 1,
273                 .prev_items = (const enum rte_flow_item_type[]) {
274                                RTE_FLOW_ITEM_TYPE_ETH,
275                                RTE_FLOW_ITEM_TYPE_VLAN,
276                                RTE_FLOW_ITEM_TYPE_END,
277                 },
278                 .inner_copy_item = enic_copy_item_inner_ipv6_v2,
279         },
280         [RTE_FLOW_ITEM_TYPE_UDP] = {
281                 .copy_item = enic_copy_item_udp_v2,
282                 .valid_start_item = 1,
283                 .prev_items = (const enum rte_flow_item_type[]) {
284                                RTE_FLOW_ITEM_TYPE_IPV4,
285                                RTE_FLOW_ITEM_TYPE_IPV6,
286                                RTE_FLOW_ITEM_TYPE_END,
287                 },
288                 .inner_copy_item = enic_copy_item_inner_udp_v2,
289         },
290         [RTE_FLOW_ITEM_TYPE_TCP] = {
291                 .copy_item = enic_copy_item_tcp_v2,
292                 .valid_start_item = 1,
293                 .prev_items = (const enum rte_flow_item_type[]) {
294                                RTE_FLOW_ITEM_TYPE_IPV4,
295                                RTE_FLOW_ITEM_TYPE_IPV6,
296                                RTE_FLOW_ITEM_TYPE_END,
297                 },
298                 .inner_copy_item = enic_copy_item_inner_tcp_v2,
299         },
300         [RTE_FLOW_ITEM_TYPE_SCTP] = {
301                 .copy_item = enic_copy_item_sctp_v2,
302                 .valid_start_item = 0,
303                 .prev_items = (const enum rte_flow_item_type[]) {
304                                RTE_FLOW_ITEM_TYPE_IPV4,
305                                RTE_FLOW_ITEM_TYPE_IPV6,
306                                RTE_FLOW_ITEM_TYPE_END,
307                 },
308                 .inner_copy_item = NULL,
309         },
310         [RTE_FLOW_ITEM_TYPE_VXLAN] = {
311                 .copy_item = enic_copy_item_vxlan_v2,
312                 .valid_start_item = 1,
313                 .prev_items = (const enum rte_flow_item_type[]) {
314                                RTE_FLOW_ITEM_TYPE_UDP,
315                                RTE_FLOW_ITEM_TYPE_END,
316                 },
317                 .inner_copy_item = NULL,
318         },
319 };
320
321 /** Filtering capabilities indexed by the NIC's supported filter type. */
322 static const struct enic_filter_cap enic_filter_cap[] = {
323         [FILTER_IPV4_5TUPLE] = {
324                 .item_info = enic_items_v1,
325                 .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
326         },
327         [FILTER_USNIC_IP] = {
328                 .item_info = enic_items_v2,
329                 .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
330         },
331         [FILTER_DPDK_1] = {
332                 .item_info = enic_items_v3,
333                 .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
334         },
335 };
336
337 /** Supported actions for older NICs */
338 static const enum rte_flow_action_type enic_supported_actions_v1[] = {
339         RTE_FLOW_ACTION_TYPE_QUEUE,
340         RTE_FLOW_ACTION_TYPE_END,
341 };
342
343 /** Supported actions for newer NICs */
344 static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
345         RTE_FLOW_ACTION_TYPE_QUEUE,
346         RTE_FLOW_ACTION_TYPE_MARK,
347         RTE_FLOW_ACTION_TYPE_FLAG,
348         RTE_FLOW_ACTION_TYPE_RSS,
349         RTE_FLOW_ACTION_TYPE_PASSTHRU,
350         RTE_FLOW_ACTION_TYPE_END,
351 };
352
353 static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
354         RTE_FLOW_ACTION_TYPE_QUEUE,
355         RTE_FLOW_ACTION_TYPE_MARK,
356         RTE_FLOW_ACTION_TYPE_FLAG,
357         RTE_FLOW_ACTION_TYPE_DROP,
358         RTE_FLOW_ACTION_TYPE_RSS,
359         RTE_FLOW_ACTION_TYPE_PASSTHRU,
360         RTE_FLOW_ACTION_TYPE_END,
361 };
362
363 static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
364         RTE_FLOW_ACTION_TYPE_QUEUE,
365         RTE_FLOW_ACTION_TYPE_MARK,
366         RTE_FLOW_ACTION_TYPE_FLAG,
367         RTE_FLOW_ACTION_TYPE_DROP,
368         RTE_FLOW_ACTION_TYPE_COUNT,
369         RTE_FLOW_ACTION_TYPE_RSS,
370         RTE_FLOW_ACTION_TYPE_PASSTHRU,
371         RTE_FLOW_ACTION_TYPE_END,
372 };
373
374 /** Action capabilities indexed by NIC version information */
375 static const struct enic_action_cap enic_action_cap[] = {
376         [FILTER_ACTION_RQ_STEERING_FLAG] = {
377                 .actions = enic_supported_actions_v1,
378                 .copy_fn = enic_copy_action_v1,
379         },
380         [FILTER_ACTION_FILTER_ID_FLAG] = {
381                 .actions = enic_supported_actions_v2_id,
382                 .copy_fn = enic_copy_action_v2,
383         },
384         [FILTER_ACTION_DROP_FLAG] = {
385                 .actions = enic_supported_actions_v2_drop,
386                 .copy_fn = enic_copy_action_v2,
387         },
388         [FILTER_ACTION_COUNTER_FLAG] = {
389                 .actions = enic_supported_actions_v2_count,
390                 .copy_fn = enic_copy_action_v2,
391         },
392 };
393
394 static int
395 mask_exact_match(const u8 *supported, const u8 *supplied,
396                  unsigned int size)
397 {
398         unsigned int i;
399         for (i = 0; i < size; i++) {
400                 if (supported[i] != supplied[i])
401                         return 0;
402         }
403         return 1;
404 }
405
406 static int
407 enic_copy_item_ipv4_v1(struct copy_item_args *arg)
408 {
409         const struct rte_flow_item *item = arg->item;
410         struct filter_v2 *enic_filter = arg->filter;
411         const struct rte_flow_item_ipv4 *spec = item->spec;
412         const struct rte_flow_item_ipv4 *mask = item->mask;
413         struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
414         struct ipv4_hdr supported_mask = {
415                 .src_addr = 0xffffffff,
416                 .dst_addr = 0xffffffff,
417         };
418
419         FLOW_TRACE();
420
421         if (!mask)
422                 mask = &rte_flow_item_ipv4_mask;
423
424         /* This is an exact match filter, both fields must be set */
425         if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
426                 FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
427                 return ENOTSUP;
428         }
429
430         /* check that the supplied mask exactly matches capability */
431         if (!mask_exact_match((const u8 *)&supported_mask,
432                               (const u8 *)item->mask, sizeof(*mask))) {
433                 FLOW_LOG(ERR, "IPv4 exact match mask");
434                 return ENOTSUP;
435         }
436
437         enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
438         enic_5tup->src_addr = spec->hdr.src_addr;
439         enic_5tup->dst_addr = spec->hdr.dst_addr;
440
441         return 0;
442 }
443
444 static int
445 enic_copy_item_udp_v1(struct copy_item_args *arg)
446 {
447         const struct rte_flow_item *item = arg->item;
448         struct filter_v2 *enic_filter = arg->filter;
449         const struct rte_flow_item_udp *spec = item->spec;
450         const struct rte_flow_item_udp *mask = item->mask;
451         struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
452         struct udp_hdr supported_mask = {
453                 .src_port = 0xffff,
454                 .dst_port = 0xffff,
455         };
456
457         FLOW_TRACE();
458
459         if (!mask)
460                 mask = &rte_flow_item_udp_mask;
461
462         /* This is an exact match filter, both ports must be set */
463         if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
464                 FLOW_LOG(ERR, "UDP exact match src/dst port");
465                 return ENOTSUP;
466         }
467
468         /* check that the supplied mask exactly matches capability */
469         if (!mask_exact_match((const u8 *)&supported_mask,
470                               (const u8 *)item->mask, sizeof(*mask))) {
471                 FLOW_LOG(ERR, "UDP exact match mask");
472                 return ENOTSUP;
473         }
474
475         enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
476         enic_5tup->src_port = spec->hdr.src_port;
477         enic_5tup->dst_port = spec->hdr.dst_port;
478         enic_5tup->protocol = PROTO_UDP;
479
480         return 0;
481 }
482
483 static int
484 enic_copy_item_tcp_v1(struct copy_item_args *arg)
485 {
486         const struct rte_flow_item *item = arg->item;
487         struct filter_v2 *enic_filter = arg->filter;
488         const struct rte_flow_item_tcp *spec = item->spec;
489         const struct rte_flow_item_tcp *mask = item->mask;
490         struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
491         struct tcp_hdr supported_mask = {
492                 .src_port = 0xffff,
493                 .dst_port = 0xffff,
494         };
495
496         FLOW_TRACE();
497
498         if (!mask)
499                 mask = &rte_flow_item_tcp_mask;
500
501         /* This is an exact match filter, both ports must be set */
502         if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
503                 FLOW_LOG(ERR, "TCP exact match src/dst port");
504                 return ENOTSUP;
505         }
506
507         /* check that the supplied mask exactly matches capability */
508         if (!mask_exact_match((const u8 *)&supported_mask,
509                              (const u8 *)item->mask, sizeof(*mask))) {
510                 FLOW_LOG(ERR, "TCP exact match mask");
511                 return ENOTSUP;
512         }
513
514         enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
515         enic_5tup->src_port = spec->hdr.src_port;
516         enic_5tup->dst_port = spec->hdr.dst_port;
517         enic_5tup->protocol = PROTO_TCP;
518
519         return 0;
520 }
521
522 /*
523  * The common 'copy' function for all inner packet patterns. Patterns are
524  * first appended to the L5 pattern buffer. Then, since the NIC filter
525  * API has no special support for inner packet matching at the moment,
526  * we set EtherType and IP proto as necessary.
527  */
528 static int
529 copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
530                   const void *val, const void *mask, uint8_t val_size,
531                   uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
532 {
533         uint8_t *l5_mask, *l5_val;
534         uint8_t start_off;
535
536         /* No space left in the L5 pattern buffer. */
537         start_off = *inner_ofst;
538         if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
539                 return ENOTSUP;
540         l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
541         l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
542         /* Copy the pattern into the L5 buffer. */
543         if (val) {
544                 memcpy(l5_mask + start_off, mask, val_size);
545                 memcpy(l5_val + start_off, val, val_size);
546         }
547         /* Set the protocol field in the previous header. */
548         if (proto_off) {
549                 void *m, *v;
550
551                 m = l5_mask + proto_off;
552                 v = l5_val + proto_off;
553                 if (proto_size == 1) {
554                         *(uint8_t *)m = 0xff;
555                         *(uint8_t *)v = (uint8_t)proto_val;
556                 } else if (proto_size == 2) {
557                         *(uint16_t *)m = 0xffff;
558                         *(uint16_t *)v = proto_val;
559                 }
560         }
561         /* All inner headers land in L5 buffer even if their spec is null. */
562         *inner_ofst += val_size;
563         return 0;
564 }
565
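/*
 * Example call (sketch): enic_copy_item_inner_udp_v2() below effectively does
 *
 *   copy_inner_common(gp, &inner_ofst,
 *                     item->spec, mask, sizeof(struct udp_hdr),
 *                     l3_proto_off, IPPROTO_UDP, 1);
 *
 * which appends the 8-byte UDP spec/mask to the L5 buffer at inner_ofst,
 * writes protocol = 17 with mask 0xff into the previously copied IP header at
 * l3_proto_off, and advances inner_ofst by 8.
 */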
566 static int
567 enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
568 {
569         const void *mask = arg->item->mask;
570         uint8_t *off = arg->inner_ofst;
571
572         FLOW_TRACE();
573         if (!mask)
574                 mask = &rte_flow_item_eth_mask;
575         arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
576         return copy_inner_common(&arg->filter->u.generic_1, off,
577                 arg->item->spec, mask, sizeof(struct rte_ether_hdr),
578                 0 /* no previous protocol */, 0, 0);
579 }
580
581 static int
582 enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
583 {
584         const void *mask = arg->item->mask;
585         uint8_t *off = arg->inner_ofst;
586         uint8_t eth_type_off;
587
588         FLOW_TRACE();
589         if (!mask)
590                 mask = &rte_flow_item_vlan_mask;
591         /* Append vlan header to L5 and set ether type = TPID */
592         eth_type_off = arg->l2_proto_off;
593         arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
594         return copy_inner_common(&arg->filter->u.generic_1, off,
595                 arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
596                 eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
597 }
598
599 static int
600 enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
601 {
602         const void *mask = arg->item->mask;
603         uint8_t *off = arg->inner_ofst;
604
605         FLOW_TRACE();
606         if (!mask)
607                 mask = &rte_flow_item_ipv4_mask;
608         /* Append ipv4 header to L5 and set ether type = ipv4 */
609         arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
610         return copy_inner_common(&arg->filter->u.generic_1, off,
611                 arg->item->spec, mask, sizeof(struct ipv4_hdr),
612                 arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
613 }
614
615 static int
616 enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
617 {
618         const void *mask = arg->item->mask;
619         uint8_t *off = arg->inner_ofst;
620
621         FLOW_TRACE();
622         if (!mask)
623                 mask = &rte_flow_item_ipv6_mask;
624         /* Append ipv6 header to L5 and set ether type = ipv6 */
625         arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
626         return copy_inner_common(&arg->filter->u.generic_1, off,
627                 arg->item->spec, mask, sizeof(struct ipv6_hdr),
628                 arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
629 }
630
631 static int
632 enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
633 {
634         const void *mask = arg->item->mask;
635         uint8_t *off = arg->inner_ofst;
636
637         FLOW_TRACE();
638         if (!mask)
639                 mask = &rte_flow_item_udp_mask;
640         /* Append udp header to L5 and set ip proto = udp */
641         return copy_inner_common(&arg->filter->u.generic_1, off,
642                 arg->item->spec, mask, sizeof(struct udp_hdr),
643                 arg->l3_proto_off, IPPROTO_UDP, 1);
644 }
645
646 static int
647 enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
648 {
649         const void *mask = arg->item->mask;
650         uint8_t *off = arg->inner_ofst;
651
652         FLOW_TRACE();
653         if (!mask)
654                 mask = &rte_flow_item_tcp_mask;
655         /* Append tcp header to L5 and set ip proto = tcp */
656         return copy_inner_common(&arg->filter->u.generic_1, off,
657                 arg->item->spec, mask, sizeof(struct tcp_hdr),
658                 arg->l3_proto_off, IPPROTO_TCP, 1);
659 }
660
661 static int
662 enic_copy_item_eth_v2(struct copy_item_args *arg)
663 {
664         const struct rte_flow_item *item = arg->item;
665         struct filter_v2 *enic_filter = arg->filter;
666         struct rte_ether_hdr enic_spec;
667         struct rte_ether_hdr enic_mask;
668         const struct rte_flow_item_eth *spec = item->spec;
669         const struct rte_flow_item_eth *mask = item->mask;
670         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
671
672         FLOW_TRACE();
673
674         /* Match all if no spec */
675         if (!spec)
676                 return 0;
677
678         if (!mask)
679                 mask = &rte_flow_item_eth_mask;
680
681         memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
682                ETHER_ADDR_LEN);
683         memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
684                ETHER_ADDR_LEN);
685
686         memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
687                ETHER_ADDR_LEN);
688         memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
689                ETHER_ADDR_LEN);
690         enic_spec.ether_type = spec->type;
691         enic_mask.ether_type = mask->type;
692
693         /* outer header */
694         memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
695                sizeof(struct rte_ether_hdr));
696         memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
697                sizeof(struct rte_ether_hdr));
698         return 0;
699 }
700
701 static int
702 enic_copy_item_vlan_v2(struct copy_item_args *arg)
703 {
704         const struct rte_flow_item *item = arg->item;
705         struct filter_v2 *enic_filter = arg->filter;
706         const struct rte_flow_item_vlan *spec = item->spec;
707         const struct rte_flow_item_vlan *mask = item->mask;
708         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
709         struct rte_ether_hdr *eth_mask;
710         struct rte_ether_hdr *eth_val;
711
712         FLOW_TRACE();
713
714         /* Match all if no spec */
715         if (!spec)
716                 return 0;
717
718         if (!mask)
719                 mask = &rte_flow_item_vlan_mask;
720
721         eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
722         eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
723         /* Outer TPID cannot be matched */
724         if (eth_mask->ether_type)
725                 return ENOTSUP;
726         /*
727          * For recent models:
728          * When matching packets, the VIC always compares the vlan-stripped
729          * L2, regardless of vlan stripping settings. So, the inner type
730          * from vlan becomes the ether type of the eth header.
731          *
732          * Older models w/o hardware vxlan parser have a different
733          * behavior when vlan stripping is disabled. In this case,
734          * vlan tag remains in the L2 buffer.
735          */
736         if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
737                 struct rte_vlan_hdr *vlan;
738
739                 vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
740                 vlan->eth_proto = mask->inner_type;
741                 vlan = (struct rte_vlan_hdr *)(eth_val + 1);
742                 vlan->eth_proto = spec->inner_type;
743         } else {
744                 eth_mask->ether_type = mask->inner_type;
745                 eth_val->ether_type = spec->inner_type;
746         }
747         /* For TCI, use the vlan mask/val fields (little endian). */
748         gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
749         gp->val_vlan = rte_be_to_cpu_16(spec->tci);
750         return 0;
751 }
752
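/*
 * Sketch of the resulting L2 match buffer (not driver code) for a pattern
 * like "eth / vlan inner_type is 0x0800":
 *
 *   recent VIC, or vlan stripping on:
 *     [dst 6B][src 6B][ether_type 2B = inner_type]
 *   older VIC with vlan stripping off:
 *     [dst 6B][src 6B][TPID 2B, not matched][tci 2B, not matched here]
 *     [eth_proto 2B = inner_type]
 *
 * In both cases the TCI itself is matched via gp->mask_vlan/val_vlan rather
 * than the L2 buffer.
 */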
753 static int
754 enic_copy_item_ipv4_v2(struct copy_item_args *arg)
755 {
756         const struct rte_flow_item *item = arg->item;
757         struct filter_v2 *enic_filter = arg->filter;
758         const struct rte_flow_item_ipv4 *spec = item->spec;
759         const struct rte_flow_item_ipv4 *mask = item->mask;
760         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
761
762         FLOW_TRACE();
763
764         /* Match IPv4 */
765         gp->mask_flags |= FILTER_GENERIC_1_IPV4;
766         gp->val_flags |= FILTER_GENERIC_1_IPV4;
767
768         /* Match all if no spec */
769         if (!spec)
770                 return 0;
771
772         if (!mask)
773                 mask = &rte_flow_item_ipv4_mask;
774
775         memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
776                sizeof(struct ipv4_hdr));
777         memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
778                sizeof(struct ipv4_hdr));
779         return 0;
780 }
781
782 static int
783 enic_copy_item_ipv6_v2(struct copy_item_args *arg)
784 {
785         const struct rte_flow_item *item = arg->item;
786         struct filter_v2 *enic_filter = arg->filter;
787         const struct rte_flow_item_ipv6 *spec = item->spec;
788         const struct rte_flow_item_ipv6 *mask = item->mask;
789         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
790
791         FLOW_TRACE();
792
793         /* Match IPv6 */
794         gp->mask_flags |= FILTER_GENERIC_1_IPV6;
795         gp->val_flags |= FILTER_GENERIC_1_IPV6;
796
797         /* Match all if no spec */
798         if (!spec)
799                 return 0;
800
801         if (!mask)
802                 mask = &rte_flow_item_ipv6_mask;
803
804         memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
805                sizeof(struct ipv6_hdr));
806         memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
807                sizeof(struct ipv6_hdr));
808         return 0;
809 }
810
811 static int
812 enic_copy_item_udp_v2(struct copy_item_args *arg)
813 {
814         const struct rte_flow_item *item = arg->item;
815         struct filter_v2 *enic_filter = arg->filter;
816         const struct rte_flow_item_udp *spec = item->spec;
817         const struct rte_flow_item_udp *mask = item->mask;
818         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
819
820         FLOW_TRACE();
821
822         /* Match UDP */
823         gp->mask_flags |= FILTER_GENERIC_1_UDP;
824         gp->val_flags |= FILTER_GENERIC_1_UDP;
825
826         /* Match all if no spec */
827         if (!spec)
828                 return 0;
829
830         if (!mask)
831                 mask = &rte_flow_item_udp_mask;
832
833         memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
834                sizeof(struct udp_hdr));
835         memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
836                sizeof(struct udp_hdr));
837         return 0;
838 }
839
840 static int
841 enic_copy_item_tcp_v2(struct copy_item_args *arg)
842 {
843         const struct rte_flow_item *item = arg->item;
844         struct filter_v2 *enic_filter = arg->filter;
845         const struct rte_flow_item_tcp *spec = item->spec;
846         const struct rte_flow_item_tcp *mask = item->mask;
847         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
848
849         FLOW_TRACE();
850
851         /* Match TCP */
852         gp->mask_flags |= FILTER_GENERIC_1_TCP;
853         gp->val_flags |= FILTER_GENERIC_1_TCP;
854
855         /* Match all if no spec */
856         if (!spec)
857                 return 0;
858
859         if (!mask)
860                 return ENOTSUP;
861
862         memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
863                sizeof(struct tcp_hdr));
864         memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
865                sizeof(struct tcp_hdr));
866         return 0;
867 }
868
869 static int
870 enic_copy_item_sctp_v2(struct copy_item_args *arg)
871 {
872         const struct rte_flow_item *item = arg->item;
873         struct filter_v2 *enic_filter = arg->filter;
874         const struct rte_flow_item_sctp *spec = item->spec;
875         const struct rte_flow_item_sctp *mask = item->mask;
876         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
877         uint8_t *ip_proto_mask = NULL;
878         uint8_t *ip_proto = NULL;
879
880         FLOW_TRACE();
881
882         /*
883          * The NIC filter API has no flags for "match sctp", so explicitly set
884          * the protocol number in the IP pattern.
885          */
886         if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
887                 struct ipv4_hdr *ip;
888                 ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
889                 ip_proto_mask = &ip->next_proto_id;
890                 ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
891                 ip_proto = &ip->next_proto_id;
892         } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
893                 struct ipv6_hdr *ip;
894                 ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
895                 ip_proto_mask = &ip->proto;
896                 ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
897                 ip_proto = &ip->proto;
898         } else {
899                 /* Need IPv4/IPv6 pattern first */
900                 return EINVAL;
901         }
902         *ip_proto = IPPROTO_SCTP;
903         *ip_proto_mask = 0xff;
904
905         /* Match all if no spec */
906         if (!spec)
907                 return 0;
908
909         if (!mask)
910                 mask = &rte_flow_item_sctp_mask;
911
912         memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
913                sizeof(struct sctp_hdr));
914         memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
915                sizeof(struct sctp_hdr));
916         return 0;
917 }
918
919 static int
920 enic_copy_item_vxlan_v2(struct copy_item_args *arg)
921 {
922         const struct rte_flow_item *item = arg->item;
923         struct filter_v2 *enic_filter = arg->filter;
924         uint8_t *inner_ofst = arg->inner_ofst;
925         const struct rte_flow_item_vxlan *spec = item->spec;
926         const struct rte_flow_item_vxlan *mask = item->mask;
927         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
928         struct udp_hdr *udp;
929
930         FLOW_TRACE();
931
932         /*
933          * The NIC filter API has no flags for "match vxlan". Set UDP port to
934          * avoid false positives.
935          */
936         gp->mask_flags |= FILTER_GENERIC_1_UDP;
937         gp->val_flags |= FILTER_GENERIC_1_UDP;
938         udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
939         udp->dst_port = 0xffff;
940         udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
941         udp->dst_port = RTE_BE16(4789);
942         /* Match all if no spec */
943         if (!spec)
944                 return 0;
945
946         if (!mask)
947                 mask = &rte_flow_item_vxlan_mask;
948
949         memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
950                sizeof(struct rte_vxlan_hdr));
951         memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
952                sizeof(struct rte_vxlan_hdr));
953
954         *inner_ofst = sizeof(struct rte_vxlan_hdr);
955         return 0;
956 }
957
958 /*
959  * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
960  * very limited. It is intended for matching the UDP tunnel header (e.g. vxlan
961  * or geneve).
962  */
963 static int
964 enic_copy_item_raw_v2(struct copy_item_args *arg)
965 {
966         const struct rte_flow_item *item = arg->item;
967         struct filter_v2 *enic_filter = arg->filter;
968         uint8_t *inner_ofst = arg->inner_ofst;
969         const struct rte_flow_item_raw *spec = item->spec;
970         const struct rte_flow_item_raw *mask = item->mask;
971         struct filter_generic_1 *gp = &enic_filter->u.generic_1;
972
973         FLOW_TRACE();
974
975         /* Cannot be used for inner packet */
976         if (*inner_ofst)
977                 return EINVAL;
978         /* Need both spec and mask */
979         if (!spec || !mask)
980                 return EINVAL;
981         /* Only supports relative with offset 0 */
982         if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
983                 return EINVAL;
984         /* Need non-null pattern that fits within the NIC's filter pattern */
985         if (spec->length == 0 ||
986             spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
987             !spec->pattern || !mask->pattern)
988                 return EINVAL;
989         /*
990          * Mask fields, including length, are often set to zero. Assume that
991          * means "same as spec" to avoid breaking existing apps. If length
992          * is not zero, then it should be >= spec length.
993          *
994          * No more pattern follows this, so append to the L4 layer instead of
995          * L5 to work with both recent and older VICs.
996          */
997         if (mask->length != 0 && mask->length < spec->length)
998                 return EINVAL;
999         memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
1000                mask->pattern, spec->length);
1001         memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
1002                spec->pattern, spec->length);
1003
1004         return 0;
1005 }
1006
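/*
 * Illustrative usage (sketch, not driver code): matching the first bytes of a
 * geneve-like header that follows the outer UDP header. The hypothetical
 * geneve_hdr[] bytes stand in for whatever tunnel header bytes an application
 * wants to match:
 *
 *   uint8_t geneve_hdr[8] = { 0 };              // hypothetical pattern bytes
 *   struct rte_flow_item_raw raw_spec = {
 *           .relative = 1,                      // must be relative, offset 0
 *           .length = sizeof(geneve_hdr),
 *           .pattern = geneve_hdr,
 *   };
 *   // pattern: eth / ipv4 / udp dst is 6081 / raw (spec above, mask all-ones)
 */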
1007 /**
1008  * Return 1 if current item is valid on top of the previous one.
1009  *
1010  * @param prev_item[in]
1011  *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
1012  *   is the first item.
1013  * @param item_info[in]
1014  *   Info about this item, like valid previous items.
1015  * @param is_first[in]
1016  *   True if this is the first item in the pattern.
1017  */
1018 static int
1019 item_stacking_valid(enum rte_flow_item_type prev_item,
1020                     const struct enic_items *item_info, u8 is_first_item)
1021 {
1022         enum rte_flow_item_type const *allowed_items = item_info->prev_items;
1023
1024         FLOW_TRACE();
1025
1026         for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
1027                 if (prev_item == *allowed_items)
1028                         return 1;
1029         }
1030
1031         /* This is the first item in the stack. Check if it may start the pattern. */
1032         if (is_first_item && item_info->valid_start_item)
1033                 return 1;
1034
1035         return 0;
1036 }
1037
1038 /*
1039  * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5.
1040  * Instead it is in L4, following the UDP header. Append the vxlan
1041  * pattern to L4 (udp) and shift any inner packet pattern to the start of L5.
1042  */
1043 static void
1044 fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
1045                uint8_t inner_ofst)
1046 {
1047         uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
1048         uint8_t inner;
1049         uint8_t vxlan;
1050
1051         if (!(inner_ofst > 0 && enic->vxlan))
1052                 return;
1053         FLOW_TRACE();
1054         vxlan = sizeof(struct rte_vxlan_hdr);
1055         memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
1056                gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
1057         memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
1058                gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
1059         inner = inner_ofst - vxlan;
1060         memset(layer, 0, sizeof(layer));
1061         memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
1062         memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
1063         memset(layer, 0, sizeof(layer));
1064         memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
1065         memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
1066 }
1067
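/*
 * Worked example (sketch): with HW vxlan parsing, for a pattern that copied
 * vxlan (8B) plus an inner eth header (14B) into L5 (inner_ofst = 22), the
 * fixup turns
 *
 *   L4: [udp 8B][zeros...]          L5: [vxlan 8B][inner eth 14B][zeros...]
 * into
 *   L4: [udp 8B][vxlan 8B]          L5: [inner eth 14B][zeros...]
 *
 * i.e. the vxlan bytes move behind the UDP header and the inner pattern
 * shifts to the start of L5.
 */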
1068 /**
1069  * Build the internal enic filter structure from the provided pattern. The
1070  * pattern is validated as the items are copied.
1071  *
1072  * @param pattern[in]
1073  * @param cap[in]
1074  *   Info about this NIC's item support, like valid previous items.
1075  * @param enic_filter[out]
1076  *   NIC-specific filter derived from the pattern.
1077  * @param error[out]
1078  */
1079 static int
1080 enic_copy_filter(const struct rte_flow_item pattern[],
1081                  const struct enic_filter_cap *cap,
1082                  struct enic *enic,
1083                  struct filter_v2 *enic_filter,
1084                  struct rte_flow_error *error)
1085 {
1086         int ret;
1087         const struct rte_flow_item *item = pattern;
1088         u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
1089         enum rte_flow_item_type prev_item;
1090         const struct enic_items *item_info;
1091         struct copy_item_args args;
1092         enic_copy_item_fn *copy_fn;
1093         u8 is_first_item = 1;
1094
1095         FLOW_TRACE();
1096
1097         prev_item = 0;
1098
1099         args.filter = enic_filter;
1100         args.inner_ofst = &inner_ofst;
1101         args.enic = enic;
1102         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1103                 /* Get info about how to validate and copy the item. If the
1104                  * copy function is NULL, the NIC does not support the item.
1105                  */
1106                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1107                         continue;
1108
1109                 item_info = &cap->item_info[item->type];
1110                 if (item->type > cap->max_item_type ||
1111                     item_info->copy_item == NULL ||
1112                     (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
1113                         rte_flow_error_set(error, ENOTSUP,
1114                                 RTE_FLOW_ERROR_TYPE_ITEM,
1115                                 NULL, "Unsupported item.");
1116                         return -rte_errno;
1117                 }
1118
1119                 /* check to see if item stacking is valid */
1120                 if (!item_stacking_valid(prev_item, item_info, is_first_item))
1121                         goto stacking_error;
1122
1123                 args.item = item;
1124                 copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
1125                         item_info->copy_item;
1126                 ret = copy_fn(&args);
1127                 if (ret)
1128                         goto item_not_supported;
1129                 prev_item = item->type;
1130                 is_first_item = 0;
1131         }
1132         fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
1133
1134         return 0;
1135
1136 item_not_supported:
1137         rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
1138                            NULL, "enic type error");
1139         return -rte_errno;
1140
1141 stacking_error:
1142         rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1143                            item, "stacking error");
1144         return -rte_errno;
1145 }
1146
1147 /**
1148  * Build the internal version 1 NIC action structure from the provided actions.
1149  * The actions are validated as they are copied.
1150  *
1151  * @param actions[in]
1152  * @param enic_action[out]
1153  *   NIC-specific actions derived from the rte_flow actions.
1154  * @param error[out]
1155  */
1156 static int
1157 enic_copy_action_v1(__rte_unused struct enic *enic,
1158                     const struct rte_flow_action actions[],
1159                     struct filter_action_v2 *enic_action)
1160 {
1161         enum { FATE = 1, };
1162         uint32_t overlap = 0;
1163
1164         FLOW_TRACE();
1165
1166         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1167                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
1168                         continue;
1169
1170                 switch (actions->type) {
1171                 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1172                         const struct rte_flow_action_queue *queue =
1173                                 (const struct rte_flow_action_queue *)
1174                                 actions->conf;
1175
1176                         if (overlap & FATE)
1177                                 return ENOTSUP;
1178                         overlap |= FATE;
1179                         enic_action->rq_idx =
1180                                 enic_rte_rq_idx_to_sop_idx(queue->index);
1181                         break;
1182                 }
1183                 default:
1184                         RTE_ASSERT(0);
1185                         break;
1186                 }
1187         }
1188         if (!(overlap & FATE))
1189                 return ENOTSUP;
1190         enic_action->type = FILTER_ACTION_RQ_STEERING;
1191         return 0;
1192 }
1193
1194 /**
1195  * Build the internal version 2 NIC action structure from the provided actions.
1196  * The actions are validated as they are copied.
1197  *
1198  * @param actions[in]
1199  * @param enic_action[out]
1200  *   NIC-specific actions derived from the rte_flow actions.
1201  * @param error[out]
1202  */
1203 static int
1204 enic_copy_action_v2(struct enic *enic,
1205                     const struct rte_flow_action actions[],
1206                     struct filter_action_v2 *enic_action)
1207 {
1208         enum { FATE = 1, MARK = 2, };
1209         uint32_t overlap = 0;
1210         bool passthru = false;
1211
1212         FLOW_TRACE();
1213
1214         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1215                 switch (actions->type) {
1216                 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1217                         const struct rte_flow_action_queue *queue =
1218                                 (const struct rte_flow_action_queue *)
1219                                 actions->conf;
1220
1221                         if (overlap & FATE)
1222                                 return ENOTSUP;
1223                         overlap |= FATE;
1224                         enic_action->rq_idx =
1225                                 enic_rte_rq_idx_to_sop_idx(queue->index);
1226                         enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
1227                         break;
1228                 }
1229                 case RTE_FLOW_ACTION_TYPE_MARK: {
1230                         const struct rte_flow_action_mark *mark =
1231                                 (const struct rte_flow_action_mark *)
1232                                 actions->conf;
1233
1234                         if (overlap & MARK)
1235                                 return ENOTSUP;
1236                         overlap |= MARK;
1237                         /*
1238                          * Map mark ID (32-bit) to filter ID (16-bit):
1239                          * - Reject values > 16 bits
1240                          * - Filter ID 0 is reserved for filters that steer
1241                          *   but not mark. So add 1 to the mark ID to avoid
1242                          *   using 0.
1243                          * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
1244                          *   reserved for the "flag" action below.
1245                          */
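                        /*
                         * Example mapping (sketch): mark id 0 -> filter_id 1,
                         * mark id 5 -> filter_id 6. Ids >= 0xfffe are rejected
                         * because id 0xfffe would map to 0xffff, which is
                         * ENIC_MAGIC_FILTER_ID.
                         */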
1246                         if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
1247                                 return EINVAL;
1248                         enic_action->filter_id = mark->id + 1;
1249                         enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1250                         break;
1251                 }
1252                 case RTE_FLOW_ACTION_TYPE_FLAG: {
1253                         if (overlap & MARK)
1254                                 return ENOTSUP;
1255                         overlap |= MARK;
1256                         /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
1257                         enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
1258                         enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1259                         break;
1260                 }
1261                 case RTE_FLOW_ACTION_TYPE_DROP: {
1262                         if (overlap & FATE)
1263                                 return ENOTSUP;
1264                         overlap |= FATE;
1265                         enic_action->flags |= FILTER_ACTION_DROP_FLAG;
1266                         break;
1267                 }
1268                 case RTE_FLOW_ACTION_TYPE_COUNT: {
1269                         enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
1270                         break;
1271                 }
1272                 case RTE_FLOW_ACTION_TYPE_RSS: {
1273                         const struct rte_flow_action_rss *rss =
1274                                 (const struct rte_flow_action_rss *)
1275                                 actions->conf;
1276                         bool allow;
1277                         uint16_t i;
1278
1279                         /*
1280                          * Hardware does not support general RSS actions, but
1281                          * we can still support the dummy one that is used to
1282                          * "receive normally".
1283                          */
1284                         allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
1285                                 rss->level == 0 &&
1286                                 (rss->types == 0 ||
1287                                  rss->types == enic->rss_hf) &&
1288                                 rss->queue_num == enic->rq_count &&
1289                                 rss->key_len == 0;
1290                         /* Identity queue map is ok */
1291                         for (i = 0; i < rss->queue_num; i++)
1292                                 allow = allow && (i == rss->queue[i]);
1293                         if (!allow)
1294                                 return ENOTSUP;
1295                         if (overlap & FATE)
1296                                 return ENOTSUP;
1297                         /* Need MARK or FLAG */
1298                         if (!(overlap & MARK))
1299                                 return ENOTSUP;
1300                         overlap |= FATE;
1301                         break;
1302                 }
1303                 case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
1304                         /*
1305                          * Like RSS above, PASSTHRU + MARK may be used to
1306                          * "mark and then receive normally". MARK usually comes
1307                          * after PASSTHRU, so remember we have seen passthru
1308                          * and check for mark later.
1309                          */
1310                         if (overlap & FATE)
1311                                 return ENOTSUP;
1312                         overlap |= FATE;
1313                         passthru = true;
1314                         break;
1315                 }
1316                 case RTE_FLOW_ACTION_TYPE_VOID:
1317                         continue;
1318                 default:
1319                         RTE_ASSERT(0);
1320                         break;
1321                 }
1322         }
1323         /* Only PASSTHRU + MARK is allowed */
1324         if (passthru && !(overlap & MARK))
1325                 return ENOTSUP;
1326         if (!(overlap & FATE))
1327                 return ENOTSUP;
1328         enic_action->type = FILTER_ACTION_V2;
1329         return 0;
1330 }
1331
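/*
 * Illustrative action lists (sketch, not driver code), assuming a NIC with
 * the full v2 action capabilities:
 *
 *   queue index 2 / mark id 7      -> accepted (one fate action + mark)
 *   mark id 7 / rss (default)      -> accepted ("mark, then receive normally")
 *   passthru / mark id 7           -> accepted
 *   passthru                       -> rejected (PASSTHRU requires MARK/FLAG)
 *   queue index 2 / drop           -> rejected (two fate actions)
 */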
1332 /** Check if the action is supported */
1333 static int
1334 enic_match_action(const struct rte_flow_action *action,
1335                   const enum rte_flow_action_type *supported_actions)
1336 {
1337         for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
1338              supported_actions++) {
1339                 if (action->type == *supported_actions)
1340                         return 1;
1341         }
1342         return 0;
1343 }
1344
1345 /** Get the NIC filter capabilities structure */
1346 static const struct enic_filter_cap *
1347 enic_get_filter_cap(struct enic *enic)
1348 {
1349         if (enic->flow_filter_mode)
1350                 return &enic_filter_cap[enic->flow_filter_mode];
1351
1352         return NULL;
1353 }
1354
1355 /** Get the actions for this NIC version. */
1356 static const struct enic_action_cap *
1357 enic_get_action_cap(struct enic *enic)
1358 {
1359         const struct enic_action_cap *ea;
1360         uint8_t actions;
1361
1362         actions = enic->filter_actions;
1363         if (actions & FILTER_ACTION_COUNTER_FLAG)
1364                 ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
1365         else if (actions & FILTER_ACTION_DROP_FLAG)
1366                 ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1367         else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1368                 ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1369         else
1370                 ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1371         return ea;
1372 }
1373
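/*
 * Selection above is by decreasing capability (sketch): a NIC reporting
 * COUNTER|DROP|FILTER_ID picks the COUNTER entry, whose action list is a
 * superset of the others, while a NIC reporting none of the v2 flags falls
 * back to the v1 RQ-steering-only entry.
 */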
1374 /* Debug function to dump internal NIC action structure. */
1375 static void
1376 enic_dump_actions(const struct filter_action_v2 *ea)
1377 {
1378         if (ea->type == FILTER_ACTION_RQ_STEERING) {
1379                 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1380         } else if (ea->type == FILTER_ACTION_V2) {
1381                 FLOW_LOG(INFO, "Actions(V2)\n");
1382                 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
1383                         FLOW_LOG(INFO, "\tqueue: %u\n",
1384                                enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1385                 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1386                         FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1387         }
1388 }
1389
1390 /* Debug function to dump internal NIC filter structure. */
1391 static void
1392 enic_dump_filter(const struct filter_v2 *filt)
1393 {
1394         const struct filter_generic_1 *gp;
1395         int i, j, mbyte;
1396         char buf[128], *bp;
1397         char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1398         char l4csum[16], ipfrag[16];
1399
1400         switch (filt->type) {
1401         case FILTER_IPV4_5TUPLE:
1402                 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1403                 break;
1404         case FILTER_USNIC_IP:
1405         case FILTER_DPDK_1:
1406                 /* FIXME: this should be a loop */
1407                 gp = &filt->u.generic_1;
1408                 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
1409                        gp->val_vlan, gp->mask_vlan);
1410
1411                 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1412                         sprintf(ip4, "%s ",
1413                                 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1414                                  ? "ip4(y)" : "ip4(n)");
1415                 else
1416                         sprintf(ip4, "%s ", "ip4(x)");
1417
1418                 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
1419                         sprintf(ip6, "%s ",
1420                                 (gp->val_flags & FILTER_GENERIC_1_IPV6)
1421                                  ? "ip6(y)" : "ip6(n)");
1422                 else
1423                         sprintf(ip6, "%s ", "ip6(x)");
1424
1425                 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1426                         sprintf(udp, "%s ",
1427                                 (gp->val_flags & FILTER_GENERIC_1_UDP)
1428                                  ? "udp(y)" : "udp(n)");
1429                 else
1430                         sprintf(udp, "%s ", "udp(x)");
1431
1432                 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1433                         sprintf(tcp, "%s ",
1434                                 (gp->val_flags & FILTER_GENERIC_1_TCP)
1435                                  ? "tcp(y)" : "tcp(n)");
1436                 else
1437                         sprintf(tcp, "%s ", "tcp(x)");
1438
1439                 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1440                         sprintf(tcpudp, "%s ",
1441                                 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1442                                  ? "tcpudp(y)" : "tcpudp(n)");
1443                 else
1444                         sprintf(tcpudp, "%s ", "tcpudp(x)");
1445
1446                 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1447                         sprintf(ip4csum, "%s ",
1448                                 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1449                                  ? "ip4csum(y)" : "ip4csum(n)");
1450                 else
1451                         sprintf(ip4csum, "%s ", "ip4csum(x)");
1452
1453                 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1454                         sprintf(l4csum, "%s ",
1455                                 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1456                                  ? "l4csum(y)" : "l4csum(n)");
1457                 else
1458                         sprintf(l4csum, "%s ", "l4csum(x)");
1459
1460                 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1461                         sprintf(ipfrag, "%s ",
1462                                 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1463                                  ? "ipfrag(y)" : "ipfrag(n)");
1464                 else
1465                         sprintf(ipfrag, "%s ", "ipfrag(x)");
1466                 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
1467                          tcp, tcpudp, ip4csum, l4csum, ipfrag);
1468
1469                 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1470                         mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1471                         while (mbyte && !gp->layer[i].mask[mbyte])
1472                                 mbyte--;
1473                         if (mbyte == 0)
1474                                 continue;
1475
1476                         bp = buf;
1477                         for (j = 0; j <= mbyte; j++) {
1478                                 sprintf(bp, "%02x",
1479                                         gp->layer[i].mask[j]);
1480                                 bp += 2;
1481                         }
1482                         *bp = '\0';
1483                         FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
1484                         bp = buf;
1485                         for (j = 0; j <= mbyte; j++) {
1486                                 sprintf(bp, "%02x",
1487                                         gp->layer[i].val[j]);
1488                                 bp += 2;
1489                         }
1490                         *bp = '\0';
1491                         FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
1492                 }
1493                 break;
1494         default:
1495                 FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1496                 break;
1497         }
1498 }
1499
1500 /* Debug function to dump internal NIC flow structures. */
1501 static void
1502 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1503 {
1504         enic_dump_filter(filt);
1505         enic_dump_actions(ea);
1506 }
1507
1508
1509 /**
1510  * Internal flow parse/validate function.
1511  *
1512  * @param dev[in] This device pointer.
1513  * @param attrs[in] Flow attributes; only ingress rules are supported.
1514  * @param pattern[in] Item specification array, END-terminated.
1515  * @param actions[in] Action specification array, END-terminated.
1516  * @param error[out] Set with the reason when the flow is rejected.
1517  * @param enic_filter[out]
1518  *   Internal NIC filter structure pointer.
1519  * @param enic_action[out]
1520  *   Internal NIC action structure pointer.
1521  */
1522 static int
1523 enic_flow_parse(struct rte_eth_dev *dev,
1524                 const struct rte_flow_attr *attrs,
1525                 const struct rte_flow_item pattern[],
1526                 const struct rte_flow_action actions[],
1527                 struct rte_flow_error *error,
1528                 struct filter_v2 *enic_filter,
1529                 struct filter_action_v2 *enic_action)
1530 {
1531         int ret = 0;
1532         struct enic *enic = pmd_priv(dev);
1533         const struct enic_filter_cap *enic_filter_cap;
1534         const struct enic_action_cap *enic_action_cap;
1535         const struct rte_flow_action *action;
1536
1537         FLOW_TRACE();
1538
1539         memset(enic_filter, 0, sizeof(*enic_filter));
1540         memset(enic_action, 0, sizeof(*enic_action));
1541
1542         if (!pattern) {
1543                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1544                                    NULL, "No pattern specified");
1545                 return -rte_errno;
1546         }
1547
1548         if (!actions) {
1549                 rte_flow_error_set(error, EINVAL,
1550                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1551                                    NULL, "No action specified");
1552                 return -rte_errno;
1553         }
1554
1555         if (attrs) {
1556                 if (attrs->group) {
1557                         rte_flow_error_set(error, ENOTSUP,
1558                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1559                                            NULL,
1560                                            "priority groups are not supported");
1561                         return -rte_errno;
1562                 } else if (attrs->priority) {
1563                         rte_flow_error_set(error, ENOTSUP,
1564                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1565                                            NULL,
1566                                            "priorities are not supported");
1567                         return -rte_errno;
1568                 } else if (attrs->egress) {
1569                         rte_flow_error_set(error, ENOTSUP,
1570                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1571                                            NULL,
1572                                            "egress is not supported");
1573                         return -rte_errno;
1574                 } else if (attrs->transfer) {
1575                         rte_flow_error_set(error, ENOTSUP,
1576                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1577                                            NULL,
1578                                            "transfer is not supported");
1579                         return -rte_errno;
1580                 } else if (!attrs->ingress) {
1581                         rte_flow_error_set(error, ENOTSUP,
1582                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1583                                            NULL,
1584                                            "only ingress is supported");
1585                         return -rte_errno;
1586                 }
1587
1588         } else {
1589                 rte_flow_error_set(error, EINVAL,
1590                                    RTE_FLOW_ERROR_TYPE_ATTR,
1591                                    NULL, "No attribute specified");
1592                 return -rte_errno;
1593         }
1594
1595         /* Verify Actions. */
1596         enic_action_cap = enic_get_action_cap(enic);
1597         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1598              action++) {
1599                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1600                         continue;
1601                 else if (!enic_match_action(action, enic_action_cap->actions))
1602                         break;
1603         }
1604         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1605                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1606                                    action, "Invalid action.");
1607                 return -rte_errno;
1608         }
1609         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1610         if (ret) {
1611                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1612                            NULL, "Unsupported action.");
1613                 return -rte_errno;
1614         }
1615
1616         /* Verify Flow items. If copying the filter from flow format to enic
1617          * format fails, the flow is not supported
1618          */
1619         enic_filter_cap = enic_get_filter_cap(enic);
1620         if (enic_filter_cap == NULL) {
1621                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1622                            NULL, "Flow API not available");
1623                 return -rte_errno;
1624         }
1625         enic_filter->type = enic->flow_filter_mode;
1626         ret = enic_copy_filter(pattern, enic_filter_cap, enic,
1627                                        enic_filter, error);
1628         return ret;
1629 }
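/*
 * Sketch (application side, not part of the driver): the attribute checks
 * in enic_flow_parse() above only accept a plain ingress rule, i.e. group,
 * priority, egress and transfer must all be zero.
 *
 *	const struct rte_flow_attr attrs = { .ingress = 1 };
 */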
1630
1631 /**
1632  * Push filter/action to the NIC.
1633  *
1634  * @param enic[in]
1635  *   Device structure pointer.
1636  * @param enic_filter[in]
1637  *   Internal NIC filter structure pointer.
1638  * @param enic_action[in]
1639  *   Internal NIC action structure pointer.
1640  * @param error[out]
1641  */
1642 static struct rte_flow *
1643 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1644                    struct filter_action_v2 *enic_action,
1645                    struct rte_flow_error *error)
1646 {
1647         struct rte_flow *flow;
1648         int err;
1649         uint16_t entry;
1650         int ctr_idx;
1651         int last_max_flow_ctr;
1652
1653         FLOW_TRACE();
1654
1655         flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1656         if (!flow) {
1657                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1658                                    NULL, "cannot allocate flow memory");
1659                 return NULL;
1660         }
1661
1662         flow->counter_idx = -1;
1663         last_max_flow_ctr = -1;
1664         if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1665                 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1666                         rte_flow_error_set(error, ENOMEM,
1667                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1668                                            NULL, "cannot allocate counter");
1669                         goto unwind_flow_alloc;
1670                 }
1671                 flow->counter_idx = ctr_idx;
1672                 enic_action->counter_index = ctr_idx;
1673
1674                 /* If index is the largest, increase the counter DMA size */
1675                 if (ctr_idx > enic->max_flow_counter) {
1676                         err = vnic_dev_counter_dma_cfg(enic->vdev,
1677                                                  VNIC_FLOW_COUNTER_UPDATE_MSECS,
1678                                                  ctr_idx + 1);
1679                         if (err) {
1680                                 rte_flow_error_set(error, -err,
1681                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1682                                            NULL, "counter DMA config failed");
1683                                 goto unwind_ctr_alloc;
1684                         }
1685                         last_max_flow_ctr = enic->max_flow_counter;
1686                         enic->max_flow_counter = ctr_idx;
1687                 }
1688         }
1689
1690         /* entry[in] is the queue id, entry[out] is the filter Id for delete */
1691         entry = enic_action->rq_idx;
1692         err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1693                                   enic_action);
1694         if (err) {
1695                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1696                                    NULL, "vnic_dev_classifier error");
1697                 goto unwind_ctr_dma_cfg;
1698         }
1699
1700         flow->enic_filter_id = entry;
1701         flow->enic_filter = *enic_filter;
1702
1703         return flow;
1704
1705 /* unwind if there are errors */
1706 unwind_ctr_dma_cfg:
1707         if (last_max_flow_ctr != -1) {
1708                 /* reduce counter DMA size */
1709                 vnic_dev_counter_dma_cfg(enic->vdev,
1710                                          VNIC_FLOW_COUNTER_UPDATE_MSECS,
1711                                          last_max_flow_ctr + 1);
1712                 enic->max_flow_counter = last_max_flow_ctr;
1713         }
1714 unwind_ctr_alloc:
1715         if (flow->counter_idx != -1)
1716                 vnic_dev_counter_free(enic->vdev, ctr_idx);
1717 unwind_flow_alloc:
1718         rte_free(flow);
1719         return NULL;
1720 }
1721
1722 /**
1723  * Remove filter/action from the NIC.
1724  *
1725  * @param enic[in]
1726  *   Device structure pointer.
1727  * @param flow[in]
1728  *   Flow whose NIC filter entry (and flow counter, if allocated)
1729  *   is removed.
1730  * @param error[out]
1731  *   Set if deleting the classifier entry fails.
1732  */
1733 static int
1734 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1735                    struct rte_flow_error *error)
1736 {
1737         u16 filter_id;
1738         int err;
1739
1740         FLOW_TRACE();
1741
1742         filter_id = flow->enic_filter_id;
1743         err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1744         if (err) {
1745                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1746                                    NULL, "vnic_dev_classifier failed");
1747                 return -err;
1748         }
1749
1750         if (flow->counter_idx != -1) {
1751                 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1752                         dev_err(enic, "counter free failed, idx: %d\n",
1753                                 flow->counter_idx);
1754                 flow->counter_idx = -1;
1755         }
1756         return 0;
1757 }
1758
1759 /*
1760  * The following functions are callbacks for the Generic flow API (rte_flow).
1761  */
1762
1763 /**
1764  * Validate a flow supported by the NIC.
1765  *
1766  * @see rte_flow_validate()
1767  * @see rte_flow_ops
1768  */
1769 static int
1770 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1771                    const struct rte_flow_item pattern[],
1772                    const struct rte_flow_action actions[],
1773                    struct rte_flow_error *error)
1774 {
1775         struct filter_v2 enic_filter;
1776         struct filter_action_v2 enic_action;
1777         int ret;
1778
1779         FLOW_TRACE();
1780
1781         ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1782                                &enic_filter, &enic_action);
1783         if (!ret)
1784                 enic_dump_flow(&enic_action, &enic_filter);
1785         return ret;
1786 }
1787
1788 /**
1789  * Create a flow supported by the NIC.
1790  *
1791  * @see rte_flow_create()
1792  * @see rte_flow_ops
1793  */
1794 static struct rte_flow *
1795 enic_flow_create(struct rte_eth_dev *dev,
1796                  const struct rte_flow_attr *attrs,
1797                  const struct rte_flow_item pattern[],
1798                  const struct rte_flow_action actions[],
1799                  struct rte_flow_error *error)
1800 {
1801         int ret;
1802         struct filter_v2 enic_filter;
1803         struct filter_action_v2 enic_action;
1804         struct rte_flow *flow;
1805         struct enic *enic = pmd_priv(dev);
1806
1807         FLOW_TRACE();
1808
1809         ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1810                               &enic_action);
1811         if (ret < 0)
1812                 return NULL;
1813
1814         rte_spinlock_lock(&enic->flows_lock);
1815         flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1816                                     error);
1817         if (flow)
1818                 LIST_INSERT_HEAD(&enic->flows, flow, next);
1819         rte_spinlock_unlock(&enic->flows_lock);
1820
1821         return flow;
1822 }
1823
1824 /**
1825  * Destroy a flow supported by the NIC.
1826  *
1827  * @see rte_flow_destroy()
1828  * @see rte_flow_ops
1829  */
1830 static int
1831 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1832                   __rte_unused struct rte_flow_error *error)
1833 {
1834         struct enic *enic = pmd_priv(dev);
1835
1836         FLOW_TRACE();
1837
1838         rte_spinlock_lock(&enic->flows_lock);
1839         enic_flow_del_filter(enic, flow, error);
1840         LIST_REMOVE(flow, next);
1841         rte_spinlock_unlock(&enic->flows_lock);
1842         rte_free(flow);
1843         return 0;
1844 }
1845
1846 /**
1847  * Flush all flows on the device.
1848  *
1849  * @see rte_flow_flush()
1850  * @see rte_flow_ops
1851  */
1852 static int
1853 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1854 {
1855         struct rte_flow *flow;
1856         struct enic *enic = pmd_priv(dev);
1857
1858         FLOW_TRACE();
1859
1860         rte_spinlock_lock(&enic->flows_lock);
1861
1862         while (!LIST_EMPTY(&enic->flows)) {
1863                 flow = LIST_FIRST(&enic->flows);
1864                 enic_flow_del_filter(enic, flow, error);
1865                 LIST_REMOVE(flow, next);
1866                 rte_free(flow);
1867         }
1868         rte_spinlock_unlock(&enic->flows_lock);
1869         return 0;
1870 }
1871
1872 static int
1873 enic_flow_query_count(struct rte_eth_dev *dev,
1874                       struct rte_flow *flow, void *data,
1875                       struct rte_flow_error *error)
1876 {
1877         struct enic *enic = pmd_priv(dev);
1878         struct rte_flow_query_count *query;
1879         uint64_t packets, bytes;
1880
1881         FLOW_TRACE();
1882
1883         if (flow->counter_idx == -1) {
1884                 return rte_flow_error_set(error, ENOTSUP,
1885                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1886                                           NULL,
1887                                           "flow does not have counter");
1888         }
1889         query = (struct rte_flow_query_count *)data;
1890         if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1891                                     !!query->reset, &packets, &bytes)) {
1892                 return rte_flow_error_set
1893                         (error, EINVAL,
1894                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1895                          NULL,
1896                          "cannot read counter");
1897         }
1898         query->hits_set = 1;
1899         query->bytes_set = 1;
1900         query->hits = packets;
1901         query->bytes = bytes;
1902         return 0;
1903 }
1904
1905 static int
1906 enic_flow_query(struct rte_eth_dev *dev,
1907                 struct rte_flow *flow,
1908                 const struct rte_flow_action *actions,
1909                 void *data,
1910                 struct rte_flow_error *error)
1911 {
1912         int ret = 0;
1913
1914         FLOW_TRACE();
1915
1916         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1917                 switch (actions->type) {
1918                 case RTE_FLOW_ACTION_TYPE_VOID:
1919                         break;
1920                 case RTE_FLOW_ACTION_TYPE_COUNT:
1921                         ret = enic_flow_query_count(dev, flow, data, error);
1922                         break;
1923                 default:
1924                         return rte_flow_error_set(error, ENOTSUP,
1925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1926                                                   actions,
1927                                                   "action not supported");
1928                 }
1929                 if (ret < 0)
1930                         return ret;
1931         }
1932         return 0;
1933 }
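/*
 * Illustrative application-side query of the COUNT action handled above.
 * This is a sketch only: it assumes "flow" was returned by rte_flow_create()
 * with a COUNT action on port 0, and that <inttypes.h> is included; the
 * port id is an example value.
 *
 *	struct rte_flow_query_count counters = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(0, flow, &count_action, &counters, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       counters.hits, counters.bytes);
 */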
1934
1935 /**
1936  * Flow callback registration.
1937  *
1938  * @see rte_flow_ops
1939  */
1940 const struct rte_flow_ops enic_flow_ops = {
1941         .validate = enic_flow_validate,
1942         .create = enic_flow_create,
1943         .destroy = enic_flow_destroy,
1944         .flush = enic_flow_flush,
1945         .query = enic_flow_query,
1946 };
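/*
 * Minimal usage sketch (application code, not compiled here): the public
 * rte_flow calls below reach this PMD through the enic_flow_ops table
 * above. Port id 0 and queue index 1 are arbitrary example values; items
 * with a NULL spec match any packet of that protocol.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(0, &attr, pattern, actions, &err);
 */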