net/enic: move arguments into struct
drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
};
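
/*
 * Illustrative sketch (not part of the driver) of how a caller is
 * expected to fill copy_item_args before invoking a copy_item function.
 * The filter, item and ret variables here are hypothetical caller
 * context; see enic_copy_filter() below for the real usage.
 *
 *      struct filter_v2 filter;
 *      uint8_t inner_ofst = 0;
 *      struct copy_item_args args;
 *
 *      args.filter = &filter;
 *      args.inner_ofst = &inner_ofst;
 *      args.item = item;        // current rte_flow_item being copied
 *      ret = item_info->copy_item(&args);
 */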

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types */
        enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        copy_action_fn *copy_fn;
};
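
/*
 * The structures above form a small dispatch table: each NIC/firmware
 * version gets an array of enic_items (indexed by rte_flow item type)
 * plus an enic_action_cap, so supporting a new item is mostly a matter
 * of one copy function and one table entry. A minimal sketch of what a
 * new entry would look like; RTE_FLOW_ITEM_TYPE_FOO and
 * enic_copy_item_foo_v2 are hypothetical names:
 *
 *      static enic_copy_item_fn enic_copy_item_foo_v2;
 *
 *      [RTE_FLOW_ITEM_TYPE_FOO] = {
 *              .copy_item = enic_copy_item_foo_v2,
 *              .valid_start_item = 0,
 *              .prev_items = (const enum rte_flow_item_type[]) {
 *                             RTE_FLOW_ITEM_TYPE_ETH,
 *                             RTE_FLOW_ITEM_TYPE_END,
 *              },
 *      },
 */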

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
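
/*
 * Given the table above, the only patterns the v1 (5-tuple) filter
 * accepts are, in testpmd flow syntax, for example:
 *
 *      pattern ipv4 / end
 *      pattern ipv4 / udp / end
 *      pattern ipv4 / tcp / end
 *
 * Anything starting above layer 3 (e.g. "pattern udp / end") fails the
 * prev_items/valid_start_item check in item_stacking_valid() below.
 */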

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};
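
/*
 * enic->flow_filter_mode (one of FILTER_IPV4_5TUPLE, FILTER_USNIC_IP,
 * FILTER_DPDK_1) indexes directly into this table; see
 * enic_get_filter_cap() below. For instance, a NIC reporting
 * FILTER_DPDK_1 validates patterns against enic_items_v3:
 *
 *      const struct enic_filter_cap *cap =
 *              &enic_filter_cap[enic->flow_filter_mode];
 */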

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};
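
/*
 * Selection among these entries is by the most capable flag the NIC
 * advertises (see enic_get_action_cap() below). For example, a NIC
 * whose filter_actions include FILTER_ACTION_COUNTER_FLAG is offered
 * the full enic_supported_actions_v2_count list:
 *
 *      if (enic->filter_actions & FILTER_ACTION_COUNTER_FLAG)
 *              ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
 */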

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}
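
/*
 * Usage sketch: the v1 copy functions below call this to require that
 * the caller's mask matches the supported mask byte for byte, e.g.:
 *
 *      struct ipv4_hdr supported_mask = {
 *              .src_addr = 0xffffffff,
 *              .dst_addr = 0xffffffff,
 *      };
 *      if (!mask_exact_match((const u8 *)&supported_mask,
 *                            (const u8 *)mask, sizeof(supported_mask)))
 *              return ENOTSUP;
 */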

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        /* Unlike the other items, TCP requires an explicit mask */
        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
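
/*
 * Worked example of *inner_ofst across a VXLAN pattern such as
 * "eth / ipv4 / udp / vxlan / eth / ipv4 / end": the outer eth, ipv4
 * and udp items are copied into the L2/L3/L4 layers while
 * *inner_ofst == 0. This function then copies the VXLAN header to the
 * start of L5 and sets *inner_ofst = sizeof(struct vxlan_hdr) (8), so
 * the inner eth item lands at L5 offset 8 and advances the offset by
 * sizeof(struct ether_hdr) (14), and so on, until the headers would
 * exceed FILTER_GENERIC_1_KEY_LEN and the copy functions return ENOTSUP.
 */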

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check whether it is allowed
         * to start the stack.
         */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
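
/*
 * Example: with enic_items_v2, "eth / ipv4 / udp" is accepted because
 * ipv4 lists eth and udp lists ipv4 in prev_items. "udp" as the first
 * item is rejected: prev_item is RTE_FLOW_ITEM_TYPE_END (the list
 * terminator, so the loop above matches nothing) and UDP has
 * valid_start_item == 0 in that table.
 */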

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filter derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned the nic does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                ret = item_info->copy_item(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}
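
/*
 * Sketch of the kind of pattern array this function walks, as an
 * application would build it for rte_flow_validate()/rte_flow_create();
 * the ip_spec/ip_mask and udp_spec/udp_mask variables are hypothetical:
 *
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */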

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
        bool passthru = false;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
                        /*
                         * Like RSS above, PASSTHRU + MARK may be used to
                         * "mark and then receive normally". MARK usually comes
                         * after PASSTHRU, so remember we have seen passthru
                         * and check for mark later.
                         */
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        passthru = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        /* Only PASSTHRU + MARK is allowed */
        if (passthru && !(overlap & MARK))
                return ENOTSUP;
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
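
/*
 * Sketch of an action list this function handles: MARK + QUEUE marks
 * matched packets with id 5 (stored internally as filter_id 6, per the
 * mapping above) and steers them to queue 1. Hypothetical
 * application-side array:
 *
 *      struct rte_flow_action_mark mark = { .id = 5 };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */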

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}
1292
1293 /* Debug function to dump internal NIC flow structures. */
1294 static void
1295 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1296 {
1297         enic_dump_filter(filt);
1298         enic_dump_actions(ea);
1299 }
1300
1301
1302 /**
1303  * Internal flow parse/validate function.
1304  *
1305  * @param dev[in]
1306  *   This device pointer.
1307  * @param pattern[in]
1308  * @param actions[in]
1309  * @param error[out]
1310  * @param enic_filter[out]
1311  *   Internal NIC filter structure pointer.
1312  * @param enic_action[out]
1313  *   Internal NIC action structure pointer.
1314  */
1315 static int
1316 enic_flow_parse(struct rte_eth_dev *dev,
1317                 const struct rte_flow_attr *attrs,
1318                 const struct rte_flow_item pattern[],
1319                 const struct rte_flow_action actions[],
1320                 struct rte_flow_error *error,
1321                 struct filter_v2 *enic_filter,
1322                 struct filter_action_v2 *enic_action)
1323 {
1324         unsigned int ret = 0;
1325         struct enic *enic = pmd_priv(dev);
1326         const struct enic_filter_cap *enic_filter_cap;
1327         const struct enic_action_cap *enic_action_cap;
1328         const struct rte_flow_action *action;
1329
1330         FLOW_TRACE();
1331
1332         memset(enic_filter, 0, sizeof(*enic_filter));
1333         memset(enic_action, 0, sizeof(*enic_action));
1334
1335         if (!pattern) {
1336                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1337                                    NULL, "No pattern specified");
1338                 return -rte_errno;
1339         }
1340
1341         if (!actions) {
1342                 rte_flow_error_set(error, EINVAL,
1343                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1344                                    NULL, "No action specified");
1345                 return -rte_errno;
1346         }
1347
1348         if (attrs) {
1349                 if (attrs->group) {
1350                         rte_flow_error_set(error, ENOTSUP,
1351                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1352                                            NULL,
1353                                            "priority groups are not supported");
1354                         return -rte_errno;
1355                 } else if (attrs->priority) {
1356                         rte_flow_error_set(error, ENOTSUP,
1357                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1358                                            NULL,
1359                                            "priorities are not supported");
1360                         return -rte_errno;
1361                 } else if (attrs->egress) {
1362                         rte_flow_error_set(error, ENOTSUP,
1363                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1364                                            NULL,
1365                                            "egress is not supported");
1366                         return -rte_errno;
1367                 } else if (attrs->transfer) {
1368                         rte_flow_error_set(error, ENOTSUP,
1369                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1370                                            NULL,
1371                                            "transfer is not supported");
1372                         return -rte_errno;
1373                 } else if (!attrs->ingress) {
1374                         rte_flow_error_set(error, ENOTSUP,
1375                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1376                                            NULL,
1377                                            "only ingress is supported");
1378                         return -rte_errno;
1379                 }
1380
1381         } else {
1382                 rte_flow_error_set(error, EINVAL,
1383                                    RTE_FLOW_ERROR_TYPE_ATTR,
1384                                    NULL, "No attribute specified");
1385                 return -rte_errno;
1386         }
1387
1388         /* Verify Actions. */
1389         enic_action_cap = enic_get_action_cap(enic);
1390         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1391              action++) {
1392                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1393                         continue;
1394                 else if (!enic_match_action(action, enic_action_cap->actions))
1395                         break;
1396         }
1397         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1398                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1399                                    action, "Invalid action.");
1400                 return -rte_errno;
1401         }
1402         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1403         if (ret) {
1404                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1405                            NULL, "Unsupported action.");
1406                 return -rte_errno;
1407         }
1408
1409         /* Verify Flow items. If copying the filter from flow format to enic
1410          * format fails, the flow is not supported
1411          */
1412         enic_filter_cap = enic_get_filter_cap(enic);
1413         if (enic_filter_cap == NULL) {
1414                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1415                            NULL, "Flow API not available");
1416                 return -rte_errno;
1417         }
1418         enic_filter->type = enic->flow_filter_mode;
1419         ret = enic_copy_filter(pattern, enic_filter_cap,
1420                                        enic_filter, error);
1421         return ret;
1422 }
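/*
 * Illustrative only, not driver code: a minimal attribute/pattern/action
 * set that this parser accepts. Specs and masks are left NULL for brevity;
 * port_id and the queue index are assumptions for the sketch.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */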
1423
1424 /**
1425  * Push filter/action to the NIC.
1426  *
1427  * @param enic[in]
1428  *   Device structure pointer.
1429  * @param enic_filter[in]
1430  *   Internal NIC filter structure pointer.
1431  * @param enic_action[in]
1432  *   Internal NIC action structure pointer.
1433  * @param error[out] Filled in if an error occurs.
1434  */
1435 static struct rte_flow *
1436 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1437                    struct filter_action_v2 *enic_action,
1438                    struct rte_flow_error *error)
1439 {
1440         struct rte_flow *flow;
1441         int err;
1442         uint16_t entry;
1443         int ctr_idx;
1444         int last_max_flow_ctr;
1445
1446         FLOW_TRACE();
1447
1448         flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1449         if (!flow) {
1450                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1451                                    NULL, "cannot allocate flow memory");
1452                 return NULL;
1453         }
1454
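        /* -1 is a sentinel below: no counter allocated for this flow, and
         * no previous counter DMA size to restore on unwind.
         */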
1455         flow->counter_idx = -1;
1456         last_max_flow_ctr = -1;
1457         if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1458                 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1459                         rte_flow_error_set(error, ENOMEM,
1460                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1461                                            NULL, "cannot allocate counter");
1462                         goto unwind_flow_alloc;
1463                 }
1464                 flow->counter_idx = ctr_idx;
1465                 enic_action->counter_index = ctr_idx;
1466
1467                 /* If index is the largest, increase the counter DMA size */
1468                 if (ctr_idx > enic->max_flow_counter) {
1469                         err = vnic_dev_counter_dma_cfg(enic->vdev,
1470                                                  VNIC_FLOW_COUNTER_UPDATE_MSECS,
1471                                                  ctr_idx + 1);
1472                         if (err) {
1473                                 rte_flow_error_set(error, -err,
1474                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1475                                            NULL, "counter DMA config failed");
1476                                 goto unwind_ctr_alloc;
1477                         }
1478                         last_max_flow_ctr = enic->max_flow_counter;
1479                         enic->max_flow_counter = ctr_idx;
1480                 }
1481         }
1482
1483         /* entry[in] is the queue id, entry[out] is the filter ID for delete */
1484         entry = enic_action->rq_idx;
1485         err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1486                                   enic_action);
1487         if (err) {
1488                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1489                                    NULL, "vnic_dev_classifier error");
1490                 goto unwind_ctr_dma_cfg;
1491         }
1492
1493         flow->enic_filter_id = entry;
1494         flow->enic_filter = *enic_filter;
1495
1496         return flow;
1497
1498 /* unwind if there are errors */
1499 unwind_ctr_dma_cfg:
1500         if (last_max_flow_ctr != -1) {
1501                 /* reduce counter DMA size */
1502                 vnic_dev_counter_dma_cfg(enic->vdev,
1503                                          VNIC_FLOW_COUNTER_UPDATE_MSECS,
1504                                          last_max_flow_ctr + 1);
1505                 enic->max_flow_counter = last_max_flow_ctr;
1506         }
1507 unwind_ctr_alloc:
1508         if (flow->counter_idx != -1)
1509                 vnic_dev_counter_free(enic->vdev, ctr_idx);
1510 unwind_flow_alloc:
1511         rte_free(flow);
1512         return NULL;
1513 }
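/*
 * Illustrative only: the counter path above is reached when the
 * application appends a COUNT action, e.g.
 *
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * which makes the action copy_fn set FILTER_ACTION_COUNTER_FLAG in the
 * parsed filter_action_v2.
 */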
1514
1515 /**
1516  * Remove filter/action from the NIC.
1517  *
1518  * @param enic[in]
1519  *   Device structure pointer.
1520  * @param flow[in]
1521  *   Flow whose NIC filter is removed; its flow counter, if one was
1522  *   allocated, is freed as well.
1523  * @param error[out]
1524  *   Filled in if an error occurs.
1525  */
1526 static int
1527 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1528                    struct rte_flow_error *error)
1529 {
1530         u16 filter_id;
1531         int err;
1532
1533         FLOW_TRACE();
1534
1535         filter_id = flow->enic_filter_id;
1536         err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1537         if (err) {
1538                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1539                                    NULL, "vnic_dev_classifier failed");
1540                 return -err;
1541         }
1542
1543         if (flow->counter_idx != -1) {
1544                 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1545                         dev_err(enic, "counter free failed, idx: %d\n",
1546                                 flow->counter_idx);
1547                 flow->counter_idx = -1;
1548         }
1549         return 0;
1550 }
1551
1552 /*
1553  * The following functions are callbacks for Generic flow API.
1554  */
1555
1556 /**
1557  * Validate a flow supported by the NIC.
1558  *
1559  * @see rte_flow_validate()
1560  * @see rte_flow_ops
1561  */
1562 static int
1563 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1564                    const struct rte_flow_item pattern[],
1565                    const struct rte_flow_action actions[],
1566                    struct rte_flow_error *error)
1567 {
1568         struct filter_v2 enic_filter;
1569         struct filter_action_v2 enic_action;
1570         int ret;
1571
1572         FLOW_TRACE();
1573
1574         ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1575                                &enic_filter, &enic_action);
1576         if (!ret)
1577                 enic_dump_flow(&enic_action, &enic_filter);
1578         return ret;
1579 }
1580
1581 /**
1582  * Create a flow supported by the NIC.
1583  *
1584  * @see rte_flow_create()
1585  * @see rte_flow_ops
1586  */
1587 static struct rte_flow *
1588 enic_flow_create(struct rte_eth_dev *dev,
1589                  const struct rte_flow_attr *attrs,
1590                  const struct rte_flow_item pattern[],
1591                  const struct rte_flow_action actions[],
1592                  struct rte_flow_error *error)
1593 {
1594         int ret;
1595         struct filter_v2 enic_filter;
1596         struct filter_action_v2 enic_action;
1597         struct rte_flow *flow;
1598         struct enic *enic = pmd_priv(dev);
1599
1600         FLOW_TRACE();
1601
1602         ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1603                               &enic_action);
1604         if (ret < 0)
1605                 return NULL;
1606
1607         rte_spinlock_lock(&enic->flows_lock);
1608         flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1609                                     error);
1610         if (flow)
1611                 LIST_INSERT_HEAD(&enic->flows, flow, next);
1612         rte_spinlock_unlock(&enic->flows_lock);
1613
1614         return flow;
1615 }
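/*
 * Illustrative only: application-side lifecycle that exercises the
 * create/destroy callbacks above (port_id, attr, pattern and actions as
 * in the earlier sketch).
 *
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 *   if (f != NULL)
 *           rte_flow_destroy(port_id, f, &err);
 */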
1616
1617 /**
1618  * Destroy a flow supported by the NIC.
1619  *
1620  * @see rte_flow_destroy()
1621  * @see rte_flow_ops
1622  */
1623 static int
1624 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1625                   struct rte_flow_error *error)
1626 {
1627         struct enic *enic = pmd_priv(dev);
1628
1629         FLOW_TRACE();
1630
1631         rte_spinlock_lock(&enic->flows_lock);
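        /* The NIC-side delete status is intentionally not propagated:
         * the flow is unlinked and freed even if the delete fails.
         */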
1632         enic_flow_del_filter(enic, flow, error);
1633         LIST_REMOVE(flow, next);
1634         rte_spinlock_unlock(&enic->flows_lock);
1635         rte_free(flow);
1636         return 0;
1637 }
1638
1639 /**
1640  * Flush all flows on the device.
1641  *
1642  * @see rte_flow_flush()
1643  * @see rte_flow_ops
1644  */
1645 static int
1646 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1647 {
1648         struct rte_flow *flow;
1649         struct enic *enic = pmd_priv(dev);
1650
1651         FLOW_TRACE();
1652
1653         rte_spinlock_lock(&enic->flows_lock);
1654
1655         while (!LIST_EMPTY(&enic->flows)) {
1656                 flow = LIST_FIRST(&enic->flows);
1657                 enic_flow_del_filter(enic, flow, error);
1658                 LIST_REMOVE(flow, next);
1659                 rte_free(flow);
1660         }
1661         rte_spinlock_unlock(&enic->flows_lock);
1662         return 0;
1663 }
1664
1665 static int
1666 enic_flow_query_count(struct rte_eth_dev *dev,
1667                       struct rte_flow *flow, void *data,
1668                       struct rte_flow_error *error)
1669 {
1670         struct enic *enic = pmd_priv(dev);
1671         struct rte_flow_query_count *query;
1672         uint64_t packets, bytes;
1673
1674         FLOW_TRACE();
1675
1676         if (flow->counter_idx == -1) {
1677                 return rte_flow_error_set(error, ENOTSUP,
1678                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1679                                           NULL,
1680                                           "flow does not have counter");
1681         }
1682         query = (struct rte_flow_query_count *)data;
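        /* data is assumed to point at a rte_flow_query_count; a non-zero
         * 'reset' clears the hardware counter as part of the read.
         */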
1683         if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1684                                     !!query->reset, &packets, &bytes)) {
1685                 return rte_flow_error_set
1686                         (error, EINVAL,
1687                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1688                          NULL,
1689                          "cannot read counter");
1690         }
1691         query->hits_set = 1;
1692         query->bytes_set = 1;
1693         query->hits = packets;
1694         query->bytes = bytes;
1695         return 0;
1696 }
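/*
 * Illustrative only: reading the counter through the generic API. The
 * END-terminated action list matches the loop in enic_flow_query() below.
 *
 *   struct rte_flow_query_count cnt = { .reset = 1 };
 *   struct rte_flow_action count_actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   if (rte_flow_query(port_id, flow, count_actions, &cnt, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  cnt.hits, cnt.bytes);
 */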
1697
1698 static int
1699 enic_flow_query(struct rte_eth_dev *dev,
1700                 struct rte_flow *flow,
1701                 const struct rte_flow_action *actions,
1702                 void *data,
1703                 struct rte_flow_error *error)
1704 {
1705         int ret = 0;
1706
1707         FLOW_TRACE();
1708
1709         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1710                 switch (actions->type) {
1711                 case RTE_FLOW_ACTION_TYPE_VOID:
1712                         break;
1713                 case RTE_FLOW_ACTION_TYPE_COUNT:
1714                         ret = enic_flow_query_count(dev, flow, data, error);
1715                         break;
1716                 default:
1717                         return rte_flow_error_set(error, ENOTSUP,
1718                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1719                                                   actions,
1720                                                   "action not supported");
1721                 }
1722                 if (ret < 0)
1723                         return ret;
1724         }
1725         return 0;
1726 }
1727
1728 /**
1729  * Flow callback registration.
1730  *
1731  * @see rte_flow_ops
1732  */
1733 const struct rte_flow_ops enic_flow_ops = {
1734         .validate = enic_flow_validate,
1735         .create = enic_flow_create,
1736         .destroy = enic_flow_destroy,
1737         .flush = enic_flow_flush,
1738         .query = enic_flow_query,
1739 };
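/*
 * Note: in this DPDK release applications reach enic_flow_ops through the
 * ethdev .filter_ctrl op with RTE_ETH_FILTER_GENERIC. A sketch of the glue
 * follows (the actual hook lives in the enic ethdev code; the function
 * name here is an assumption):
 *
 *   static int
 *   enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
 *                           enum rte_filter_type filter_type,
 *                           enum rte_filter_op filter_op, void *arg)
 *   {
 *           if (filter_type == RTE_ETH_FILTER_GENERIC) {
 *                   if (filter_op != RTE_ETH_FILTER_GET)
 *                           return -EINVAL;
 *                   *(const void **)arg = &enic_flow_ops;
 *                   return 0;
 *           }
 *           return -ENOTSUP;
 *   }
 */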