net/enic: enable limited passthru flow action
drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
		"%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
		fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	int (*copy_item)(const struct rte_flow_item *item,
			 struct filter_v2 *enic_filter, u8 *inner_ofst);
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** list of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
			  struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};

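/*
 * Illustrative sketch only, not driver code: a pattern the v1 table above
 * can accept. IPv4 must come first, UDP may follow it, and both spec and
 * the default rte_flow masks amount to an exact 5-tuple match. The
 * addresses and ports below are hypothetical.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0x0a000001),
 *		.dst_addr = rte_cpu_to_be_32(0x0a000002),
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(1000),
 *		.dst_port = rte_cpu_to_be_16(2000),
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec,
 *		  .mask = &rte_flow_item_ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec,
 *		  .mask = &rte_flow_item_udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
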
/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};

/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};

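/*
 * Illustrative sketch only, not driver code: with Advanced Filters
 * enabled (the v3 table above), an encapsulated flow can match both outer
 * and inner headers. The inner ETH item is legal because VXLAN appears in
 * ETH's prev_items list; inner_ofst then tracks where each inner header
 * lands in the L5 key. inner_eth_spec is hypothetical.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &inner_eth_spec,
 *		  .mask = &rte_flow_item_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
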
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	},
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
	},
};

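/*
 * Illustrative sketch only, not driver code: an action list that any of
 * the v2 capability sets above accepts, steering to a queue and marking
 * the packet so the application can recognize it. The queue index and
 * mark ID are hypothetical.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
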
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
		 unsigned int size)
{
	unsigned int i;
	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
			return 0;
	}
	return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;

	return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;

	return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			     (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;

	return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	if (*inner_ofst == 0) {
		/* outer header */
		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
		       sizeof(struct ether_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
		       sizeof(struct ether_hdr));
	} else {
		/* inner header */
		if ((*inner_ofst + sizeof(struct ether_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		/* Offset into L5 where inner Ethernet header goes */
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       &enic_mask, sizeof(struct ether_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       &enic_spec, sizeof(struct ether_hdr));
		*inner_ofst += sizeof(struct ether_hdr);
	}
	return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	if (*inner_ofst == 0) {
		struct ether_hdr *eth_mask =
			(void *)gp->layer[FILTER_GENERIC_1_L2].mask;
		struct ether_hdr *eth_val =
			(void *)gp->layer[FILTER_GENERIC_1_L2].val;

		/* Outer TPID cannot be matched */
		if (eth_mask->ether_type)
			return ENOTSUP;
		eth_mask->ether_type = mask->inner_type;
		eth_val->ether_type = spec->inner_type;

		/* Outer header. Use the vlan mask/val fields */
		gp->mask_vlan = mask->tci;
		gp->val_vlan = spec->tci;
	} else {
		/* Inner header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct vlan_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct vlan_hdr));
		*inner_ofst += sizeof(struct vlan_hdr);
	}
	return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst == 0) {
		/* Match IPv4 */
		gp->mask_flags |= FILTER_GENERIC_1_IPV4;
		gp->val_flags |= FILTER_GENERIC_1_IPV4;

		/* Match all if no spec */
		if (!spec)
			return 0;

		if (!mask)
			mask = &rte_flow_item_ipv4_mask;

		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv4_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv4_hdr));
	} else {
		/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv4_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv4_hdr));
		*inner_ofst += sizeof(struct ipv4_hdr);
	}
	return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match IPv6 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv6_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv6_hdr));
	} else {
		/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv6_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv6_hdr));
		*inner_ofst += sizeof(struct ipv6_hdr);
	}
	return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match UDP */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct udp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct udp_hdr));
	} else {
		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct udp_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct udp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct udp_hdr));
		*inner_ofst += sizeof(struct udp_hdr);
	}
	return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match TCP */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		return ENOTSUP;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct tcp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct tcp_hdr));
	} else {
		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct tcp_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct tcp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct tcp_hdr));
		*inner_ofst += sizeof(struct tcp_hdr);
	}
	return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		struct ipv4_hdr *ip;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		struct ipv6_hdr *ip;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}

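/*
 * Illustrative note, not driver code: after enic_copy_item_sctp_v2() runs
 * on a pattern of IPV4 followed by SCTP, the L3 layer of the filter
 * carries the protocol match that the hardware lacks a flag for:
 *
 *	((struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val)
 *		->next_proto_id == IPPROTO_SCTP
 *	((struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask)
 *		->next_proto_id == 0xff
 */
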
/**
 * Copy VxLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
			struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst)
		return EINVAL;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}

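/*
 * Illustrative note, not driver code: how inner_ofst advances while
 * enic_copy_filter() walks an encapsulated pattern such as
 * IPV4 / UDP / VXLAN / ETH / IPV4:
 *
 *	IPV4	outer, inner_ofst == 0, copied into L3
 *	UDP	outer, inner_ofst == 0, copied into L4
 *	VXLAN	copied to the start of L5, inner_ofst = sizeof(vxlan_hdr)
 *	ETH	inner, copied at L5 + inner_ofst, then += sizeof(ether_hdr)
 *	IPV4	inner, copied at L5 + inner_ofst, then += sizeof(ipv4_hdr)
 */
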
/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	FLOW_TRACE();

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check whether that is allowed */
	if (is_first_item && item_info->valid_start_item)
		return 1;

	return 0;
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;

	u8 is_first_item = 1;

	FLOW_TRACE();

	prev_item = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Unsupported item.");
			return -rte_errno;
		}

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		ret = item_info->copy_item(item, enic_filter, &inner_ofst);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}

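/*
 * Illustrative note, not driver code: with the v1 table, a pattern of
 * UDP / END fails in enic_copy_filter(). UDP's valid_start_item is 0 and
 * its prev_items list only allows IPV4, so item_stacking_valid() returns
 * 0 and the flow is rejected with "stacking error".
 */
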
/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, };
	uint32_t overlap = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   using 0.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			 */
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return EINVAL;
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 &&
				(rss->types == 0 ||
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
				rss->key_len == 0;
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
			if (!allow)
				return ENOTSUP;
			if (overlap & FATE)
				return ENOTSUP;
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
				return ENOTSUP;
			overlap |= FATE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
			 */
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			passthru = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
		return ENOTSUP;
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}

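/*
 * Illustrative sketch only, not driver code: the limited PASSTHRU support
 * that enic_copy_action_v2() accepts. PASSTHRU alone is rejected; it must
 * be paired with MARK (or FLAG) so the flow "marks and then receives
 * normally". The mark ID is hypothetical.
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PASSTHRU },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
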
/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];

	return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;
	uint8_t actions;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}

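/*
 * Illustrative note, not driver code: enic_get_action_cap() picks the
 * richest capability the adapter advertises. If enic->filter_actions has
 * FILTER_ACTION_COUNTER_FLAG set, flows may use QUEUE, MARK, FLAG, DROP,
 * COUNT, RSS and PASSTHRU; an adapter with only
 * FILTER_ACTION_RQ_STEERING_FLAG is limited to QUEUE.
 */
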
/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
			       enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
	}
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	const struct filter_generic_1 *gp;
	int i, j, mbyte;
	char buf[128], *bp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
		       gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
		}
		break;
	default:
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}


1377 /**
1378  * Internal flow parse/validate function.
1379  *
1380  * @param dev[in]
1381  *   This device pointer.
1382  * @param pattern[in]
1383  * @param actions[in]
1384  * @param error[out]
1385  * @param enic_filter[out]
1386  *   Internal NIC filter structure pointer.
1387  * @param enic_action[out]
1388  *   Internal NIC action structure pointer.
1389  */
1390 static int
1391 enic_flow_parse(struct rte_eth_dev *dev,
1392                 const struct rte_flow_attr *attrs,
1393                 const struct rte_flow_item pattern[],
1394                 const struct rte_flow_action actions[],
1395                 struct rte_flow_error *error,
1396                 struct filter_v2 *enic_filter,
1397                 struct filter_action_v2 *enic_action)
1398 {
1399         unsigned int ret = 0;
1400         struct enic *enic = pmd_priv(dev);
1401         const struct enic_filter_cap *enic_filter_cap;
1402         const struct enic_action_cap *enic_action_cap;
1403         const struct rte_flow_action *action;
1404
1405         FLOW_TRACE();
1406
1407         memset(enic_filter, 0, sizeof(*enic_filter));
1408         memset(enic_action, 0, sizeof(*enic_action));
1409
1410         if (!pattern) {
1411                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1412                                    NULL, "No pattern specified");
1413                 return -rte_errno;
1414         }
1415
1416         if (!actions) {
1417                 rte_flow_error_set(error, EINVAL,
1418                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1419                                    NULL, "No action specified");
1420                 return -rte_errno;
1421         }
1422
1423         if (attrs) {
1424                 if (attrs->group) {
1425                         rte_flow_error_set(error, ENOTSUP,
1426                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1427                                            NULL,
1428                                            "priority groups are not supported");
1429                         return -rte_errno;
1430                 } else if (attrs->priority) {
1431                         rte_flow_error_set(error, ENOTSUP,
1432                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1433                                            NULL,
1434                                            "priorities are not supported");
1435                         return -rte_errno;
1436                 } else if (attrs->egress) {
1437                         rte_flow_error_set(error, ENOTSUP,
1438                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1439                                            NULL,
1440                                            "egress is not supported");
1441                         return -rte_errno;
1442                 } else if (attrs->transfer) {
1443                         rte_flow_error_set(error, ENOTSUP,
1444                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1445                                            NULL,
1446                                            "transfer is not supported");
1447                         return -rte_errno;
1448                 } else if (!attrs->ingress) {
1449                         rte_flow_error_set(error, ENOTSUP,
1450                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1451                                            NULL,
1452                                            "only ingress is supported");
1453                         return -rte_errno;
1454                 }
1455
1456         } else {
1457                 rte_flow_error_set(error, EINVAL,
1458                                    RTE_FLOW_ERROR_TYPE_ATTR,
1459                                    NULL, "No attribute specified");
1460                 return -rte_errno;
1461         }
1462
1463         /* Verify Actions. */
1464         enic_action_cap = enic_get_action_cap(enic);
1465         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1466              action++) {
1467                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1468                         continue;
1469                 else if (!enic_match_action(action, enic_action_cap->actions))
1470                         break;
1471         }
1472         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1473                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1474                                    action, "Invalid action.");
1475                 return -rte_errno;
1476         }
1477         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1478         if (ret) {
1479                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1480                                    NULL, "Unsupported action.");
1481                 return -rte_errno;
1482         }
1483
1484         /* Verify Flow items. If copying the filter from flow format to enic
1485          * format fails, the flow is not supported
1486          */
1487         enic_filter_cap = enic_get_filter_cap(enic);
1488         if (enic_filter_cap == NULL) {
1489                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1490                                    NULL, "Flow API not available");
1491                 return -rte_errno;
1492         }
1493         enic_filter->type = enic->flow_filter_mode;
1494         ret = enic_copy_filter(pattern, enic_filter_cap,
1495                                enic_filter, error);
1496         return ret;
1497 }
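/*
 * For reference, a minimal rule that exercises the parse path above
 * (a sketch only; dst_ip is a placeholder, and actual support depends
 * on the NIC's filter and action capabilities):
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_ipv4 ip_spec = { .hdr.dst_addr = dst_ip };
 *      struct rte_flow_item_ipv4 ip_mask = { .hdr.dst_addr = 0xffffffff };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * Attributes are checked first (ingress only, group 0, priority 0),
 * then each action against the NIC's action capability, and finally
 * the items are converted into the filter_v2 layer masks/values.
 */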
1498
1499 /**
1500  * Push filter/action to the NIC.
1501  *
1502  * @param enic[in]
1503  *   Device structure pointer.
1504  * @param enic_filter[in]
1505  *   Internal NIC filter structure pointer.
1506  * @param enic_action[in]
1507  *   Internal NIC action structure pointer.
1508  * @param error[out]
1509  */
1510 static struct rte_flow *
1511 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1512                    struct filter_action_v2 *enic_action,
1513                    struct rte_flow_error *error)
1514 {
1515         struct rte_flow *flow;
1516         int err;
1517         uint16_t entry;
1518         int ctr_idx;
1519         int last_max_flow_ctr;
1520
1521         FLOW_TRACE();
1522
1523         flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1524         if (!flow) {
1525                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1526                                    NULL, "cannot allocate flow memory");
1527                 return NULL;
1528         }
1529
1530         flow->counter_idx = -1;
1531         last_max_flow_ctr = -1;
1532         if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1533                 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1534                         rte_flow_error_set(error, ENOMEM,
1535                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1536                                            NULL, "cannot allocate counter");
1537                         goto unwind_flow_alloc;
1538                 }
1539                 flow->counter_idx = ctr_idx;
1540                 enic_action->counter_index = ctr_idx;
1541
1542                 /* If index is the largest, increase the counter DMA size */
1543                 if (ctr_idx > enic->max_flow_counter) {
1544                         err = vnic_dev_counter_dma_cfg(enic->vdev,
1545                                                  VNIC_FLOW_COUNTER_UPDATE_MSECS,
1546                                                  ctr_idx + 1);
1547                         if (err) {
1548                                 rte_flow_error_set(error, -err,
1549                                            RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1550                                            NULL, "counter DMA config failed");
1551                                 goto unwind_ctr_alloc;
1552                         }
1553                         last_max_flow_ctr = enic->max_flow_counter;
1554                         enic->max_flow_counter = ctr_idx;
1555                 }
1556         }
1557
1558         /* entry[in] is the queue id, entry[out] is the filter id for delete */
1559         entry = enic_action->rq_idx;
1560         err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1561                                   enic_action);
1562         if (err) {
1563                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1564                                    NULL, "vnic_dev_classifier error");
1565                 goto unwind_ctr_dma_cfg;
1566         }
1567
1568         flow->enic_filter_id = entry;
1569         flow->enic_filter = *enic_filter;
1570
1571         return flow;
1572
1573 /* unwind if there are errors */
1574 unwind_ctr_dma_cfg:
1575         if (last_max_flow_ctr != -1) {
1576                 /* reduce counter DMA size */
1577                 vnic_dev_counter_dma_cfg(enic->vdev,
1578                                          VNIC_FLOW_COUNTER_UPDATE_MSECS,
1579                                          last_max_flow_ctr + 1);
1580                 enic->max_flow_counter = last_max_flow_ctr;
1581         }
1582 unwind_ctr_alloc:
1583         if (flow->counter_idx != -1)
1584                 vnic_dev_counter_free(enic->vdev, ctr_idx);
1585 unwind_flow_alloc:
1586         rte_free(flow);
1587         return NULL;
1588 }
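/*
 * Note on counter DMA sizing in enic_flow_add_filter(): the DMA region
 * must hold counters 0..max_flow_counter, so it is grown to ctr_idx + 1
 * entries whenever a newly allocated index exceeds the current maximum.
 * For example (hypothetical numbers), if max_flow_counter is 3 and the
 * allocator hands out index 8, the region is reconfigured for 9 counters;
 * if the classifier add then fails, the unwind path shrinks it back to 4
 * and restores max_flow_counter to 3.
 */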
1589
1590 /**
1591  * Remove filter/action from the NIC.
1592  *
1593  * @param enic[in]
1594  *   Device structure pointer.
1595  * @param flow[in]
1596  *   Flow whose NIC filter id (and counter index, if any) identifies
1597  *   the resources to release.
1598  * @param error[out]
1599  *   rte_flow error structure.
1600  */
1601 static int
1602 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1603                    struct rte_flow_error *error)
1604 {
1605         u16 filter_id;
1606         int err;
1607
1608         FLOW_TRACE();
1609
1610         filter_id = flow->enic_filter_id;
1611         err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1612         if (err) {
1613                 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1614                                    NULL, "vnic_dev_classifier failed");
1615                 return -err;
1616         }
1617
1618         if (flow->counter_idx != -1) {
1619                 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1620                         dev_err(enic, "counter free failed, idx: %d\n",
1621                                 flow->counter_idx);
1622                 flow->counter_idx = -1;
1623         }
1624         return 0;
1625 }
1626
1627 /*
1628  * The following functions are callbacks for Generic flow API.
1629  */
1630
1631 /**
1632  * Validate a flow supported by the NIC.
1633  *
1634  * @see rte_flow_validate()
1635  * @see rte_flow_ops
1636  */
1637 static int
1638 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1639                    const struct rte_flow_item pattern[],
1640                    const struct rte_flow_action actions[],
1641                    struct rte_flow_error *error)
1642 {
1643         struct filter_v2 enic_filter;
1644         struct filter_action_v2 enic_action;
1645         int ret;
1646
1647         FLOW_TRACE();
1648
1649         ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1650                                &enic_filter, &enic_action);
1651         if (!ret)
1652                 enic_dump_flow(&enic_action, &enic_filter);
1653         return ret;
1654 }
1655
1656 /**
1657  * Create a flow supported by the NIC.
1658  *
1659  * @see rte_flow_create()
1660  * @see rte_flow_ops
1661  */
1662 static struct rte_flow *
1663 enic_flow_create(struct rte_eth_dev *dev,
1664                  const struct rte_flow_attr *attrs,
1665                  const struct rte_flow_item pattern[],
1666                  const struct rte_flow_action actions[],
1667                  struct rte_flow_error *error)
1668 {
1669         int ret;
1670         struct filter_v2 enic_filter;
1671         struct filter_action_v2 enic_action;
1672         struct rte_flow *flow;
1673         struct enic *enic = pmd_priv(dev);
1674
1675         FLOW_TRACE();
1676
1677         ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1678                               &enic_action);
1679         if (ret < 0)
1680                 return NULL;
1681
1682         rte_spinlock_lock(&enic->flows_lock);
1683         flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1684                                     error);
1685         if (flow)
1686                 LIST_INSERT_HEAD(&enic->flows, flow, next);
1687         rte_spinlock_unlock(&enic->flows_lock);
1688
1689         return flow;
1690 }
1691
1692 /**
1693  * Destroy a flow supported by the NIC.
1694  *
1695  * @see rte_flow_destroy()
1696  * @see rte_flow_ops
1697  */
1698 static int
1699 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1700                   struct rte_flow_error *error)
1701 {
1702         struct enic *enic = pmd_priv(dev);
1703
1704         FLOW_TRACE();
1705
1706         rte_spinlock_lock(&enic->flows_lock);
1707         enic_flow_del_filter(enic, flow, error);
1708         LIST_REMOVE(flow, next);
1709         rte_spinlock_unlock(&enic->flows_lock);
1710         rte_free(flow);
1711         return 0;
1712 }
1713
1714 /**
1715  * Flush all flows on the device.
1716  *
1717  * @see rte_flow_flush()
1718  * @see rte_flow_ops
1719  */
1720 static int
1721 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1722 {
1723         struct rte_flow *flow;
1724         struct enic *enic = pmd_priv(dev);
1725
1726         FLOW_TRACE();
1727
1728         rte_spinlock_lock(&enic->flows_lock);
1729
1730         while (!LIST_EMPTY(&enic->flows)) {
1731                 flow = LIST_FIRST(&enic->flows);
1732                 enic_flow_del_filter(enic, flow, error);
1733                 LIST_REMOVE(flow, next);
1734                 rte_free(flow);
1735         }
1736         rte_spinlock_unlock(&enic->flows_lock);
1737         return 0;
1738 }
1739
1740 static int
1741 enic_flow_query_count(struct rte_eth_dev *dev,
1742                       struct rte_flow *flow, void *data,
1743                       struct rte_flow_error *error)
1744 {
1745         struct enic *enic = pmd_priv(dev);
1746         struct rte_flow_query_count *query;
1747         uint64_t packets, bytes;
1748
1749         FLOW_TRACE();
1750
1751         if (flow->counter_idx == -1) {
1752                 return rte_flow_error_set(error, ENOTSUP,
1753                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1754                                           NULL,
1755                                           "flow does not have counter");
1756         }
1757         query = (struct rte_flow_query_count *)data;
1758         if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1759                                     !!query->reset, &packets, &bytes)) {
1760                 return rte_flow_error_set
1761                         (error, EINVAL,
1762                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1763                          NULL,
1764                          "cannot read counter");
1765         }
1766         query->hits_set = 1;
1767         query->bytes_set = 1;
1768         query->hits = packets;
1769         query->bytes = bytes;
1770         return 0;
1771 }
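/*
 * Sketch of how an application reads this counter (port_id and flow
 * are assumed to exist, and the flow must have been created with a
 * COUNT action):
 *
 *      struct rte_flow_query_count count = { .reset = 1 };
 *      struct rte_flow_action count_actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error error;
 *
 *      if (!rte_flow_query(port_id, flow, count_actions, &count, &error) &&
 *          count.hits_set)
 *              printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                     count.hits, count.bytes);
 *
 * With .reset nonzero, the hardware counter is cleared as part of the
 * read (the !!query->reset argument to vnic_dev_counter_query() above).
 */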
1772
1773 static int
1774 enic_flow_query(struct rte_eth_dev *dev,
1775                 struct rte_flow *flow,
1776                 const struct rte_flow_action *actions,
1777                 void *data,
1778                 struct rte_flow_error *error)
1779 {
1780         int ret = 0;
1781
1782         FLOW_TRACE();
1783
1784         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1785                 switch (actions->type) {
1786                 case RTE_FLOW_ACTION_TYPE_VOID:
1787                         break;
1788                 case RTE_FLOW_ACTION_TYPE_COUNT:
1789                         ret = enic_flow_query_count(dev, flow, data, error);
1790                         break;
1791                 default:
1792                         return rte_flow_error_set(error, ENOTSUP,
1793                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1794                                                   actions,
1795                                                   "action not supported");
1796                 }
1797                 if (ret < 0)
1798                         return ret;
1799         }
1800         return 0;
1801 }
1802
1803 /**
1804  * Flow callback registration.
1805  *
1806  * @see rte_flow_ops
1807  */
1808 const struct rte_flow_ops enic_flow_ops = {
1809         .validate = enic_flow_validate,
1810         .create = enic_flow_create,
1811         .destroy = enic_flow_destroy,
1812         .flush = enic_flow_flush,
1813         .query = enic_flow_query,
1814 };
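/*
 * These callbacks are not called directly: the ethdev layer looks up
 * enic_flow_ops through the driver's filter_ctrl hook, so applications
 * use the generic rte_flow entry points, e.g.
 *
 *      struct rte_flow_error error;
 *      struct rte_flow *flow;
 *
 *      if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
 *              flow = rte_flow_create(port_id, &attr, pattern, actions,
 *                                     &error);
 *
 * which lands in enic_flow_validate() and enic_flow_create() above
 * (attr, pattern and actions as sketched after enic_flow_parse()).
 */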