net/enic: enable limited RSS flow action
drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** List of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /** Max item type in the above list, used to detect unsupported types. */
        enum rte_flow_item_type max_item_type;
};

/* Functions for copying flow actions into enic actions. */
typedef int (copy_action_fn)(struct enic *enic,
                             const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* Functions for copying items into enic filters. */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
                                struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** List of valid actions. */
        const enum rte_flow_action_type *actions;
        /** Copy function for a particular NIC. */
        copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
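
/*
 * Editorial example (not from the original source): with the v1 table
 * above, a 5-tuple rule such as the following hypothetical testpmd
 * command validates, because IPV4 is a valid start item and UDP may
 * follow IPV4:
 *
 *   flow create 0 ingress pattern ipv4 src is 10.0.0.1 dst is 10.0.0.2 /
 *        udp src is 100 dst is 200 / end actions queue index 1 / end
 */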

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * In this mode, layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
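
/*
 * Editorial example (not from the original source): with Advanced Filters
 * (the v3 table above), inner-packet matching is possible because ETH may
 * follow VXLAN. A hypothetical testpmd command:
 *
 *   flow create 0 ingress pattern udp / vxlan / eth / ipv4 / end
 *        actions queue index 1 / end
 *
 * The copy functions below place the inner headers into the L5 key area
 * at the offset tracked by inner_ofst.
 */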

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_RSS,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* Check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VXLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
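
/*
 * Editorial note: after the VXLAN item above, *inner_ofst equals
 * sizeof(struct vxlan_hdr) (8 bytes), so a following inner ETH item is
 * copied at offset 8 into the L5 layer by enic_copy_item_eth_v2(), which
 * then advances *inner_ofst by sizeof(struct ether_hdr).
 */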

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that is allowed */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
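
/*
 * Editorial example: given enic_items_v1 above, item_stacking_valid()
 * accepts UDP after IPV4 (IPV4 is in UDP's prev_items) and accepts IPV4
 * as the first item (its valid_start_item is 1), but rejects a pattern
 * that starts with UDP: UDP's valid_start_item is 0 and the
 * allowed-predecessor scan ends at RTE_FLOW_ITEM_TYPE_END without a
 * match.
 */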

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filter derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If the
                 * copy function is NULL, the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* Check that item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * action list. The list is validated as the actions are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the action list.
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}
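
/*
 * Editorial example: the only action list enic_copy_action_v1() accepts
 * is exactly one QUEUE action, plus any number of VOIDs, e.g.
 * "actions queue index 3 / end" in hypothetical testpmd syntax. A second
 * fate action trips the FATE overlap check, and a list with no QUEUE
 * action fails the final check.
 */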

/**
 * Build the internal version 2 NIC action structure from the provided
 * action list. The list is validated as the actions are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the action list.
 */
static int
enic_copy_action_v2(struct enic *enic,
                    const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
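                        /*
                         * Editorial example: mark id 0 maps to filter_id 1;
                         * the largest accepted id is ENIC_MAGIC_FILTER_ID - 2
                         * (0xfffd), which maps to filter_id 0xfffe.
                         */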
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS: {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        bool allow;
                        uint16_t i;

                        /*
                         * Hardware does not support general RSS actions, but
                         * we can still support the dummy one that is used to
                         * "receive normally".
                         */
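                        /*
                         * Editorial example: a hypothetical testpmd action
                         * list such as
                         *   actions mark id 1 / rss queues 0 1 2 3 end / end
                         * passes only if the queue list is the identity map
                         * over all rq_count queues and the hash function,
                         * level, types, and key length are left at defaults,
                         * as checked below.
                         */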
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
                                rss->level == 0 &&
                                (rss->types == 0 ||
                                 rss->types == enic->rss_hf) &&
                                rss->queue_num == enic->rq_count &&
                                rss->key_len == 0;
                        /* Identity queue map is ok */
                        for (i = 0; i < rss->queue_num; i++)
                                allow = allow && (i == rss->queue[i]);
                        if (!allow)
                                return ENOTSUP;
                        if (overlap & FATE)
                                return ENOTSUP;
                        /* Need MARK or FLAG */
                        if (!(overlap & MARK))
                                return ENOTSUP;
                        overlap |= FATE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}

/** Check if the action is supported. */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure. */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
{
        int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        FLOW_TRACE();

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No pattern specified");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "No action specified");
                return -rte_errno;
        }

        if (attrs) {
                if (attrs->group) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                           NULL,
                                           "priority groups are not supported");
                        return -rte_errno;
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
                } else if (attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           NULL,
                                           "transfer is not supported");
                        return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                           NULL,
                                           "only ingress is supported");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "No attribute specified");
                return -rte_errno;
        }

1443         /* Verify Actions. */
1444         enic_action_cap =  enic_get_action_cap(enic);
1445         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1446              action++) {
1447                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1448                         continue;
1449                 else if (!enic_match_action(action, enic_action_cap->actions))
1450                         break;
1451         }
1452         if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1453                 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1454                                    action, "Invalid action.");
1455                 return -rte_errno;
1456         }
1457         ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1458         if (ret) {
1459                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1460                            NULL, "Unsupported action.");
1461                 return -rte_errno;
1462         }
1463
1464         /* Verify Flow items. If copying the filter from flow format to enic
1465          * format fails, the flow is not supported
1466          */
1467         enic_filter_cap =  enic_get_filter_cap(enic);
1468         if (enic_filter_cap == NULL) {
1469                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1470                            NULL, "Flow API not available");
1471                 return -rte_errno;
1472         }
1473         enic_filter->type = enic->flow_filter_mode;
1474         ret = enic_copy_filter(pattern, enic_filter_cap,
1475                                        enic_filter, error);
1476         return ret;
1477 }
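
/*
 * Illustrative sketch (application side, not driver code): the only
 * attribute combination enic_flow_parse() accepts is plain ingress;
 * any nonzero group, priority, egress, or transfer bit fails with
 * ENOTSUP before the pattern is copied:
 *
 *      const struct rte_flow_attr attrs = {
 *              .group = 0,
 *              .priority = 0,
 *              .ingress = 1,
 *      };
 */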

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 *   Flow error structure, filled in on failure.
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                     struct filter_action_v2 *enic_action,
                     struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int err;
        uint16_t entry;
        int ctr_idx;
        int last_max_flow_ctr;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

        flow->counter_idx = -1;
        last_max_flow_ctr = -1;
        if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
                if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "cannot allocate counter");
                        goto unwind_flow_alloc;
                }
                flow->counter_idx = ctr_idx;
                enic_action->counter_index = ctr_idx;

                /* If index is the largest, increase the counter DMA size */
                if (ctr_idx > enic->max_flow_counter) {
                        err = vnic_dev_counter_dma_cfg(enic->vdev,
                                                 VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                                 ctr_idx + 1);
                        if (err) {
                                rte_flow_error_set(error, -err,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "counter DMA config failed");
                                goto unwind_ctr_alloc;
                        }
                        last_max_flow_ctr = enic->max_flow_counter;
                        enic->max_flow_counter = ctr_idx;
                }
        }

        /* entry[in] is the queue id, entry[out] is the filter id for delete */
        entry = enic_action->rq_idx;
        err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                goto unwind_ctr_dma_cfg;
        }

        flow->enic_filter_id = entry;
        flow->enic_filter = *enic_filter;

        return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
        if (last_max_flow_ctr != -1) {
                /* reduce counter DMA size */
                vnic_dev_counter_dma_cfg(enic->vdev,
                                         VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                         last_max_flow_ctr + 1);
                enic->max_flow_counter = last_max_flow_ctr;
        }
unwind_ctr_alloc:
        if (flow->counter_idx != -1)
                vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
        rte_free(flow);
        return NULL;
}
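
/*
 * Illustrative sketch (assumptions, not driver code): a COUNT action in
 * the action list sets FILTER_ACTION_COUNTER_FLAG during action copy,
 * which makes enic_flow_add_filter() allocate a counter and, when the
 * new index exceeds the current maximum, grow the counter DMA region:
 *
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */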

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter (flow->enic_filter_id) is to be removed.
 * @param error[out]
 *   Flow error structure, filled in on failure.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
                     struct rte_flow_error *error)
{
        u16 filter_id;
        int err;

        FLOW_TRACE();

        filter_id = flow->enic_filter_id;
        err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
                return -err;
        }

        if (flow->counter_idx != -1) {
                if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
                        dev_err(enic, "counter free failed, idx: %d\n",
                                flow->counter_idx);
                flow->counter_idx = -1;
        }
        return 0;
}

/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                              &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                              &enic_filter, &enic_action);
        if (ret < 0)
                return NULL;

        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}
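
/*
 * Illustrative usage sketch (application side, assumptions only): flows
 * reach enic_flow_create() through the generic API, e.g.
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *f;
 *
 *      f = rte_flow_create(port_id, &attrs, pattern, actions, &err);
 *      if (f == NULL)
 *              printf("flow create failed: %s\n",
 *                     err.message ? err.message : "unknown");
 */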

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
        rte_free(flow);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow, error);
                LIST_REMOVE(flow, next);
                rte_free(flow);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

static int
enic_flow_query_count(struct rte_eth_dev *dev,
                      struct rte_flow *flow, void *data,
                      struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);
        struct rte_flow_query_count *query;
        uint64_t packets, bytes;

        FLOW_TRACE();

        if (flow->counter_idx == -1) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "flow does not have counter");
        }
        query = (struct rte_flow_query_count *)data;
        if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
                                    !!query->reset, &packets, &bytes)) {
                return rte_flow_error_set
                        (error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL,
                         "cannot read counter");
        }
        query->hits_set = 1;
        query->bytes_set = 1;
        query->hits = packets;
        query->bytes = bytes;
        return 0;
}
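
/*
 * Illustrative sketch (assumptions, not driver code): an application
 * reads (and here also resets) the counter attached by a COUNT action
 * via rte_flow_query(); this driver walks the action list up to END:
 *
 *      const struct rte_flow_action count[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_query_count qc = { .reset = 1 };
 *
 *      if (rte_flow_query(port_id, f, count, &qc, &err) == 0)
 *              printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                     qc.hits, qc.bytes);
 */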

static int
enic_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = enic_flow_query_count(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
                if (ret < 0)
                        return ret;
        }
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
        .query = enic_flow_query,
};
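
/*
 * Sketch of how this table is exposed (the real hook lives in
 * enic_ethdev.c; shown here only for illustration): the .filter_ctrl
 * device op hands it to the rte_flow layer on RTE_ETH_FILTER_GENERIC:
 *
 *      case RTE_ETH_FILTER_GENERIC:
 *              if (filter_op != RTE_ETH_FILTER_GET)
 *                      return -EINVAL;
 *              *(const void **)arg = &enic_flow_ops;
 *              break;
 */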