net/enic: allow flow mark ID 0
drivers/net/enic/enic_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
                          struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
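
/*
 * Illustrative sketch (not part of the driver): on a 5-tuple NIC the
 * application supplies fully-masked IPv4 addresses and L4 ports, with the
 * IPv4 item first as required by prev_items above. Addresses and ports
 * here are arbitrary example values:
 *
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr = {
 *                      .src_addr = rte_cpu_to_be_32(0x0a000001),
 *                      .dst_addr = rte_cpu_to_be_32(0x0a000002),
 *              },
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr = { .src_addr = 0xffffffff, .dst_addr = 0xffffffff },
 *      };
 *      struct rte_flow_item_udp udp_spec = {
 *              .hdr = {
 *                      .src_port = rte_cpu_to_be_16(1000),
 *                      .dst_port = rte_cpu_to_be_16(2000),
 *              },
 *      };
 *      struct rte_flow_item_udp udp_mask = {
 *              .hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */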

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
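
/*
 * Illustrative sketch: with Advanced Filters disabled, an L4 item cannot
 * start a pattern (valid_start_item is 0 for UDP/TCP/SCTP above), so a
 * bare "udp / end" pattern is rejected while this one is accepted:
 *
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */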

/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VXLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
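
/*
 * Illustrative sketch: matching an encapsulated frame. Once the VXLAN item
 * is copied, *inner_ofst becomes sizeof(struct vxlan_hdr), so the trailing
 * ETH item lands in the L5 key at that offset (see enic_copy_item_eth_v2).
 * inner_eth_spec is a hypothetical rte_flow_item_eth the application fills:
 *
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &inner_eth_spec },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */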

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if it may be first */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
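
/*
 * Example (illustrative): with enic_items_v2, VLAN may follow ETH and ETH
 * may start a pattern, but UDP may not start one:
 *
 *      item_stacking_valid(RTE_FLOW_ITEM_TYPE_ETH,
 *                          &enic_items_v2[RTE_FLOW_ITEM_TYPE_VLAN], 0);  // 1
 *      item_stacking_valid(RTE_FLOW_ITEM_TYPE_END,
 *                          &enic_items_v2[RTE_FLOW_ITEM_TYPE_UDP], 1);   // 0
 */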

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filter derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;

        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned, the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &items_info[item->type];

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific action derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific action derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but do not mark. So add 1 to the mark ID to
                         *   avoid using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
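
/*
 * Illustrative sketch (application side): because of the mapping above,
 * MARK ids 0 through ENIC_MAGIC_FILTER_ID - 2 are accepted, including 0.
 * The +1 offset is internal; the Rx path is assumed to undo it, so a flow
 * created with the actions below tags matching packets with mark id 0:
 *
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_action_mark mark = { .id = 0 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */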

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}

1309 /**
1310  * Internal flow parse/validate function.
1311  *
1312  * @param dev[in]
1313  *   This device pointer.
1314  * @param pattern[in]
1315  * @param actions[in]
1316  * @param error[out]
1317  * @param enic_filter[out]
1318  *   Internal NIC filter structure pointer.
1319  * @param enic_action[out]
1320  *   Internal NIC action structure pointer.
1321  */
1322 static int
1323 enic_flow_parse(struct rte_eth_dev *dev,
1324                 const struct rte_flow_attr *attrs,
1325                 const struct rte_flow_item pattern[],
1326                 const struct rte_flow_action actions[],
1327                 struct rte_flow_error *error,
1328                 struct filter_v2 *enic_filter,
1329                 struct filter_action_v2 *enic_action)
1330 {
1331         unsigned int ret = 0;
1332         struct enic *enic = pmd_priv(dev);
1333         const struct enic_filter_cap *enic_filter_cap;
1334         const struct enic_action_cap *enic_action_cap;
1335         const struct rte_flow_action *action;
1336
1337         FLOW_TRACE();
1338
1339         memset(enic_filter, 0, sizeof(*enic_filter));
1340         memset(enic_action, 0, sizeof(*enic_action));
1341
1342         if (!pattern) {
1343                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1344                                    NULL, "No pattern specified");
1345                 return -rte_errno;
1346         }
1347
1348         if (!actions) {
1349                 rte_flow_error_set(error, EINVAL,
1350                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1351                                    NULL, "No action specified");
1352                 return -rte_errno;
1353         }
1354
1355         if (attrs) {
1356                 if (attrs->group) {
1357                         rte_flow_error_set(error, ENOTSUP,
1358                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1359                                            NULL,
1360                                            "priority groups are not supported");
1361                         return -rte_errno;
1362                 } else if (attrs->priority) {
1363                         rte_flow_error_set(error, ENOTSUP,
1364                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1365                                            NULL,
1366                                            "priorities are not supported");
1367                         return -rte_errno;
1368                 } else if (attrs->egress) {
1369                         rte_flow_error_set(error, ENOTSUP,
1370                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1371                                            NULL,
1372                                            "egress is not supported");
1373                         return -rte_errno;
1374                 } else if (attrs->transfer) {
1375                         rte_flow_error_set(error, ENOTSUP,
1376                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1377                                            NULL,
1378                                            "transfer is not supported");
1379                         return -rte_errno;
1380                 } else if (!attrs->ingress) {
1381                         rte_flow_error_set(error, ENOTSUP,
1382                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1383                                            NULL,
1384                                            "only ingress is supported");
1385                         return -rte_errno;
1386                 }
1387
1388         } else {
1389                 rte_flow_error_set(error, EINVAL,
1390                                    RTE_FLOW_ERROR_TYPE_ATTR,
1391                                    NULL, "No attribute specified");
1392                 return -rte_errno;
1393         }

        /* Verify actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
             action++) {
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                else if (!enic_match_action(action, enic_action_cap->actions))
                        break;
        }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify flow items. If copying the filter from rte_flow format to
         * the enic format fails, the flow is not supported.
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Flow API not available");
                return -rte_errno;
        }
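        /* Use the filter format (e.g. DPDK, USNIC, or 5-tuple) that the
         * adapter advertised when the PMD probed it.
         */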
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                               enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 *   rte_flow error structure, filled in on failure.
 * @return
 *   Pointer to the new flow on success, NULL otherwise.
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                   struct filter_action_v2 *enic_action,
                   struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int err;
        uint16_t entry;
        int ctr_idx;
        int last_max_flow_ctr;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

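        /* -1 is a sentinel: no counter is attached to this flow yet, and
         * there is no previous counter DMA size to restore on the unwind
         * path below.
         */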
        flow->counter_idx = -1;
        last_max_flow_ctr = -1;
        if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
                if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "cannot allocate counter");
                        goto unwind_flow_alloc;
                }
                flow->counter_idx = ctr_idx;
                enic_action->counter_index = ctr_idx;

                /* If the new index is the largest in use, grow the counter
                 * DMA region to cover it.
                 */
                if (ctr_idx > enic->max_flow_counter) {
                        err = vnic_dev_counter_dma_cfg(enic->vdev,
                                                 VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                                 ctr_idx + 1);
                        if (err) {
                                rte_flow_error_set(error, -err,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "counter DMA config failed");
                                goto unwind_ctr_alloc;
                        }
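                        /* Remember the previous high-water mark so the DMA
                         * size can be restored if the classifier add below
                         * fails.
                         */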
                        last_max_flow_ctr = enic->max_flow_counter;
                        enic->max_flow_counter = ctr_idx;
                }
        }

        /* entry[in] is the queue index; entry[out] is the filter ID used
         * for delete.
         */
        entry = enic_action->rq_idx;
        err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                goto unwind_ctr_dma_cfg;
        }

        flow->enic_filter_id = entry;
        flow->enic_filter = *enic_filter;

        return flow;

/* Unwind in reverse order if anything failed. */
unwind_ctr_dma_cfg:
        if (last_max_flow_ctr != -1) {
                /* shrink the counter DMA region back to its previous size */
                vnic_dev_counter_dma_cfg(enic->vdev,
                                         VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                         last_max_flow_ctr + 1);
                enic->max_flow_counter = last_max_flow_ctr;
        }
unwind_ctr_alloc:
        if (flow->counter_idx != -1)
                vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
        rte_free(flow);
        return NULL;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter (and counter, if any) is to be removed.
 * @param error[out]
 *   rte_flow error structure, filled in on failure.
 * @return
 *   0 on success, negative errno otherwise.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
                   struct rte_flow_error *error)
{
        u16 filter_id;
        int err;

        FLOW_TRACE();

        filter_id = flow->enic_filter_id;
        err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
                return -err;
        }

        if (flow->counter_idx != -1) {
                if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
                        dev_err(enic, "counter free failed, idx: %d\n",
                                flow->counter_idx);
                flow->counter_idx = -1;
        }
        return 0;
}

/*
 * The following functions are callbacks for the generic flow API (rte_flow).
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                               &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
                              &enic_action);
        if (ret < 0)
                return NULL;

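        /* enic->flows is shared with destroy/flush; take the per-port lock
         * while pushing the filter and inserting the new flow.
         */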
        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
        rte_free(flow);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow, error);
                LIST_REMOVE(flow, next);
                rte_free(flow);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

static int
enic_flow_query_count(struct rte_eth_dev *dev,
                      struct rte_flow *flow, void *data,
                      struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);
        struct rte_flow_query_count *query;
        uint64_t packets, bytes;

        FLOW_TRACE();

        if (flow->counter_idx == -1) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "flow does not have counter");
        }
        query = (struct rte_flow_query_count *)data;
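        /* vnic_dev_counter_query() returns false on failure; on success it
         * fills in the packet/byte totals for the counter and clears them
         * when query->reset is set.
         */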
        if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
                                    !!query->reset, &packets, &bytes)) {
                return rte_flow_error_set
                        (error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL,
                         "cannot read counter");
        }
        query->hits_set = 1;
        query->bytes_set = 1;
        query->hits = packets;
        query->bytes = bytes;
        return 0;
}
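
/*
 * Illustrative sketch only (not part of the driver): an application reads
 * the counter through rte_flow_query(), passing an END-terminated action
 * list as the loop in enic_flow_query() below expects:
 *
 *     struct rte_flow_query_count counters = { .reset = 1 };
 *     struct rte_flow_action count_actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error flow_err;
 *     int ret = rte_flow_query(port_id, flow, count_actions, &counters,
 *                              &flow_err);
 */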

static int
enic_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = enic_flow_query_count(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
                if (ret < 0)
                        return ret;
        }
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
        .query = enic_flow_query,
};
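
/*
 * Illustrative sketch only (not part of the driver): applications reach the
 * callbacks above through the generic rte_flow API. The queue index and the
 * item/action mix below are assumptions; whether a COUNT action is accepted
 * depends on the NIC and firmware version.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error flow_err;
 *     struct rte_flow *flow;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions,
 *                                    &flow_err);
 */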