net/enic: fix SCTP match for flow API
[dpdk.git] drivers/net/enic/enic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
                                struct filter_v2 *enic_filter,
                                u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};
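
/*
 * Illustrative note (not part of the driver): the NIC's filter mode picks
 * one of the item tables above, and the table then drives validation. For
 * example, on a NIC in FILTER_USNIC_IP mode (enic_items_v2), the pattern
 * sketched below is accepted because UDP may follow IPV4, while a pattern
 * that *starts* at UDP is rejected there (valid_start_item is 0) but would
 * be accepted under FILTER_DPDK_1 (enic_items_v3). Variable names here are
 * hypothetical:
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */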

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};
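
/*
 * Illustrative sketch: on a NIC whose firmware reports the DROP flag but
 * not the COUNT flag, enic_get_action_cap() (further down) resolves to
 *
 *   const struct enic_action_cap *cap = enic_get_action_cap(enic);
 *
 * with cap->actions == enic_supported_actions_v2_drop, so QUEUE, MARK,
 * FLAG and DROP validate but COUNT does not.
 */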

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}
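
/*
 * Example (illustrative): mask_exact_match() is a plain bytewise compare,
 * so for the v1 5-tuple filters a caller passes the NIC's supported mask
 * and the application's mask over the full header. Hypothetical values:
 *
 *   struct udp_hdr supported = { .src_port = 0xffff, .dst_port = 0xffff };
 *   int ok = mask_exact_match((const u8 *)&supported,
 *                             (const u8 *)item->mask, sizeof(supported));
 *
 * ok is 1 only if the application asked for exactly a full src/dst port
 * match and nothing else.
 */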

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}
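
/*
 * Example of a flow the v1 (5-tuple) copy functions above accept, in
 * testpmd syntax (illustrative; addresses, ports, and queue index are
 * arbitrary). All of src/dst address and src/dst port must be fully
 * specified because these are exact match filters:
 *
 *   flow create 0 ingress pattern ipv4 src is 10.0.0.1 dst is 10.0.0.2 /
 *        udp src is 100 dst is 200 / end actions queue index 1 / end
 */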

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}
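
/*
 * Example (illustrative): an SCTP item must follow an IPv4 or IPv6 item
 * so the code above has an L3 header to write IPPROTO_SCTP into. A
 * minimal sketch of a matching pattern, with hypothetical port values in
 * big-endian form via RTE_BE16():
 *
 *   struct rte_flow_item_sctp spec = {
 *           .hdr = { .src_port = RTE_BE16(2905) } };
 *   struct rte_flow_item_sctp mask = {
 *           .hdr = { .src_port = RTE_BE16(0xffff) } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_SCTP,
 *             .spec = &spec, .mask = &mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */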

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VXLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
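
/*
 * Illustrative note: after the VXLAN item is copied, *inner_ofst equals
 * sizeof(struct vxlan_hdr) (8 bytes), so a following ETH item lands in
 * the L5 key at offset 8, a following VLAN right after it, and so on
 * until FILTER_GENERIC_1_KEY_LEN would be exceeded. E.g. the pattern
 *
 *   eth / ipv4 / udp / vxlan / eth / ipv4 ...
 *
 * matches the outer headers in the L2-L4 key fields and the inner
 * headers in L5.
 */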

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's cool */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
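
/*
 * Example: with enic_items_v2, stacking UDP on IPV4 is valid because
 * IPV4 is in UDP's prev_items list, but UDP as the first item is not
 * (its valid_start_item is 0); enic_items_v3 would accept both:
 *
 *   item_stacking_valid(RTE_FLOW_ITEM_TYPE_IPV4,
 *                       &enic_items_v2[RTE_FLOW_ITEM_TYPE_UDP], 0) -> 1
 *   item_stacking_valid(RTE_FLOW_ITEM_TYPE_END,
 *                       &enic_items_v2[RTE_FLOW_ITEM_TYPE_UDP], 1) -> 0
 */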

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;

        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &items_info[item->type];

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark IDs.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                                return EINVAL;
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
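
/*
 * Example (testpmd syntax, illustrative) of an action list that
 * enic_copy_action_v2() accepts on a NIC with the DROP and COUNT flags:
 * exactly one fate action (queue or drop) plus at most one of mark/flag:
 *
 *   ... actions mark id 5 / queue index 2 / count / end
 *
 * A second fate action (e.g. queue together with drop), or mark together
 * with flag, returns ENOTSUP via the overlap checks above.
 */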

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}
1166
1167 /* Debug function to dump internal NIC action structure. */
1168 static void
1169 enic_dump_actions(const struct filter_action_v2 *ea)
1170 {
1171         if (ea->type == FILTER_ACTION_RQ_STEERING) {
1172                 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1173         } else if (ea->type == FILTER_ACTION_V2) {
1174                 FLOW_LOG(INFO, "Actions(V2)\n");
1175                 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
1176                         FLOW_LOG(INFO, "\tqueue: %u\n",
1177                                enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1178                 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1179                         FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1180         }
1181 }
1182
1183 /* Debug function to dump internal NIC filter structure. */
1184 static void
1185 enic_dump_filter(const struct filter_v2 *filt)
1186 {
1187         const struct filter_generic_1 *gp;
1188         int i, j, mbyte;
1189         char buf[128], *bp;
1190         char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1191         char l4csum[16], ipfrag[16];
1192
1193         switch (filt->type) {
1194         case FILTER_IPV4_5TUPLE:
1195                 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1196                 break;
1197         case FILTER_USNIC_IP:
1198         case FILTER_DPDK_1:
1199                 /* FIXME: this should be a loop */
1200                 gp = &filt->u.generic_1;
1201                 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
1202                        gp->val_vlan, gp->mask_vlan);
1203
1204                 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1205                         sprintf(ip4, "%s ",
1206                                 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1207                                  ? "ip4(y)" : "ip4(n)");
1208                 else
1209                         sprintf(ip4, "%s ", "ip4(x)");
1210
1211                 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
1212                         sprintf(ip6, "%s ",
1213                                 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1214                                  ? "ip6(y)" : "ip6(n)");
1215                 else
1216                         sprintf(ip6, "%s ", "ip6(x)");
1217
1218                 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1219                         sprintf(udp, "%s ",
1220                                 (gp->val_flags & FILTER_GENERIC_1_UDP)
1221                                  ? "udp(y)" : "udp(n)");
1222                 else
1223                         sprintf(udp, "%s ", "udp(x)");
1224
1225                 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1226                         sprintf(tcp, "%s ",
1227                                 (gp->val_flags & FILTER_GENERIC_1_TCP)
1228                                  ? "tcp(y)" : "tcp(n)");
1229                 else
1230                         sprintf(tcp, "%s ", "tcp(x)");
1231
1232                 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1233                         sprintf(tcpudp, "%s ",
1234                                 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1235                                  ? "tcpudp(y)" : "tcpudp(n)");
1236                 else
1237                         sprintf(tcpudp, "%s ", "tcpudp(x)");
1238
1239                 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1240                         sprintf(ip4csum, "%s ",
1241                                 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1242                                  ? "ip4csum(y)" : "ip4csum(n)");
1243                 else
1244                         sprintf(ip4csum, "%s ", "ip4csum(x)");
1245
1246                 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1247                         sprintf(l4csum, "%s ",
1248                                 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1249                                  ? "l4csum(y)" : "l4csum(n)");
1250                 else
1251                         sprintf(l4csum, "%s ", "l4csum(x)");
1252
1253                 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1254                         sprintf(ipfrag, "%s ",
1255                                 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1256                                  ? "ipfrag(y)" : "ipfrag(n)");
1257                 else
1258                         sprintf(ipfrag, "%s ", "ipfrag(x)");
1259                 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
1260                          tcp, tcpudp, ip4csum, l4csum, ipfrag);
1261
1262                 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1263                         mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1264                         while (mbyte && !gp->layer[i].mask[mbyte])
1265                                 mbyte--;
1266                         if (mbyte == 0)
1267                                 continue;
1268
1269                         bp = buf;
1270                         for (j = 0; j <= mbyte; j++) {
1271                                 sprintf(bp, "%02x",
1272                                         gp->layer[i].mask[j]);
1273                                 bp += 2;
1274                         }
1275                         *bp = '\0';
1276                         FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
1277                         bp = buf;
1278                         for (j = 0; j <= mbyte; j++) {
1279                                 sprintf(bp, "%02x",
1280                                         gp->layer[i].val[j]);
1281                                 bp += 2;
1282                         }
1283                         *bp = '\0';
1284                         FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
1285                 }
1286                 break;
1287         default:
1288                 FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1289                 break;
1290         }
1291 }
1292
1293 /* Debug function to dump internal NIC flow structures. */
1294 static void
1295 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1296 {
1297         enic_dump_filter(filt);
1298         enic_dump_actions(ea);
1299 }
1300
1301
1302 /**
1303  * Internal flow parse/validate function.
1304  *
1305  * @param dev[in]
1306  *   This device pointer.
1307  * @param pattern[in]
1308  * @param actions[in]
1309  * @param error[out]
1310  * @param enic_filter[out]
1311  *   Internal NIC filter structure pointer.
1312  * @param enic_action[out]
1313  *   Internal NIC action structure pointer.
1314  */
1315 static int
1316 enic_flow_parse(struct rte_eth_dev *dev,
1317                 const struct rte_flow_attr *attrs,
1318                 const struct rte_flow_item pattern[],
1319                 const struct rte_flow_action actions[],
1320                 struct rte_flow_error *error,
1321                 struct filter_v2 *enic_filter,
1322                 struct filter_action_v2 *enic_action)
1323 {
1324         unsigned int ret = 0;
1325         struct enic *enic = pmd_priv(dev);
1326         const struct enic_filter_cap *enic_filter_cap;
1327         const struct enic_action_cap *enic_action_cap;
1328         const struct rte_flow_action *action;
1329
1330         FLOW_TRACE();
1331
1332         memset(enic_filter, 0, sizeof(*enic_filter));
1333         memset(enic_action, 0, sizeof(*enic_action));
1334
1335         if (!pattern) {
1336                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1337                                    NULL, "No pattern specified");
1338                 return -rte_errno;
1339         }
1340
1341         if (!actions) {
1342                 rte_flow_error_set(error, EINVAL,
1343                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1344                                    NULL, "No action specified");
1345                 return -rte_errno;
1346         }
1347
1348         if (attrs) {
1349                 if (attrs->group) {
1350                         rte_flow_error_set(error, ENOTSUP,
1351                                            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1352                                            NULL,
1353                                            "priority groups are not supported");
1354                         return -rte_errno;
1355                 } else if (attrs->priority) {
1356                         rte_flow_error_set(error, ENOTSUP,
1357                                            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1358                                            NULL,
1359                                            "priorities are not supported");
1360                         return -rte_errno;
1361                 } else if (attrs->egress) {
1362                         rte_flow_error_set(error, ENOTSUP,
1363                                            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1364                                            NULL,
1365                                            "egress is not supported");
1366                         return -rte_errno;
1367                 } else if (attrs->transfer) {
1368                         rte_flow_error_set(error, ENOTSUP,
1369                                            RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1370                                            NULL,
1371                                            "transfer is not supported");
1372                         return -rte_errno;
1373                 } else if (!attrs->ingress) {
1374                         rte_flow_error_set(error, ENOTSUP,
1375                                            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1376                                            NULL,
1377                                            "only ingress is supported");
1378                         return -rte_errno;
1379                 }
1380
1381         } else {
1382                 rte_flow_error_set(error, EINVAL,
1383                                    RTE_FLOW_ERROR_TYPE_ATTR,
1384                                    NULL, "No attribute specified");
1385                 return -rte_errno;
1386         }
1387
1388         /* Verify Actions. */
1389         enic_action_cap =  enic_get_action_cap(enic);
1390         for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1391              action++) {
1392                 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1393                         continue;
1394                 else if (!enic_match_action(action, enic_action_cap->actions))
1395                         break;
1396         }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported.
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Flow API not available");
                return -rte_errno;
        }
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                               enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 *   rte_flow error structure filled in on failure.
 * @return
 *   Pointer to the new flow on success, NULL otherwise (error is set).
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                     struct filter_action_v2 *enic_action,
                     struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int err;
        uint16_t entry;
        int ctr_idx;
        int last_max_flow_ctr;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

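        /*
         * If a COUNT action was requested, allocate a hardware counter for
         * this flow. Counter values are DMA'd to host memory, and the DMA
         * window must cover the highest allocated counter index, so widen
         * it whenever a new maximum index is handed out.
         */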
        flow->counter_idx = -1;
        last_max_flow_ctr = -1;
        if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
                if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "cannot allocate counter");
                        goto unwind_flow_alloc;
                }
                flow->counter_idx = ctr_idx;
                enic_action->counter_index = ctr_idx;

                /* If index is the largest, increase the counter DMA size */
                if (ctr_idx > enic->max_flow_counter) {
                        err = vnic_dev_counter_dma_cfg(enic->vdev,
                                                 VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                                 ctr_idx + 1);
                        if (err) {
                                rte_flow_error_set(error, -err,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           NULL, "counter DMA config failed");
                                goto unwind_ctr_alloc;
                        }
                        last_max_flow_ctr = enic->max_flow_counter;
                        enic->max_flow_counter = ctr_idx;
                }
        }

        /* entry[in] is the queue ID, entry[out] is the filter ID for delete */
        entry = enic_action->rq_idx;
        err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                goto unwind_ctr_dma_cfg;
        }

        flow->enic_filter_id = entry;
        flow->enic_filter = *enic_filter;

        return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
        if (last_max_flow_ctr != -1) {
                /* reduce counter DMA size */
                vnic_dev_counter_dma_cfg(enic->vdev,
                                         VNIC_FLOW_COUNTER_UPDATE_MSECS,
                                         last_max_flow_ctr + 1);
                enic->max_flow_counter = last_max_flow_ctr;
        }
unwind_ctr_alloc:
        if (flow->counter_idx != -1)
                vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
        rte_free(flow);
        return NULL;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose NIC filter and counter (if any) are to be removed.
 * @param error[out]
 *   rte_flow error structure filled in on failure.
 * @return
 *   0 on success, negative errno on failure.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
                     struct rte_flow_error *error)
{
        u16 filter_id;
        int err;

        FLOW_TRACE();

        filter_id = flow->enic_filter_id;
        err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (err) {
                rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
                return -err;
        }

        if (flow->counter_idx != -1) {
                if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
                        dev_err(enic, "counter free failed, idx: %d\n",
                                flow->counter_idx);
                flow->counter_idx = -1;
        }
        return 0;
}

/*
 * The following functions are callbacks for the Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                              &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
                              &enic_action);
        if (ret < 0)
                return NULL;

        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
        rte_free(flow);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow, error);
                LIST_REMOVE(flow, next);
                rte_free(flow);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

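/**
 * Read the hit and byte counts for a flow created with a COUNT action,
 * optionally resetting the hardware counter afterwards.
 *
 * @see rte_flow_query()
 */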
static int
enic_flow_query_count(struct rte_eth_dev *dev,
                      struct rte_flow *flow, void *data,
                      struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);
        struct rte_flow_query_count *query;
        uint64_t packets, bytes;

        FLOW_TRACE();

        if (flow->counter_idx == -1) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "flow does not have counter");
        }
        query = (struct rte_flow_query_count *)data;
        if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
                                    !!query->reset, &packets, &bytes)) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot read counter");
        }
        query->hits_set = 1;
        query->bytes_set = 1;
        query->hits = packets;
        query->bytes = bytes;
        return 0;
}

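/**
 * Query a flow. Only the COUNT action is supported.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */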
static int
enic_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = enic_flow_query_count(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
                if (ret < 0)
                        return ret;
        }
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
        .query = enic_flow_query,
};
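
/*
 * Illustrative sketch only (not part of the driver): an application
 * reaches the callbacks above through the generic rte_flow API. For
 * example, to steer IPv4/TCP traffic to queue 0 with a counter attached
 * (port_id is assumed to be a valid, started enic port):
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow_query_count cnt = { .reset = 0 };
 *      struct rte_flow *f;
 *
 *      f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *      if (f != NULL &&
 *          rte_flow_query(port_id, f, &actions[1], &cnt, &err) == 0)
 *              printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                     cnt.hits, cnt.bytes);
 */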