net/enic: flow API for NICs with advanced filters disabled
drivers/net/enic/enic_flow.c
/*
 * Copyright (c) 2017, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <errno.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
#define FLOW_TRACE() \
	RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, fmt, ## args)
#else
#define FLOW_TRACE() do { } while (0)
#define FLOW_LOG(level, fmt, args...) do { } while (0)
#endif

/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	int (*copy_item)(const struct rte_flow_item *item,
			 struct filter_v2 *enic_filter, u8 *inner_ofst);
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** List of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
};

/* Functions for copying flow actions into enic actions. */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);

/* Functions for copying items into enic filters. */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
				struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** List of valid actions. */
	const enum rte_flow_action_type *actions;
	/** Copy function for a particular NIC. */
	int (*copy_fn)(const struct rte_flow_action actions[],
		       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Items for NICs that have the Advanced Filters capability but with it
 * disabled. In this mode, layer 3 must be specified in the pattern.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
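
/*
 * Illustrative sketch only, not used by the driver: with Advanced
 * Filters disabled (the table above), TCP/UDP/SCTP have
 * valid_start_item = 0, so a pattern must begin at L2/L3. A NULL
 * spec/mask matches everything, so the minimal shape of an accepted
 * IPv4/UDP pattern is the following. The array name is hypothetical.
 */
static __rte_unused const struct rte_flow_item enic_example_pattern_v2[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* valid start item */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* layer 3 is specified */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* IPv4 is a valid previous item */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};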

/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
	},
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_V2_ALL] = {
		.actions = enic_supported_actions_v2,
		.copy_fn = enic_copy_action_v2,
	},
};
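
/*
 * Illustrative sketch only, not used by the driver: actions an
 * application could pass on a NIC with filter-tag support
 * (FILTER_ACTION_V2_ALL above): steer matching packets to a queue and
 * mark them. Mark ids must stay below ENIC_MAGIC_FILTER_ID, which is
 * reserved for the FLAG action. Names prefixed enic_example_ are
 * hypothetical.
 */
static __rte_unused const struct rte_flow_action_queue enic_example_queue = {
	.index = 0,	/* Rx queue 0; an assumption for the example */
};
static __rte_unused const struct rte_flow_action_mark enic_example_mark = {
	.id = 1,	/* must be < ENIC_MAGIC_FILTER_ID */
};
static __rte_unused const struct rte_flow_action enic_example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &enic_example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &enic_example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};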

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	if (*inner_ofst == 0) {
		/* outer header */
		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
		       sizeof(struct ether_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
		       sizeof(struct ether_hdr));
	} else {
		/* inner header */
		if ((*inner_ofst + sizeof(struct ether_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		/* Offset into L5 where inner Ethernet header goes */
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       &enic_mask, sizeof(struct ether_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       &enic_spec, sizeof(struct ether_hdr));
		*inner_ofst += sizeof(struct ether_hdr);
	}
	return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	/* Don't support filtering on tpid */
	if (mask) {
		if (mask->tpid != 0)
			return ENOTSUP;
	} else {
		mask = &rte_flow_item_vlan_mask;
		RTE_ASSERT(mask->tpid == 0);
	}

	if (*inner_ofst == 0) {
		/* Outer header. Use the vlan mask/val fields */
		gp->mask_vlan = mask->tci;
		gp->val_vlan = spec->tci;
	} else {
		/* Inner header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct vlan_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct vlan_hdr));
		*inner_ofst += sizeof(struct vlan_hdr);
	}
	return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst == 0) {
		/* Match IPv4 */
		gp->mask_flags |= FILTER_GENERIC_1_IPV4;
		gp->val_flags |= FILTER_GENERIC_1_IPV4;

		/* Match all if no spec */
		if (!spec)
			return 0;

		if (!mask)
			mask = &rte_flow_item_ipv4_mask;

		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv4_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv4_hdr));
	} else {
		/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		/* Match all if no spec. Without these checks the memcpy
		 * below would dereference a NULL spec/mask.
		 */
		if (!spec)
			return 0;
		if (!mask)
			mask = &rte_flow_item_ipv4_mask;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv4_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv4_hdr));
		*inner_ofst += sizeof(struct ipv4_hdr);
	}
	return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match IPv6 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv6_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv6_hdr));
	} else {
		/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv6_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv6_hdr));
		*inner_ofst += sizeof(struct ipv6_hdr);
	}
	return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match UDP */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct udp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct udp_hdr));
	} else {
		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct udp_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct udp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct udp_hdr));
		*inner_ofst += sizeof(struct udp_hdr);
	}
	return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
		      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match TCP */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		return ENOTSUP;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct tcp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct tcp_hdr));
	} else {
		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct tcp_hdr)) >
		     FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct tcp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct tcp_hdr));
		*inner_ofst += sizeof(struct tcp_hdr);
	}
	return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Inner SCTP filtering is not supported.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
		       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
			struct filter_v2 *enic_filter, u8 *inner_ofst)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst)
		return EINVAL;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}
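
/*
 * Note on inner matching, summarizing the handlers above rather than
 * adding behavior: enic_copy_item_vxlan_v2() sets *inner_ofst to
 * sizeof(struct vxlan_hdr), so items that follow VXLAN are copied into
 * the L5 key at increasing offsets instead of their usual L2/L3/L4
 * slots:
 *
 *   L5 key: [ VXLAN hdr | inner ETH | inner VLAN/IPv4/IPv6/UDP/TCP ... ]
 *
 * Each inner handler bounds-checks the offset against
 * FILTER_GENERIC_1_KEY_LEN and returns ENOTSUP if the key would
 * overflow.
 */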

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	FLOW_TRACE();

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check if that's allowed */
	if (is_first_item && item_info->valid_start_item)
		return 1;

	return 0;
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_items *items_info,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;

	u8 is_first_item = 1;

	FLOW_TRACE();

	prev_item = RTE_FLOW_ITEM_TYPE_END;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned, the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &items_info[item->type];

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		ret = item_info->copy_item(item, enic_filter, &inner_ofst);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			/* ENIC_MAGIC_FILTER_ID is reserved for the FLAG
			 * action, so mark ids must stay below it.
			 */
			if (mark->id >= ENIC_MAGIC_FILTER_ID)
				return EINVAL;
			enic_action->filter_id = mark->id;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	/* FIXME: only the USNIC_IP and DPDK_1 filter modes are supported */
	if (enic->flow_filter_mode != FILTER_DPDK_1 &&
	    enic->flow_filter_mode != FILTER_USNIC_IP)
		return NULL;

	return &enic_filter_cap[enic->flow_filter_mode];
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;

	if (enic->filter_tags)
		ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes. Only ingress without group/priority is supported.
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	FLOW_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify Flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
			       enic_filter, error);
	return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;
	u16 entry;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	/* entry[in] is the queue id, entry[out] is the filter id for delete */
	entry = enic_action->rq_idx;
	ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (!ret) {
		flow->enic_filter_id = entry;
		flow->enic_filter = *enic_filter;
	} else {
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		rte_free(flow);
		return NULL;
	}
	return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of NIC filter.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, u16 filter_id,
		     struct rte_flow_error *error)
{
	int ret;

	FLOW_TRACE();

	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (ret)
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
	return ret;
}

/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow->enic_filter_id, error);
	LIST_REMOVE(flow, next);
	rte_free(flow);
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow->enic_filter_id, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
};
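
/*
 * Illustrative usage sketch only, deliberately compiled out: roughly how
 * an application reaches the callbacks above through the generic flow
 * API. The function name, port id, and queue index are assumptions made
 * for this example.
 */
#if 0
static int
enic_flow_example_usage(uint16_t port_id)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Dispatches to enic_flow_validate() through enic_flow_ops */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -rte_errno;
	/* Dispatches to enic_flow_create() */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -rte_errno;
	/* Dispatches to enic_flow_destroy() */
	return rte_flow_destroy(port_id, flow, &err);
}
#endif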