net/tap: use SPDX tags on 6WIND copyrighted files
[dpdk.git] drivers/net/tap/tap_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017 6WIND S.A.
3  * Copyright 2017 Mellanox.
4  */
5
6 #include <errno.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/queue.h>
10 #include <sys/resource.h>
11
12 #include <rte_byteorder.h>
13 #include <rte_jhash.h>
14 #include <rte_malloc.h>
15 #include <rte_eth_tap.h>
16 #include <tap_flow.h>
17 #include <tap_autoconf.h>
18 #include <tap_tcmsgs.h>
19 #include <tap_rss.h>
20
21 #ifndef HAVE_TC_FLOWER
22 /*
23  * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
24  * avoid sending TC messages the kernel cannot understand.
25  */
26 enum {
27         TCA_FLOWER_UNSPEC,
28         TCA_FLOWER_CLASSID,
29         TCA_FLOWER_INDEV,
30         TCA_FLOWER_ACT,
31         TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
32         TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
33         TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
34         TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
35         TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
36         TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
37         TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
38         TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
39         TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
40         TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
41         TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
42         TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
43         TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
44         TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
45         TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
46         TCA_FLOWER_KEY_TCP_DST,         /* be16 */
47         TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
48         TCA_FLOWER_KEY_UDP_DST,         /* be16 */
49 };
50 #endif
51 #ifndef HAVE_TC_VLAN_ID
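/*
 * The flower VLAN attributes follow TCA_FLOWER_FLAGS in the kernel UAPI, so
 * the first one is defined relative to TCA_FLOWER_KEY_UDP_DST with a +2
 * offset that skips the TCA_FLOWER_FLAGS slot.
 */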
52 enum {
53         /* TCA_FLOWER_FLAGS, */
54         TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
55         TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
56         TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
57 };
58 #endif
59 /*
60  * For kernels < 4.2, BPF-related enums may not be defined.
61  * Runtime checks will be carried out to gracefully report TC messages that
62  * are rejected by the kernel. Rejection may occur because:
63  * 1. the enum is not defined, or
64  * 2. the enum is defined but the kernel is not configured to support BPF
65  *    system calls, BPF classifiers or BPF actions.
66  */
67 #ifndef HAVE_TC_BPF
68 enum {
69         TCA_BPF_UNSPEC,
70         TCA_BPF_ACT,
71         TCA_BPF_POLICE,
72         TCA_BPF_CLASSID,
73         TCA_BPF_OPS_LEN,
74         TCA_BPF_OPS,
75 };
76 #endif
77 #ifndef HAVE_TC_BPF_FD
78 enum {
79         TCA_BPF_FD = TCA_BPF_OPS + 1,
80         TCA_BPF_NAME,
81 };
82 #endif
83 #ifndef HAVE_TC_ACT_BPF
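/* Replicate the kernel's common TC action header (tc_gen in linux/pkt_cls.h). */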
84 #define tc_gen \
85         __u32                 index; \
86         __u32                 capab; \
87         int                   action; \
88         int                   refcnt; \
89         int                   bindcnt
90
91 struct tc_act_bpf {
92         tc_gen;
93 };
94
95 enum {
96         TCA_ACT_BPF_UNSPEC,
97         TCA_ACT_BPF_TM,
98         TCA_ACT_BPF_PARMS,
99         TCA_ACT_BPF_OPS_LEN,
100         TCA_ACT_BPF_OPS,
101 };
102
103 #endif
104 #ifndef HAVE_TC_ACT_BPF_FD
105 enum {
106         TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
107         TCA_ACT_BPF_NAME,
108 };
109 #endif
110
111 /* RSS key management */
112 enum bpf_rss_key_e {
113         KEY_CMD_GET = 1,
114         KEY_CMD_RELEASE,
115         KEY_CMD_INIT,
116         KEY_CMD_DEINIT,
117 };
118
119 enum key_status_e {
120         KEY_STAT_UNSPEC,
121         KEY_STAT_USED,
122         KEY_STAT_AVAILABLE,
123 };
124
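/* Fixed TC handle reserved for the flow isolation rule. */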
125 #define ISOLATE_HANDLE 1
126
127 struct rte_flow {
128         LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
129         struct rte_flow *remote_flow; /* associated remote flow */
130         int bpf_fd[SEC_MAX]; /* list of bpf fds per ELF section */
131         uint32_t key_idx; /* RSS rule key index into BPF map */
132         struct nlmsg msg;
133 };
134
135 struct convert_data {
136         uint16_t eth_type;
137         uint16_t ip_proto;
138         uint8_t vlan;
139         struct rte_flow *flow;
140 };
141
142 struct remote_rule {
143         struct rte_flow_attr attr;
144         struct rte_flow_item items[2];
145         struct rte_flow_action actions[2];
146         int mirred;
147 };
148
149 struct action_data {
150         char id[16];
151
152         union {
153                 struct tc_gact gact;
154                 struct tc_mirred mirred;
155                 struct skbedit {
156                         struct tc_skbedit skbedit;
157                         uint16_t queue;
158                 } skbedit;
159                 struct bpf {
160                         struct tc_act_bpf bpf;
161                         int bpf_fd;
162                         const char *annotation;
163                 } bpf;
164         };
165 };
166
167 static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
168 static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
169 static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
170 static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
171 static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
172 static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
173 static int
174 tap_flow_validate(struct rte_eth_dev *dev,
175                   const struct rte_flow_attr *attr,
176                   const struct rte_flow_item items[],
177                   const struct rte_flow_action actions[],
178                   struct rte_flow_error *error);
179
180 static struct rte_flow *
181 tap_flow_create(struct rte_eth_dev *dev,
182                 const struct rte_flow_attr *attr,
183                 const struct rte_flow_item items[],
184                 const struct rte_flow_action actions[],
185                 struct rte_flow_error *error);
186
187 static int
188 tap_flow_destroy(struct rte_eth_dev *dev,
189                  struct rte_flow *flow,
190                  struct rte_flow_error *error);
191
192 static int
193 tap_flow_isolate(struct rte_eth_dev *dev,
194                  int set,
195                  struct rte_flow_error *error);
196
197 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
198 static int rss_enable(struct pmd_internals *pmd,
199                         const struct rte_flow_attr *attr,
200                         struct rte_flow_error *error);
201 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
202                         const struct rte_flow_action_rss *rss,
203                         struct rte_flow_error *error);
204
205 static const struct rte_flow_ops tap_flow_ops = {
206         .validate = tap_flow_validate,
207         .create = tap_flow_create,
208         .destroy = tap_flow_destroy,
209         .flush = tap_flow_flush,
210         .isolate = tap_flow_isolate,
211 };
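/*
 * Note: these callbacks are not called directly. An application goes through
 * the generic rte_flow API, which resolves this struct through the PMD's
 * filter control hook. A minimal usage sketch (port_id, attr, pattern and
 * actions are assumed to be defined by the application):
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (!f)
 *             printf("flow rule rejected: %s\n", err.message);
 *
 * rte_flow_create() eventually lands in tap_flow_create() below.
 */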
212
213 /* Static initializer for items. */
214 #define ITEMS(...) \
215         (const enum rte_flow_item_type []){ \
216                 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
217         }
218
219 /* Structure to generate a simple graph of layers supported by the NIC. */
220 struct tap_flow_items {
221         /* Bit-mask corresponding to what is supported for this item. */
222         const void *mask;
223         const unsigned int mask_sz; /* Bit-mask size in bytes. */
224         /*
225          * Bit-mask corresponding to the default mask, if none is provided
226          * along with the item.
227          */
228         const void *default_mask;
229         /**
230          * Conversion function from rte_flow to netlink attributes.
231          *
232          * @param item
233          *   rte_flow item to convert.
234          * @param data
235          *   Internal structure to store the conversion.
236          *
237          * @return
238          *   0 on success, negative value otherwise.
239          */
240         int (*convert)(const struct rte_flow_item *item, void *data);
241         /** List of possible following items.  */
242         const enum rte_flow_item_type *const items;
243 };
244
245 /* Graph of supported items and associated actions. */
246 static const struct tap_flow_items tap_flow_items[] = {
247         [RTE_FLOW_ITEM_TYPE_END] = {
248                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
249         },
250         [RTE_FLOW_ITEM_TYPE_ETH] = {
251                 .items = ITEMS(
252                         RTE_FLOW_ITEM_TYPE_VLAN,
253                         RTE_FLOW_ITEM_TYPE_IPV4,
254                         RTE_FLOW_ITEM_TYPE_IPV6),
255                 .mask = &(const struct rte_flow_item_eth){
256                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
257                         .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
258                         .type = -1,
259                 },
260                 .mask_sz = sizeof(struct rte_flow_item_eth),
261                 .default_mask = &rte_flow_item_eth_mask,
262                 .convert = tap_flow_create_eth,
263         },
264         [RTE_FLOW_ITEM_TYPE_VLAN] = {
265                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
266                                RTE_FLOW_ITEM_TYPE_IPV6),
267                 .mask = &(const struct rte_flow_item_vlan){
268                         .tpid = -1,
269                         /* DEI matching is not supported */
270 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
271                         .tci = 0xffef,
272 #else
273                         .tci = 0xefff,
274 #endif
275                 },
276                 .mask_sz = sizeof(struct rte_flow_item_vlan),
277                 .default_mask = &rte_flow_item_vlan_mask,
278                 .convert = tap_flow_create_vlan,
279         },
280         [RTE_FLOW_ITEM_TYPE_IPV4] = {
281                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
282                                RTE_FLOW_ITEM_TYPE_TCP),
283                 .mask = &(const struct rte_flow_item_ipv4){
284                         .hdr = {
285                                 .src_addr = -1,
286                                 .dst_addr = -1,
287                                 .next_proto_id = -1,
288                         },
289                 },
290                 .mask_sz = sizeof(struct rte_flow_item_ipv4),
291                 .default_mask = &rte_flow_item_ipv4_mask,
292                 .convert = tap_flow_create_ipv4,
293         },
294         [RTE_FLOW_ITEM_TYPE_IPV6] = {
295                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
296                                RTE_FLOW_ITEM_TYPE_TCP),
297                 .mask = &(const struct rte_flow_item_ipv6){
298                         .hdr = {
299                                 .src_addr = {
300                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
301                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
302                                 },
303                                 .dst_addr = {
304                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
305                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
306                                 },
307                                 .proto = -1,
308                         },
309                 },
310                 .mask_sz = sizeof(struct rte_flow_item_ipv6),
311                 .default_mask = &rte_flow_item_ipv6_mask,
312                 .convert = tap_flow_create_ipv6,
313         },
314         [RTE_FLOW_ITEM_TYPE_UDP] = {
315                 .mask = &(const struct rte_flow_item_udp){
316                         .hdr = {
317                                 .src_port = -1,
318                                 .dst_port = -1,
319                         },
320                 },
321                 .mask_sz = sizeof(struct rte_flow_item_udp),
322                 .default_mask = &rte_flow_item_udp_mask,
323                 .convert = tap_flow_create_udp,
324         },
325         [RTE_FLOW_ITEM_TYPE_TCP] = {
326                 .mask = &(const struct rte_flow_item_tcp){
327                         .hdr = {
328                                 .src_port = -1,
329                                 .dst_port = -1,
330                         },
331                 },
332                 .mask_sz = sizeof(struct rte_flow_item_tcp),
333                 .default_mask = &rte_flow_item_tcp_mask,
334                 .convert = tap_flow_create_tcp,
335         },
336 };
337
338 /*
339  *                TC rules, by growing priority
340  *
341  *        Remote netdevice                  Tap netdevice
342  * +-------------+-------------+  +-------------+-------------+
343  * |   Ingress   |   Egress    |  |   Ingress   |   Egress    |
344  * |-------------|-------------|  |-------------|-------------|
345  * |             |  \       /  |  |             |  REMOTE TX  | prio 1
346  * |             |   \     /   |  |             |   \     /   | prio 2
347  * |  EXPLICIT   |    \   /    |  |  EXPLICIT   |    \   /    |   .
348  * |             |     \ /     |  |             |     \ /     |   .
349  * |    RULES    |      X      |  |    RULES    |      X      |   .
350  * |      .      |     / \     |  |      .      |     / \     |   .
351  * |      .      |    /   \    |  |      .      |    /   \    |   .
352  * |      .      |   /     \   |  |      .      |   /     \   |   .
353  * |      .      |  /       \  |  |      .      |  /       \  |   .
354  *
355  *      ....           ....           ....           ....
356  *
357  * |      .      |  \       /  |  |      .      |  \       /  |   .
358  * |      .      |   \     /   |  |      .      |   \     /   |   .
359  * |             |    \   /    |  |             |    \   /    |
360  * |  LOCAL_MAC  |     \ /     |  |    \   /    |     \ /     | last prio - 5
361  * |   PROMISC   |      X      |  |     \ /     |      X      | last prio - 4
362  * |   ALLMULTI  |     / \     |  |      X      |     / \     | last prio - 3
363  * |  BROADCAST  |    /   \    |  |     / \     |    /   \    | last prio - 2
364  * | BROADCASTV6 |   /     \   |  |    /   \    |   /     \   | last prio - 1
365  * |     xx      |  /       \  |  |   ISOLATE   |  /       \  | last prio
366  * +-------------+-------------+  +-------------+-------------+
367  *
368  * The implicit flow rules are stored in a list, with the ISOLATE and REMOTE_TX
369  * rules mandatorily being the last two, e.g.:
370  *
371  * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
372  *
373  * That enables tap_flow_isolate() to remove implicit rules by popping the list
374  * head and removing it as long as it applies to the remote netdevice. The
375  * implicit rule for TX redirection is not removed, as isolation concerns only
376  * incoming traffic.
377  */
378
379 static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
380         [TAP_REMOTE_LOCAL_MAC] = {
381                 .attr = {
382                         .group = MAX_GROUP,
383                         .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
384                         .ingress = 1,
385                 },
386                 .items[0] = {
387                         .type = RTE_FLOW_ITEM_TYPE_ETH,
388                         .mask =  &(const struct rte_flow_item_eth){
389                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
390                         },
391                 },
392                 .items[1] = {
393                         .type = RTE_FLOW_ITEM_TYPE_END,
394                 },
395                 .mirred = TCA_EGRESS_REDIR,
396         },
397         [TAP_REMOTE_BROADCAST] = {
398                 .attr = {
399                         .group = MAX_GROUP,
400                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
401                         .ingress = 1,
402                 },
403                 .items[0] = {
404                         .type = RTE_FLOW_ITEM_TYPE_ETH,
405                         .mask =  &(const struct rte_flow_item_eth){
406                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
407                         },
408                         .spec = &(const struct rte_flow_item_eth){
409                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
410                         },
411                 },
412                 .items[1] = {
413                         .type = RTE_FLOW_ITEM_TYPE_END,
414                 },
415                 .mirred = TCA_EGRESS_MIRROR,
416         },
417         [TAP_REMOTE_BROADCASTV6] = {
418                 .attr = {
419                         .group = MAX_GROUP,
420                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
421                         .ingress = 1,
422                 },
423                 .items[0] = {
424                         .type = RTE_FLOW_ITEM_TYPE_ETH,
425                         .mask =  &(const struct rte_flow_item_eth){
426                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
427                         },
428                         .spec = &(const struct rte_flow_item_eth){
429                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
430                         },
431                 },
432                 .items[1] = {
433                         .type = RTE_FLOW_ITEM_TYPE_END,
434                 },
435                 .mirred = TCA_EGRESS_MIRROR,
436         },
437         [TAP_REMOTE_PROMISC] = {
438                 .attr = {
439                         .group = MAX_GROUP,
440                         .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
441                         .ingress = 1,
442                 },
443                 .items[0] = {
444                         .type = RTE_FLOW_ITEM_TYPE_VOID,
445                 },
446                 .items[1] = {
447                         .type = RTE_FLOW_ITEM_TYPE_END,
448                 },
449                 .mirred = TCA_EGRESS_MIRROR,
450         },
451         [TAP_REMOTE_ALLMULTI] = {
452                 .attr = {
453                         .group = MAX_GROUP,
454                         .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
455                         .ingress = 1,
456                 },
457                 .items[0] = {
458                         .type = RTE_FLOW_ITEM_TYPE_ETH,
459                         .mask =  &(const struct rte_flow_item_eth){
460                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
461                         },
462                         .spec = &(const struct rte_flow_item_eth){
463                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
464                         },
465                 },
466                 .items[1] = {
467                         .type = RTE_FLOW_ITEM_TYPE_END,
468                 },
469                 .mirred = TCA_EGRESS_MIRROR,
470         },
471         [TAP_REMOTE_TX] = {
472                 .attr = {
473                         .group = 0,
474                         .priority = TAP_REMOTE_TX,
475                         .egress = 1,
476                 },
477                 .items[0] = {
478                         .type = RTE_FLOW_ITEM_TYPE_VOID,
479                 },
480                 .items[1] = {
481                         .type = RTE_FLOW_ITEM_TYPE_END,
482                 },
483                 .mirred = TCA_EGRESS_MIRROR,
484         },
485         [TAP_ISOLATE] = {
486                 .attr = {
487                         .group = MAX_GROUP,
488                         .priority = PRIORITY_MASK - TAP_ISOLATE,
489                         .ingress = 1,
490                 },
491                 .items[0] = {
492                         .type = RTE_FLOW_ITEM_TYPE_VOID,
493                 },
494                 .items[1] = {
495                         .type = RTE_FLOW_ITEM_TYPE_END,
496                 },
497         },
498 };
499
500 /**
501  * Make as many checks as possible on an Ethernet item, and if a flow is
502  * provided, fill it appropriately with Ethernet info.
503  *
504  * @param[in] item
505  *   Item specification.
506  * @param[in, out] data
507  *   Additional data structure to tell next layers we've been here.
508  *
509  * @return
510  *   0 if checks are alright, -1 otherwise.
511  */
512 static int
513 tap_flow_create_eth(const struct rte_flow_item *item, void *data)
514 {
515         struct convert_data *info = (struct convert_data *)data;
516         const struct rte_flow_item_eth *spec = item->spec;
517         const struct rte_flow_item_eth *mask = item->mask;
518         struct rte_flow *flow = info->flow;
519         struct nlmsg *msg;
520
521         /* use default mask if none provided */
522         if (!mask)
523                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
524         /* TC does not support eth_type masking. Only accept an exact match. */
525         if (mask->type && mask->type != 0xffff)
526                 return -1;
527         if (!spec)
528                 return 0;
529         /* store eth_type for consistency if ipv4/6 pattern item comes next */
530         if (spec->type & mask->type)
531                 info->eth_type = spec->type;
532         if (!flow)
533                 return 0;
534         msg = &flow->msg;
535         if (!is_zero_ether_addr(&spec->dst)) {
536                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
537                            &spec->dst.addr_bytes);
538                 tap_nlattr_add(&msg->nh,
539                            TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
540                            &mask->dst.addr_bytes);
541         }
542         if (!is_zero_ether_addr(&mask->src)) {
543                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
544                            &spec->src.addr_bytes);
545                 tap_nlattr_add(&msg->nh,
546                            TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
547                            &mask->src.addr_bytes);
548         }
549         return 0;
550 }
551
552 /**
553  * Make as many checks as possible on a VLAN item, and if a flow is provided,
554  * fill it appropriately with VLAN info.
555  *
556  * @param[in] item
557  *   Item specification.
558  * @param[in, out] data
559  *   Additional data structure to tell next layers we've been here.
560  *
561  * @return
562  *   0 if checks are alright, -1 otherwise.
563  */
564 static int
565 tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
566 {
567         struct convert_data *info = (struct convert_data *)data;
568         const struct rte_flow_item_vlan *spec = item->spec;
569         const struct rte_flow_item_vlan *mask = item->mask;
570         struct rte_flow *flow = info->flow;
571         struct nlmsg *msg;
572
573         /* use default mask if none provided */
574         if (!mask)
575                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
576         /* TC does not support tpid masking. Only accept an exact match. */
577         if (mask->tpid && mask->tpid != 0xffff)
578                 return -1;
579         /* Double-tagging not supported. */
580         if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
581                 return -1;
582         info->vlan = 1;
583         if (!flow)
584                 return 0;
585         msg = &flow->msg;
586         msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
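/* An 802.1Q TCI is PCP (3 bits), DEI (1 bit), then VID (12 bits). */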
587 #define VLAN_PRIO(tci) ((tci) >> 13)
588 #define VLAN_ID(tci) ((tci) & 0xfff)
589         if (!spec)
590                 return 0;
591         if (spec->tci) {
592                 uint16_t tci = ntohs(spec->tci) & mask->tci;
593                 uint16_t prio = VLAN_PRIO(tci);
594                 uint16_t vid = VLAN_ID(tci); /* VID is 12 bits wide */
595
596                 if (prio)
597                         tap_nlattr_add8(&msg->nh,
598                                         TCA_FLOWER_KEY_VLAN_PRIO, prio);
599                 if (vid)
600                         tap_nlattr_add16(&msg->nh,
601                                          TCA_FLOWER_KEY_VLAN_ID, vid);
602         }
603         return 0;
604 }
605
606 /**
607  * Make as many checks as possible on an IPv4 item, and if a flow is provided,
608  * fill it appropriately with IPv4 info.
609  *
610  * @param[in] item
611  *   Item specification.
612  * @param[in, out] data
613  *   Additional data structure to tell next layers we've been here.
614  *
615  * @return
616  *   0 if checks are alright, -1 otherwise.
617  */
618 static int
619 tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
620 {
621         struct convert_data *info = (struct convert_data *)data;
622         const struct rte_flow_item_ipv4 *spec = item->spec;
623         const struct rte_flow_item_ipv4 *mask = item->mask;
624         struct rte_flow *flow = info->flow;
625         struct nlmsg *msg;
626
627         /* use default mask if none provided */
628         if (!mask)
629                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
630         /* check that previous eth type is compatible with ipv4 */
631         if (info->eth_type && info->eth_type != htons(ETH_P_IP))
632                 return -1;
633         /* store ip_proto for consistency if udp/tcp pattern item comes next */
634         if (spec)
635                 info->ip_proto = spec->hdr.next_proto_id;
636         if (!flow)
637                 return 0;
638         msg = &flow->msg;
639         if (!info->eth_type)
640                 info->eth_type = htons(ETH_P_IP);
641         if (!spec)
642                 return 0;
643         if (spec->hdr.dst_addr) {
644                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
645                              spec->hdr.dst_addr);
646                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
647                              mask->hdr.dst_addr);
648         }
649         if (spec->hdr.src_addr) {
650                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
651                              spec->hdr.src_addr);
652                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
653                              mask->hdr.src_addr);
654         }
655         if (spec->hdr.next_proto_id)
656                 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
657                             spec->hdr.next_proto_id);
658         return 0;
659 }
660
661 /**
662  * Make as many checks as possible on an IPv6 item, and if a flow is provided,
663  * fill it appropriately with IPv6 info.
664  *
665  * @param[in] item
666  *   Item specification.
667  * @param[in, out] data
668  *   Additional data structure to tell next layers we've been here.
669  *
670  * @return
671  *   0 if checks are alright, -1 otherwise.
672  */
673 static int
674 tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
675 {
676         struct convert_data *info = (struct convert_data *)data;
677         const struct rte_flow_item_ipv6 *spec = item->spec;
678         const struct rte_flow_item_ipv6 *mask = item->mask;
679         struct rte_flow *flow = info->flow;
680         uint8_t empty_addr[16] = { 0 };
681         struct nlmsg *msg;
682
683         /* use default mask if none provided */
684         if (!mask)
685                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
686         /* check that previous eth type is compatible with ipv6 */
687         if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
688                 return -1;
689         /* store ip_proto for consistency if udp/tcp pattern item comes next */
690         if (spec)
691                 info->ip_proto = spec->hdr.proto;
692         if (!flow)
693                 return 0;
694         msg = &flow->msg;
695         if (!info->eth_type)
696                 info->eth_type = htons(ETH_P_IPV6);
697         if (!spec)
698                 return 0;
699         if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
700                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
701                            sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
702                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
703                            sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
704         }
705         if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
706                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
707                            sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
708                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
709                            sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
710         }
711         if (spec->hdr.proto)
712                 tap_nlattr_add8(&msg->nh,
713                                 TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
714         return 0;
715 }
716
717 /**
718  * Make as many checks as possible on a UDP item, and if a flow is provided,
719  * fill it appropriately with UDP info.
720  *
721  * @param[in] item
722  *   Item specification.
723  * @param[in, out] data
724  *   Additional data structure to tell next layers we've been here.
725  *
726  * @return
727  *   0 if checks are alright, -1 otherwise.
728  */
729 static int
730 tap_flow_create_udp(const struct rte_flow_item *item, void *data)
731 {
732         struct convert_data *info = (struct convert_data *)data;
733         const struct rte_flow_item_udp *spec = item->spec;
734         const struct rte_flow_item_udp *mask = item->mask;
735         struct rte_flow *flow = info->flow;
736         struct nlmsg *msg;
737
738         /* use default mask if none provided */
739         if (!mask)
740                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
741         /* check that previous ip_proto is compatible with udp */
742         if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
743                 return -1;
744         /* TC does not support UDP port masking. Only accept an exact match. */
745         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
746             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
747                 return -1;
748         if (!flow)
749                 return 0;
750         msg = &flow->msg;
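        /* Always pin the L4 protocol so that flower only matches UDP packets. */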
751         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
752         if (!spec)
753                 return 0;
754         if (spec->hdr.dst_port & mask->hdr.dst_port)
755                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
756                              spec->hdr.dst_port);
757         if (spec->hdr.src_port & mask->hdr.src_port)
758                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
759                              spec->hdr.src_port);
760         return 0;
761 }
762
763 /**
764  * Make as many checks as possible on a TCP item, and if a flow is provided,
765  * fill it appropriately with TCP info.
766  *
767  * @param[in] item
768  *   Item specification.
769  * @param[in, out] data
770  *   Additional data structure to tell next layers we've been here.
771  *
772  * @return
773  *   0 if checks are alright, -1 otherwise.
774  */
775 static int
776 tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
777 {
778         struct convert_data *info = (struct convert_data *)data;
779         const struct rte_flow_item_tcp *spec = item->spec;
780         const struct rte_flow_item_tcp *mask = item->mask;
781         struct rte_flow *flow = info->flow;
782         struct nlmsg *msg;
783
784         /* use default mask if none provided */
785         if (!mask)
786                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
787         /* check that previous ip_proto is compatible with tcp */
788         if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
789                 return -1;
790         /* TC does not support TCP port masking. Only accept an exact match. */
791         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
792             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
793                 return -1;
794         if (!flow)
795                 return 0;
796         msg = &flow->msg;
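        /* Always pin the L4 protocol so that flower only matches TCP packets. */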
797         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
798         if (!spec)
799                 return 0;
800         if (spec->hdr.dst_port & mask->hdr.dst_port)
801                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
802                              spec->hdr.dst_port);
803         if (spec->hdr.src_port & mask->hdr.src_port)
804                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
805                              spec->hdr.src_port);
806         return 0;
807 }
808
809 /**
810  * Check support for a given item.
811  *
812  * @param[in] item
813  *   Item specification.
814  * @param size
815  *   Bit-mask size in bytes.
816  * @param[in] supported_mask
817  *   Bit-mask covering supported fields to compare with the spec, last and
818  *   mask fields of the item.
819  * @param[in] default_mask
820  *   Default bit-mask to use if none is provided with the item.
821  *
822  * @return
823  *   0 on success.
824  */
825 static int
826 tap_flow_item_validate(const struct rte_flow_item *item,
827                        unsigned int size,
828                        const uint8_t *supported_mask,
829                        const uint8_t *default_mask)
830 {
831         int ret = 0;
832
833         /* An empty layer is allowed, as long as all fields are NULL */
834         if (!item->spec && (item->mask || item->last))
835                 return -1;
836         /* Is the item spec compatible with what the NIC supports? */
837         if (item->spec && !item->mask) {
838                 unsigned int i;
839                 const uint8_t *spec = item->spec;
840
841                 for (i = 0; i < size; ++i)
842                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
843                                 return -1;
844                 /* Is the default mask compatible with what the NIC supports? */
845                 for (i = 0; i < size; i++)
846                         if ((default_mask[i] | supported_mask[i]) !=
847                             supported_mask[i])
848                                 return -1;
849         }
850         /* Is the item last compatible with what the NIC supports? */
851         if (item->last && !item->mask) {
852                 unsigned int i;
853                 const uint8_t *spec = item->last;
854
855                 for (i = 0; i < size; ++i)
856                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
857                                 return -1;
858         }
859         /* Is the item mask compatible with what the NIC supports? */
860         if (item->mask) {
861                 unsigned int i;
862                 const uint8_t *spec = item->mask;
863
864                 for (i = 0; i < size; ++i)
865                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
866                                 return -1;
867         }
868         /**
869          * Once masked, are item spec and item last equal?
870          * TC does not support ranges, so anything else is invalid.
871          */
872         if (item->spec && item->last) {
873                 uint8_t spec[size];
874                 uint8_t last[size];
875                 const uint8_t *apply = default_mask;
876                 unsigned int i;
877
878                 if (item->mask)
879                         apply = item->mask;
880                 for (i = 0; i < size; ++i) {
881                         spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
882                         last[i] = ((const uint8_t *)item->last)[i] & apply[i];
883                 }
884                 ret = memcmp(spec, last, size);
885         }
886         return ret;
887 }
888
889 /**
890  * Configure the kernel with a TC action and its parameters.
891  * Handled actions: "gact", "mirred", "skbedit", "bpf".
892  *
893  * @param[in] flow
894  *   Pointer to rte flow containing the netlink message
895  *
896  * @param[in, out] act_index
897  *   Pointer to action sequence number in the TC command
898  *
899  * @param[in] adata
900  *  Pointer to struct holding the action parameters
901  *
902  * @return
903  *   -1 on failure, 0 on success
904  */
905 static int
906 add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
907 {
908         struct nlmsg *msg = &flow->msg;
909
910         if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
911                 return -1;
912
913         tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
914                                 strlen(adata->id) + 1, adata->id);
915         if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
916                 return -1;
917         if (strcmp("gact", adata->id) == 0) {
918                 tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
919                            &adata->gact);
920         } else if (strcmp("mirred", adata->id) == 0) {
921                 if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
922                         adata->mirred.action = TC_ACT_PIPE;
923                 else /* REDIRECT */
924                         adata->mirred.action = TC_ACT_STOLEN;
925                 tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
926                            sizeof(adata->mirred),
927                            &adata->mirred);
928         } else if (strcmp("skbedit", adata->id) == 0) {
929                 tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
930                            sizeof(adata->skbedit.skbedit),
931                            &adata->skbedit.skbedit);
932                 tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
933                              adata->skbedit.queue);
934         } else if (strcmp("bpf", adata->id) == 0) {
935                 tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
936                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
937                            strlen(adata->bpf.annotation) + 1,
938                            adata->bpf.annotation);
939                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
940                            sizeof(adata->bpf.bpf),
941                            &adata->bpf.bpf);
942         } else {
943                 return -1;
944         }
945         tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
946         tap_nlattr_nested_finish(msg); /* nested act_index */
947         return 0;
948 }
949
950 /**
951  * Helper function to send a series of TC actions to the kernel
952  *
953  * @param[in] flow
954  *   Pointer to rte flow containing the netlink message
955  *
956  * @param[in] nb_actions
957  *   Number of actions in an array of action structs
958  *
959  * @param[in] data
960  *   Pointer to an array of action structs
961  *
962  * @param[in] classifier_action
963  *   The classifier on behalf of which the actions are configured
964  *
965  * @return
966  *   -1 on failure, 0 on success
967  */
968 static int
969 add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
970             int classifier_action)
971 {
972         struct nlmsg *msg = &flow->msg;
973         size_t act_index = 1;
974         int i;
975
976         if (tap_nlattr_nested_start(msg, classifier_action) < 0)
977                 return -1;
978         for (i = 0; i < nb_actions; i++)
979                 if (add_action(flow, &act_index, data + i) < 0)
980                         return -1;
981         tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
982         return 0;
983 }
984
985 /**
986  * Validate a flow supported by TC.
987  * If the flow parameter is not NULL, also fill in its netlink message.
988  *
989  * @param pmd
990  *   Pointer to private structure.
991  * @param[in] attr
992  *   Flow rule attributes.
993  * @param[in] pattern
994  *   Pattern specification (list terminated by the END pattern item).
995  * @param[in] actions
996  *   Associated actions (list terminated by the END action).
997  * @param[out] error
998  *   Perform verbose error reporting if not NULL.
999  * @param[in, out] flow
1000  *   Flow structure to update.
1001  * @param[in] mirred
1002  *   If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
1003  *   redirection to the tap netdevice, and the TC rule will be configured
1004  *   on the remote netdevice in pmd.
1005  *   If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
1006  *   mirroring to the tap netdevice, and the TC rule will be configured
1007  *   on the remote netdevice in pmd. Matching packets will thus be duplicated.
1008  *   If set to 0, the standard behavior is to be used: set correct actions for
1009  *   the TC rule, and apply it on the tap netdevice.
1010  *
1011  * @return
1012  *   0 on success, a negative errno value otherwise and rte_errno is set.
1013  */
1014 static int
1015 priv_flow_process(struct pmd_internals *pmd,
1016                   const struct rte_flow_attr *attr,
1017                   const struct rte_flow_item items[],
1018                   const struct rte_flow_action actions[],
1019                   struct rte_flow_error *error,
1020                   struct rte_flow *flow,
1021                   int mirred)
1022 {
1023         const struct tap_flow_items *cur_item = tap_flow_items;
1024         struct convert_data data = {
1025                 .eth_type = 0,
1026                 .ip_proto = 0,
1027                 .flow = flow,
1028         };
1029         int action = 0; /* Only one action authorized for now */
1030
1031         if (attr->group > MAX_GROUP) {
1032                 rte_flow_error_set(
1033                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1034                         NULL, "group value too big: cannot exceed 15");
1035                 return -rte_errno;
1036         }
1037         if (attr->priority > MAX_PRIORITY) {
1038                 rte_flow_error_set(
1039                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1040                         NULL, "priority value too big");
1041                 return -rte_errno;
1042         } else if (flow) {
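                /*
                 * TC keeps the filter priority in the upper 16 bits of
                 * tcm_info; the lower 16 bits carry the protocol.
                 */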
1043                 uint16_t group = attr->group << GROUP_SHIFT;
1044                 uint16_t prio = group | (attr->priority +
1045                                 RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
1046                 flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
1047                                                  flow->msg.t.tcm_info);
1048         }
1049         if (flow) {
1050                 if (mirred) {
1051                         /*
1052                          * If attr->ingress, the rule applies on remote ingress
1053                          * to match incoming packets
1054                          * If attr->egress, the rule applies on tap ingress (as
1055                          * seen from the kernel) to deal with packets going out
1056                          * from the DPDK app.
1057                          */
1058                         flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
1059                 } else {
1060                         /* Standard rule on tap egress (kernel standpoint). */
1061                         flow->msg.t.tcm_parent =
1062                                 TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1063                 }
1064                 /* use flower filter type */
1065                 tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
1066                 if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
1067                         goto exit_item_not_supported;
1068         }
1069         for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
1070                 const struct tap_flow_items *token = NULL;
1071                 unsigned int i;
1072                 int err = 0;
1073
1074                 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
1075                         continue;
1076                 for (i = 0;
1077                      cur_item->items &&
1078                      cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
1079                      ++i) {
1080                         if (cur_item->items[i] == items->type) {
1081                                 token = &tap_flow_items[items->type];
1082                                 break;
1083                         }
1084                 }
1085                 if (!token)
1086                         goto exit_item_not_supported;
1087                 cur_item = token;
1088                 err = tap_flow_item_validate(
1089                         items, cur_item->mask_sz,
1090                         (const uint8_t *)cur_item->mask,
1091                         (const uint8_t *)cur_item->default_mask);
1092                 if (err)
1093                         goto exit_item_not_supported;
1094                 if (flow && cur_item->convert) {
1095                         err = cur_item->convert(items, &data);
1096                         if (err)
1097                                 goto exit_item_not_supported;
1098                 }
1099         }
1100         if (flow) {
1101                 if (data.vlan) {
1102                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1103                                      htons(ETH_P_8021Q));
1104                         tap_nlattr_add16(&flow->msg.nh,
1105                                      TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1106                                      data.eth_type ?
1107                                      data.eth_type : htons(ETH_P_ALL));
1108                 } else if (data.eth_type) {
1109                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1110                                      data.eth_type);
1111                 }
1112         }
1113         if (mirred && flow) {
1114                 struct action_data adata = {
1115                         .id = "mirred",
1116                         .mirred = {
1117                                 .eaction = mirred,
1118                         },
1119                 };
1120
1121                 /*
1122                  * If attr->egress && mirred, then this is a special
1123                  * case where the rule must be applied on the tap, to
1124                  * redirect packets coming from the DPDK App, out
1125                  * through the remote netdevice.
1126                  */
1127                 adata.mirred.ifindex = attr->ingress ? pmd->if_index :
1128                         pmd->remote_if_index;
1129                 if (mirred == TCA_EGRESS_MIRROR)
1130                         adata.mirred.action = TC_ACT_PIPE;
1131                 else
1132                         adata.mirred.action = TC_ACT_STOLEN;
1133                 if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
1134                         goto exit_action_not_supported;
1135                 else
1136                         goto end;
1137         }
1138         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
1139                 int err = 0;
1140
1141                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
1142                         continue;
1143                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
1144                         if (action)
1145                                 goto exit_action_not_supported;
1146                         action = 1;
1147                         if (flow) {
1148                                 struct action_data adata = {
1149                                         .id = "gact",
1150                                         .gact = {
1151                                                 .action = TC_ACT_SHOT,
1152                                         },
1153                                 };
1154
1155                                 err = add_actions(flow, 1, &adata,
1156                                                   TCA_FLOWER_ACT);
1157                         }
1158                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
1159                         if (action)
1160                                 goto exit_action_not_supported;
1161                         action = 1;
1162                         if (flow) {
1163                                 struct action_data adata = {
1164                                         .id = "gact",
1165                                         .gact = {
1166                                                 /* continue */
1167                                                 .action = TC_ACT_UNSPEC,
1168                                         },
1169                                 };
1170
1171                                 err = add_actions(flow, 1, &adata,
1172                                                   TCA_FLOWER_ACT);
1173                         }
1174                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1175                         const struct rte_flow_action_queue *queue =
1176                                 (const struct rte_flow_action_queue *)
1177                                 actions->conf;
1178
1179                         if (action)
1180                                 goto exit_action_not_supported;
1181                         action = 1;
1182                         if (!queue ||
1183                             (queue->index > pmd->dev->data->nb_rx_queues - 1))
1184                                 goto exit_action_not_supported;
1185                         if (flow) {
1186                                 struct action_data adata = {
1187                                         .id = "skbedit",
1188                                         .skbedit = {
1189                                                 .skbedit = {
1190                                                         .action = TC_ACT_PIPE,
1191                                                 },
1192                                                 .queue = queue->index,
1193                                         },
1194                                 };
1195
1196                                 err = add_actions(flow, 1, &adata,
1197                                         TCA_FLOWER_ACT);
1198                         }
1199                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
1200                         const struct rte_flow_action_rss *rss =
1201                                 (const struct rte_flow_action_rss *)
1202                                 actions->conf;
1203
1204                         if (action++)
1205                                 goto exit_action_not_supported;
1206
1207                         if (!pmd->rss_enabled) {
1208                                 err = rss_enable(pmd, attr, error);
1209                                 if (err)
1210                                         goto exit_action_not_supported;
1211                         }
1212                         if (flow && rss)
1213                                 err = rss_add_actions(flow, pmd, rss, error);
1214                 } else {
1215                         goto exit_action_not_supported;
1216                 }
1217                 if (err)
1218                         goto exit_action_not_supported;
1219         }
1220 end:
1221         if (flow)
1222                 tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
1223         return 0;
1224 exit_item_not_supported:
1225         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1226                            items, "item not supported");
1227         return -rte_errno;
1228 exit_action_not_supported:
1229         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1230                            actions, "action not supported");
1231         return -rte_errno;
1232 }
1233
1234
1235
1236 /**
1237  * Validate a flow.
1238  *
1239  * @see rte_flow_validate()
1240  * @see rte_flow_ops
1241  */
1242 static int
1243 tap_flow_validate(struct rte_eth_dev *dev,
1244                   const struct rte_flow_attr *attr,
1245                   const struct rte_flow_item items[],
1246                   const struct rte_flow_action actions[],
1247                   struct rte_flow_error *error)
1248 {
1249         struct pmd_internals *pmd = dev->data->dev_private;
1250
1251         return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
1252 }
1253
1254 /**
1255  * Set a unique handle in a flow.
1256  *
1257  * The kernel supports TC rules with equal priority, as long as they use the
1258  * same matching fields (e.g.: dst mac and ipv4) with different values (and
1259  * full mask to ensure no collision is possible).
1260  * In those rules, the handle (uint32_t) is the part that would identify
1261  * specifically each rule.
1262  *
1263  * On 32-bit architectures, the handle can simply be the flow's pointer address.
1264  * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
1265  * unique handle.
1266  *
1267  * @param[in, out] flow
1268  *   The flow that needs its handle set.
1269  */
1270 static void
1271 tap_flow_set_handle(struct rte_flow *flow)
1272 {
1273         uint32_t handle = 0;
1274
1275         if (sizeof(flow) > 4)
1276                 handle = rte_jhash(&flow, sizeof(flow), 1);
1277         else
1278                 handle = (uintptr_t)flow;
1279         /* must be at least 1 to avoid letting the kernel choose one for us */
1280         if (!handle)
1281                 handle = 1;
1282         flow->msg.t.tcm_handle = handle;
1283 }
1284
1285 /**
1286  * Create a flow.
1287  *
1288  * @see rte_flow_create()
1289  * @see rte_flow_ops
1290  */
1291 static struct rte_flow *
1292 tap_flow_create(struct rte_eth_dev *dev,
1293                 const struct rte_flow_attr *attr,
1294                 const struct rte_flow_item items[],
1295                 const struct rte_flow_action actions[],
1296                 struct rte_flow_error *error)
1297 {
1298         struct pmd_internals *pmd = dev->data->dev_private;
1299         struct rte_flow *remote_flow = NULL;
1300         struct rte_flow *flow = NULL;
1301         struct nlmsg *msg = NULL;
1302         int err;
1303
1304         if (!pmd->if_index) {
1305                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1306                                    NULL,
1307                                    "can't create rule, ifindex not found");
1308                 goto fail;
1309         }
1310         /*
1311          * No rules configured through standard rte_flow should be set on the
1312          * priorities used by implicit rules.
1313          */
1314         if ((attr->group == MAX_GROUP) &&
1315             attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1316                 rte_flow_error_set(
1317                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1318                         NULL, "priority value too big");
1319                 goto fail;
1320         }
1321         flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1322         if (!flow) {
1323                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1324                                    NULL, "cannot allocate memory for rte_flow");
1325                 goto fail;
1326         }
1327         msg = &flow->msg;
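        /*
         * NLM_F_CREATE|NLM_F_EXCL: create the filter, failing if an
         * equivalent one already exists.
         */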
1328         tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1329                     NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1330         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1331         tap_flow_set_handle(flow);
1332         if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1333                 goto fail;
1334         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1335         if (err < 0) {
1336                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1337                                    NULL, "couldn't send request to kernel");
1338                 goto fail;
1339         }
1340         err = tap_nl_recv_ack(pmd->nlsk_fd);
1341         if (err < 0) {
1342                 RTE_LOG(ERR, PMD,
1343                         "Kernel refused TC filter rule creation (%d): %s\n",
1344                         errno, strerror(errno));
1345                 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1346                                    NULL,
1347                                    "overlapping rules or kernel too old for flower support");
1348                 goto fail;
1349         }
1350         LIST_INSERT_HEAD(&pmd->flows, flow, next);
1351         /*
1352          * If a remote device is configured, a TC rule with identical items for
1353          * matching must be set on that device, with a single action: redirect
1354          * to the local pmd->if_index.
1355          */
1356         if (pmd->remote_if_index) {
1357                 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1358                 if (!remote_flow) {
1359                         rte_flow_error_set(
1360                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1361                                 "cannot allocate memory for rte_flow");
1362                         goto fail;
1363                 }
1364                 msg = &remote_flow->msg;
1365                 /* initialize the msg with the remote netdevice's ifindex */
1366                 tc_init_msg(
1367                         msg, pmd->remote_if_index, RTM_NEWTFILTER,
1368                         NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1369                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1370                 tap_flow_set_handle(remote_flow);
1371                 if (priv_flow_process(pmd, attr, items, NULL,
1372                                       error, remote_flow, TCA_EGRESS_REDIR)) {
1373                         rte_flow_error_set(
1374                                 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1375                                 NULL, "rte flow rule validation failed");
1376                         goto fail;
1377                 }
1378                 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1379                 if (err < 0) {
1380                         rte_flow_error_set(
1381                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1382                                 NULL, "Failure sending nl request");
1383                         goto fail;
1384                 }
1385                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1386                 if (err < 0) {
1387                         RTE_LOG(ERR, PMD,
1388                                 "Kernel refused TC filter rule creation (%d): %s\n",
1389                                 errno, strerror(errno));
1390                         rte_flow_error_set(
1391                                 error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1392                                 NULL,
1393                                 "overlapping rules or kernel too old for flower support");
1394                         goto fail;
1395                 }
1396                 flow->remote_flow = remote_flow;
1397         }
1398         return flow;
1399 fail:
1400         if (remote_flow)
1401                 rte_free(remote_flow);
1402         if (flow)
1403                 rte_free(flow);
1404         return NULL;
1405 }
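
/*
 * Usage sketch (port_id is an assumption): how an application would
 * exercise this path through the generic rte_flow API, matching all
 * Ethernet frames and steering them to Rx queue 0.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                          actions, &err);
 *     if (!f)
 *             printf("flow rejected: %s\n",
 *                    err.message ? err.message : "(none)");
 */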
1406
1407 /**
1408  * Destroy a flow using pointer to pmd_internal.
1409  *
1410  * @param[in, out] pmd
1411  *   Pointer to private structure.
1412  * @param[in] flow
1413  *   Pointer to the flow to destroy.
1414  * @param[in, out] error
1415  *   Pointer to the flow error handler.
1416  *
1417  * @return 0 if the flow could be destroyed, -1 otherwise.
1418  */
1419 static int
1420 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1421                      struct rte_flow *flow,
1422                      struct rte_flow_error *error)
1423 {
1424         struct rte_flow *remote_flow = flow->remote_flow;
1425         int i;
1426         int ret = 0;
1427
1428         LIST_REMOVE(flow, next);
1429         flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1430         flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1431
1432         ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1433         if (ret < 0) {
1434                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1435                                    NULL, "couldn't send request to kernel");
1436                 goto end;
1437         }
1438         ret = tap_nl_recv_ack(pmd->nlsk_fd);
1439         /* If errno is ENOENT, the rule is already no longer in the kernel. */
1440         if (ret < 0 && errno == ENOENT)
1441                 ret = 0;
1442         if (ret < 0) {
1443                 RTE_LOG(ERR, PMD,
1444                         "Kernel refused TC filter rule deletion (%d): %s\n",
1445                         errno, strerror(errno));
1446                 rte_flow_error_set(
1447                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1448                         "couldn't receive kernel ack to our request");
1449                 goto end;
1450         }
1451         /* Close opened BPF file descriptors of this flow */
1452         for (i = 0; i < SEC_MAX; i++)
1453                 if (flow->bpf_fd[i] != 0) {
1454                         close(flow->bpf_fd[i]);
1455                         flow->bpf_fd[i] = 0;
1456                 }
1457
1458         /* Release map key for this RSS rule */
1459         ret = bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
1460         if (ret < 0) {
1461                 rte_flow_error_set(
1462                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1463                         "Failed to release BPF RSS key");
1464
1465                 goto end;
1466         }
1467
1468         if (remote_flow) {
1469                 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1470                 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1471
1472                 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1473                 if (ret < 0) {
1474                         rte_flow_error_set(
1475                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1476                                 NULL, "Failure sending nl request");
1477                         goto end;
1478                 }
1479                 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1480                 if (ret < 0 && errno == ENOENT)
1481                         ret = 0;
1482                 if (ret < 0) {
1483                         RTE_LOG(ERR, PMD,
1484                                 "Kernel refused TC filter rule deletion (%d): %s\n",
1485                                 errno, strerror(errno));
1486                         rte_flow_error_set(
1487                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1488                                 NULL, "Failure trying to receive nl ack");
1489                         goto end;
1490                 }
1491         }
1492 end:
1493         if (remote_flow)
1494                 rte_free(remote_flow);
1495         rte_free(flow);
1496         return ret;
1497 }
1498
1499 /**
1500  * Destroy a flow.
1501  *
1502  * @see rte_flow_destroy()
1503  * @see rte_flow_ops
1504  */
1505 static int
1506 tap_flow_destroy(struct rte_eth_dev *dev,
1507                  struct rte_flow *flow,
1508                  struct rte_flow_error *error)
1509 {
1510         struct pmd_internals *pmd = dev->data->dev_private;
1511
1512         return tap_flow_destroy_pmd(pmd, flow, error);
1513 }
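
/*
 * Usage sketch (assumes port_id and the flow pointer f from the creation
 * sketch above): destroying a single rule through the public API, which
 * lands in tap_flow_destroy_pmd().
 *
 *     struct rte_flow_error err;
 *     if (rte_flow_destroy(port_id, f, &err) < 0)
 *             printf("destroy failed: %s\n", err.message);
 */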
1514
1515 /**
1516  * Enable/disable flow isolation.
1517  *
1518  * @see rte_flow_isolate()
1519  * @see rte_flow_ops
1520  */
1521 static int
1522 tap_flow_isolate(struct rte_eth_dev *dev,
1523                  int set,
1524                  struct rte_flow_error *error __rte_unused)
1525 {
1526         struct pmd_internals *pmd = dev->data->dev_private;
1527
1528         if (set)
1529                 pmd->flow_isolate = 1;
1530         else
1531                 pmd->flow_isolate = 0;
1532         /*
1533          * If the netdevice is present, set up the flow rules immediately.
1534          * Otherwise they are set when the netdevice is brought up (tun_alloc).
1535          */
1536         if (!pmd->rxq[0].fd)
1537                 return 0;
1538         if (set) {
1539                 struct rte_flow *flow;
1540
1541                 while (1) {
1542                         flow = LIST_FIRST(&pmd->implicit_flows);
1543                         if (!flow)
1544                                 break;
1545                         /*
1546                          * Remove all implicit rules on the remote.
1547                          * Keep the local rule to redirect packets on TX.
1548                          * Also keep the last implicit local rule: ISOLATE.
1549                          */
1550                         if (flow->msg.t.tcm_ifindex == pmd->if_index)
1551                                 break;
1552                         if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
1553                                 goto error;
1554                 }
1555                 /* Switch the TC rule according to pmd->flow_isolate */
1556                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1557                         goto error;
1558         } else {
1559                 /* Switch the TC rule according to pmd->flow_isolate */
1560                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1561                         goto error;
1562                 if (!pmd->remote_if_index)
1563                         return 0;
1564                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1565                         goto error;
1566                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1567                         goto error;
1568                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1569                         goto error;
1570                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1571                         goto error;
1572                 if (dev->data->promiscuous &&
1573                     tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1574                         goto error;
1575                 if (dev->data->all_multicast &&
1576                     tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1577                         goto error;
1578         }
1579         return 0;
1580 error:
1581         pmd->flow_isolate = 0;
1582         return rte_flow_error_set(
1583                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1584                 "TC rule creation failed");
1585 }
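
/*
 * Usage sketch (port_id is an assumption): toggling isolated mode from an
 * application. With isolation on, only traffic matching explicit flow
 * rules reaches the application.
 *
 *     struct rte_flow_error err;
 *     if (rte_flow_isolate(port_id, 1, &err) < 0)  // enter isolated mode
 *             printf("isolate failed: %s\n", err.message);
 *     rte_flow_isolate(port_id, 0, &err);          // leave isolated mode
 */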
1586
1587 /**
1588  * Destroy all flows.
1589  *
1590  * @see rte_flow_flush()
1591  * @see rte_flow_ops
1592  */
1593 int
1594 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1595 {
1596         struct pmd_internals *pmd = dev->data->dev_private;
1597         struct rte_flow *flow;
1598
1599         while (!LIST_EMPTY(&pmd->flows)) {
1600                 flow = LIST_FIRST(&pmd->flows);
1601                 if (tap_flow_destroy(dev, flow, error) < 0)
1602                         return -1;
1603         }
1604         return 0;
1605 }
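
/*
 * Usage sketch (port_id is an assumption): flushing every rule on the
 * port in one call rather than destroying them one by one.
 *
 *     struct rte_flow_error err;
 *     if (rte_flow_flush(port_id, &err) < 0)
 *             printf("flush failed: %s\n", err.message);
 */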
1606
1607 /**
1608  * Add an implicit flow rule on the remote device to make sure traffic gets to
1609  * the tap netdevice from there.
1610  *
1611  * @param pmd
1612  *   Pointer to private structure.
1613  * @param[in] idx
1614  *   The idx in the implicit_rte_flows array specifying which rule to apply.
1615  *
1616  * @return -1 if the rule couldn't be applied, 0 otherwise.
1617  */
1618 int tap_flow_implicit_create(struct pmd_internals *pmd,
1619                              enum implicit_rule_index idx)
1620 {
1621         uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1622         struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1623         struct rte_flow_action isolate_actions[2] = {
1624                 [1] = {
1625                         .type = RTE_FLOW_ACTION_TYPE_END,
1626                 },
1627         };
1628         struct rte_flow_item *items = implicit_rte_flows[idx].items;
1629         struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1630         struct rte_flow_item_eth eth_local = { .type = 0 };
1631         uint16_t if_index = pmd->remote_if_index;
1632         struct rte_flow *remote_flow = NULL;
1633         struct nlmsg *msg = NULL;
1634         int err = 0;
1635         struct rte_flow_item items_local[2] = {
1636                 [0] = {
1637                         .type = items[0].type,
1638                         .spec = &eth_local,
1639                         .mask = items[0].mask,
1640                 },
1641                 [1] = {
1642                         .type = items[1].type,
1643                 }
1644         };
1645
1646         remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1647         if (!remote_flow) {
1648                 RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
1649                 goto fail;
1650         }
1651         msg = &remote_flow->msg;
1652         if (idx == TAP_REMOTE_TX) {
1653                 if_index = pmd->if_index;
1654         } else if (idx == TAP_ISOLATE) {
1655                 if_index = pmd->if_index;
1656                 /* Don't be exclusive for this rule; it can be changed later. */
1657                 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1658                 isolate_actions[0].type = pmd->flow_isolate ?
1659                         RTE_FLOW_ACTION_TYPE_DROP :
1660                         RTE_FLOW_ACTION_TYPE_PASSTHRU;
1661                 actions = isolate_actions;
1662         } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1663                 /*
1664                  * The MAC address cannot be set in implicit_rte_flows[]
1665                  * as it is not known at compile time.
1666                  */
1667                 memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1668                 items = items_local;
1669         }
1670         tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1671         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1672         /*
1673          * The ISOLATE rule is always present and must have a static handle, as
1674          * the action is changed depending on whether the feature is
1675          * enabled (DROP) or disabled (PASSTHRU).
1676          */
1677         if (idx == TAP_ISOLATE)
1678                 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1679         else
1680                 tap_flow_set_handle(remote_flow);
1681         if (priv_flow_process(pmd, attr, items, actions, NULL,
1682                               remote_flow, implicit_rte_flows[idx].mirred)) {
1683                 RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
1684                 goto fail;
1685         }
1686         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1687         if (err < 0) {
1688                 RTE_LOG(ERR, PMD, "Failure sending nl request\n");
1689                 goto fail;
1690         }
1691         err = tap_nl_recv_ack(pmd->nlsk_fd);
1692         if (err < 0) {
1693                 RTE_LOG(ERR, PMD,
1694                         "Kernel refused TC filter rule creation (%d): %s\n",
1695                         errno, strerror(errno));
1696                 goto fail;
1697         }
1698         LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1699         return 0;
1700 fail:
1701         if (remote_flow)
1702                 rte_free(remote_flow);
1703         return -1;
1704 }
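
/*
 * Sketch of the ISOLATE update mechanism used above: a fixed handle plus
 * NLM_F_CREATE without NLM_F_EXCL lets a later RTM_NEWTFILTER replace the
 * rule in place, flipping its action between DROP and PASSTHRU.
 *
 *     msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
 *     msg.t.tcm_handle = ISOLATE_HANDLE;  // same handle on every update
 */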
1705
1706 /**
1707  * Remove specific implicit flow rule on the remote device.
1708  *
1709  * @param[in, out] pmd
1710  *   Pointer to private structure.
1711  * @param[in] idx
1712  *   The idx in the implicit_rte_flows array specifying which rule to remove.
1713  *
1714  * @return -1 if the implicit rule couldn't be removed, 0 otherwise.
1715  */
1716 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1717                               enum implicit_rule_index idx)
1718 {
1719         struct rte_flow *remote_flow;
1720         int cur_prio = -1;
1721         int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1722
1723         for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1724              remote_flow;
1725              remote_flow = LIST_NEXT(remote_flow, next)) {
1726                 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1727                 if (cur_prio != idx_prio)
1728                         continue;
1729                 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1730         }
1731         return 0;
1732 }
1733
1734 /**
1735  * Destroy all implicit flows.
1736  *
1737  * @see rte_flow_flush()
1738  */
1739 int
1740 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1741 {
1742         struct rte_flow *remote_flow;
1743
1744         while (!LIST_EMPTY(&pmd->implicit_flows)) {
1745                 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1746                 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1747                         return -1;
1748         }
1749         return 0;
1750 }
1751
1752 #define MAX_RSS_KEYS 256
1753 #define SEC_NAME_CLS_Q "cls_q"
1754
1755 const char *sec_name[SEC_MAX] = {
1756         [SEC_L3_L4] = "l3_l4",
1757 };
1758
1759 /**
1760  * Enable RSS on tap: create TC rules for queuing.
1761  *
1762  * @param[in, out] pmd
1763  *   Pointer to private structure.
1764  *
1765  * @param[in] attr
1766  *   Pointer to the flow attributes; used to get the flow group.
1767  *
1768  * @param[out] error
1769  *   Pointer to error reporting if not NULL.
1770  *
1771  * @return 0 on success, negative value on failure.
1772  */
1773 static int rss_enable(struct pmd_internals *pmd,
1774                         const struct rte_flow_attr *attr,
1775                         struct rte_flow_error *error)
1776 {
1777         struct rte_flow *rss_flow = NULL;
1778         struct nlmsg *msg = NULL;
1779         /* annotation buffer for BPF section names, e.g. "[cls_q0]" */
1780         char annotation[64];
1781         int i;
1782         int err = 0;
1783
1784         /* lift the locked-memory limit: BPF maps count against it */
1785         struct rlimit memlock_limit = {
1786                 .rlim_cur = RLIM_INFINITY,
1787                 .rlim_max = RLIM_INFINITY,
1788         };
1789         setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1790
1791         /* Initialize the repository of RSS keys */
1792         err = bpf_rss_key(KEY_CMD_INIT, NULL);
1793         if (err < 0) {
1794                 rte_flow_error_set(
1795                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1796                         "Failed to initialize BPF RSS keys");
1797
1798                 return -1;
1799         }
1800
1801         /*
1802          * Create the BPF RSS map
1803          */
1804         pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1805                                 sizeof(struct rss_key),
1806                                 MAX_RSS_KEYS);
1807         if (pmd->map_fd < 0) {
1808                 RTE_LOG(ERR, PMD,
1809                         "Failed to create BPF map (%d): %s\n",
1810                                 errno, strerror(errno));
1811                 rte_flow_error_set(
1812                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1813                         "Kernel too old or not configured "
1814                         "to support BPF maps");
1815
1816                 return -ENOTSUP;
1817         }
1818
1819         /*
1820          * Add a rule per queue to match reclassified packets and direct them to
1821          * the correct queue.
1822          */
1823         for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1824                 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1825                 if (pmd->bpf_fd[i] < 0) {
1826                         RTE_LOG(ERR, PMD,
1827                                 "Failed to load BPF section %s for queue %d\n",
1828                                 SEC_NAME_CLS_Q, i);
1829                         rte_flow_error_set(
1830                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1831                                 NULL,
1832                                 "Kernel too old or not configured "
1833                                 "to support BPF programs loading");
1834
1835                         return -ENOTSUP;
1836                 }
1837
1838                 rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1839                 if (!rss_flow) {
1840                         RTE_LOG(ERR, PMD,
1841                                 "Cannot allocate memory for rte_flow\n");
1842                         return -1;
1843                 }
1844                 msg = &rss_flow->msg;
1845                 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1846                             NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1847                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1848                 tap_flow_set_handle(rss_flow);
1849                 uint16_t group = attr->group << GROUP_SHIFT;
1850                 uint16_t prio = group | (i + PRIORITY_OFFSET);
1851                 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
1852                 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1853
1854                 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1855                 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1856                         return -1;
1857                 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1858                 snprintf(annotation, sizeof(annotation), "[%s%d]",
1859                         SEC_NAME_CLS_Q, i);
1860                 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1861                            annotation);
1862                 /* Actions */
1863                 {
1864                         struct action_data adata = {
1865                                 .id = "skbedit",
1866                                 .skbedit = {
1867                                         .skbedit = {
1868                                                 .action = TC_ACT_PIPE,
1869                                         },
1870                                         .queue = i,
1871                                 },
1872                         };
1873                         if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1874                                 return -1;
1875                 }
1876                 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1877
1878                 /* Netlink message is now ready to be sent */
1879                 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1880                         return -1;
1881                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1882                 if (err < 0) {
1883                         RTE_LOG(ERR, PMD,
1884                                 "Kernel refused TC filter rule creation (%d): %s\n",
1885                                 errno, strerror(errno));
1886                         return err;
1887                 }
1888                 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1889         }
1890
1891         pmd->rss_enabled = 1;
1892         return err;
1893 }
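
/*
 * Sketch of the tcm_info layout composed above: the upper 16 bits carry
 * the TC priority (group bits OR'd with a per-queue offset), the lower
 * 16 bits the protocol.
 *
 *     uint16_t prio = (attr->group << GROUP_SHIFT) | (i + PRIORITY_OFFSET);
 *     msg->t.tcm_info = TC_H_MAKE(prio << 16, htons(ETH_P_ALL));
 */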
1894
1895 /**
1896  * Manage the BPF RSS key repository: init, get, release, deinit
1897  *
1898  * @param[in] cmd
1899  *   Command to perform on the RSS keys: init, get, release, deinit
1900  *
1901  * @param[in, out] key_idx
1902  *   Pointer to RSS Key index (out for get command, in for release command)
1903  *
1904  * @return -1 if the operation on the RSS keys failed, 0 otherwise.
1905  */
1906 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1907 {
1908         __u32 i;
1909         int err = -1;
1910         static __u32 num_used_keys;
1911         static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1912         static __u32 rss_keys_initialized;
1913
1914         switch (cmd) {
1915         case KEY_CMD_GET:
1916                 if (!rss_keys_initialized)
1917                         break;
1918
1919                 if (num_used_keys == RTE_DIM(rss_keys))
1920                         break;
1921
1922                 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1923                 while (rss_keys[*key_idx] == KEY_STAT_USED)
1924                         *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1925
1926                 rss_keys[*key_idx] = KEY_STAT_USED;
1927                 num_used_keys++;
1928                 err = 0;
1929         break;
1930
1931         case KEY_CMD_RELEASE:
1932                 if (!rss_keys_initialized) {
1933                         err = 0;
1934                         break;
1935                 }
1936
1937                 if (rss_keys[*key_idx] == KEY_STAT_USED) {
1938                         rss_keys[*key_idx] = KEY_STAT_AVAILABLE;
1939                         num_used_keys--;
1940                         err = 0;
1941                 }
1942         break;
1943
1944         case KEY_CMD_INIT:
1945                 for (i = 0; i < RTE_DIM(rss_keys); i++)
1946                         rss_keys[i] = KEY_STAT_AVAILABLE;
1947
1948                 rss_keys_initialized = 1;
1949                 num_used_keys = 0;
1950                 err = 0;
1951         break;
1952
1953         case KEY_CMD_DEINIT:
1954                 for (i = 0; i < RTE_DIM(rss_keys); i++)
1955                         rss_keys[i] = KEY_STAT_UNSPEC;
1956
1957                 rss_keys_initialized = 0;
1958                 num_used_keys = 0;
1959                 err = 0;
1960         break;
1961
1962         default:
1963                 break;
1964         }
1965
1966         return err;
1967 }
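
/*
 * Key lifecycle sketch, matching the callers in this file:
 *
 *     __u32 idx;
 *     bpf_rss_key(KEY_CMD_INIT, NULL);     // once, when RSS is enabled
 *     bpf_rss_key(KEY_CMD_GET, &idx);      // one key per new RSS rule
 *     bpf_rss_key(KEY_CMD_RELEASE, &idx);  // when that rule is destroyed
 *     bpf_rss_key(KEY_CMD_DEINIT, NULL);   // when the repository is reset
 */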
1968
1969 /**
1970  * Add RSS hash calculations and queue selection
1971  * @param[in, out] flow
1972  *   Pointer to the flow the RSS actions are added to.
1973  * @param[in, out] pmd
1974  *   Pointer to internal structure; used to set/get the RSS map fd.
1975  * @param[in] rss
1976  *   Pointer to RSS flow actions
1977  *
1978  * @param[out] error
1979  *   Pointer to error reporting if not NULL.
1980  *
1981  * @return 0 on success, negative value on failure
1982  */
1983 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
1984                            const struct rte_flow_action_rss *rss,
1985                            struct rte_flow_error *error)
1986 {
1988         int i;
1989         int err;
1990         struct rss_key rss_entry = { .hash_fields = 0,
1991                                      .key_size = 0 };
1992
1993         /* Get a new map key for a new RSS rule */
1994         err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
1995         if (err < 0) {
1996                 rte_flow_error_set(
1997                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1998                         "Failed to get BPF RSS key");
1999
2000                 return -1;
2001         }
2002
2003         /* Update RSS map entry with queues */
2004         rss_entry.nb_queues = rss->num;
2005         for (i = 0; i < rss->num; i++)
2006                 rss_entry.queues[i] = rss->queue[i];
2007         rss_entry.hash_fields =
2008                 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
2009
2010         /* Add this RSS entry to map */
2011         err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2012                                 &flow->key_idx, &rss_entry);
2013
2014         if (err) {
2015                 RTE_LOG(ERR, PMD,
2016                         "Failed to update BPF map entry #%u (%d): %s\n",
2017                         flow->key_idx, errno, strerror(errno));
2018                 rte_flow_error_set(
2019                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2020                         "Kernel too old or not configured "
2021                         "to support BPF maps updates");
2022
2023                 return -ENOTSUP;
2024         }
2025
2027         /* Load the BPF program that computes the hash for this
2028          * key_idx (a BPF program is limited to 4096 instructions).
2029          */
2030
2031         flow->bpf_fd[SEC_L3_L4] =
2032                 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2033         if (flow->bpf_fd[SEC_L3_L4] < 0) {
2034                 RTE_LOG(ERR, PMD,
2035                         "Failed to load BPF section %s (%d): %s\n",
2036                                 sec_name[SEC_L3_L4], errno, strerror(errno));
2037                 rte_flow_error_set(
2038                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2039                         "Kernel too old or not configured "
2040                         "to support BPF program loading");
2041
2042                 return -ENOTSUP;
2043         }
2044
2045         /* Actions */
2046         {
2047                 struct action_data adata[] = {
2048                         {
2049                                 .id = "bpf",
2050                                 .bpf = {
2051                                         .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2052                                         .annotation = sec_name[SEC_L3_L4],
2053                                         .bpf = {
2054                                                 .action = TC_ACT_PIPE,
2055                                         },
2056                                 },
2057                         },
2058                 };
2059
2060                 if (add_actions(flow, RTE_DIM(adata), adata,
2061                         TCA_FLOWER_ACT) < 0)
2062                         return -1;
2063         }
2064
2065         return 0;
2066 }
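
/*
 * Usage sketch (port_id is an assumption): requesting RSS over two queues
 * with the rte_flow_action_rss layout this file relies on (num/queue[]);
 * queue[] is a flexible array, hence the heap allocation. Note the driver
 * hashes IPv4/IPv6 L3/L4 fields regardless of rss_conf.
 *
 *     struct rte_flow_action_rss *rss =
 *             malloc(sizeof(*rss) + 2 * sizeof(uint16_t));
 *     rss->rss_conf = NULL;  // keep the default RSS hash key
 *     rss->num = 2;
 *     rss->queue[0] = 0;
 *     rss->queue[1] = 1;
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */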
2067
2068 /**
2069  * Manage filter operations.
2070  *
2071  * @param dev
2072  *   Pointer to Ethernet device structure.
2073  * @param filter_type
2074  *   Filter type.
2075  * @param filter_op
2076  *   Operation to perform.
2077  * @param arg
2078  *   Pointer to operation-specific structure.
2079  *
2080  * @return
2081  *   0 on success, negative errno value on failure.
2082  */
2083 int
2084 tap_dev_filter_ctrl(struct rte_eth_dev *dev,
2085                     enum rte_filter_type filter_type,
2086                     enum rte_filter_op filter_op,
2087                     void *arg)
2088 {
2089         switch (filter_type) {
2090         case RTE_ETH_FILTER_GENERIC:
2091                 if (filter_op != RTE_ETH_FILTER_GET)
2092                         return -EINVAL;
2093                 *(const void **)arg = &tap_flow_ops;
2094                 return 0;
2095         default:
2096                 RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
2097                         (void *)dev, filter_type);
2098         }
2099         return -EINVAL;
2100 }
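
/*
 * This hook is how the rte_flow library locates the driver's flow ops.
 * Equivalent direct query (sketch; port_id is an assumption):
 *
 *     const struct rte_flow_ops *ops = NULL;
 *     if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                                 RTE_ETH_FILTER_GET, &ops) == 0 && ops)
 *             ;  // ops now points at tap_flow_ops
 */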
2101