net/tap: use SPDX tags in 6WIND copyrighted files
dpdk.git: drivers/net/tap/tap_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>
#include <sys/resource.h>

#include <rte_byteorder.h>
#include <rte_jhash.h>
#include <rte_malloc.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_autoconf.h>
#include <tap_tcmsgs.h>
#include <tap_rss.h>

#ifndef HAVE_TC_FLOWER
/*
 * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
 * avoid sending TC messages the kernel cannot understand.
 */
enum {
        TCA_FLOWER_UNSPEC,
        TCA_FLOWER_CLASSID,
        TCA_FLOWER_INDEV,
        TCA_FLOWER_ACT,
        TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
        TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
        TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
        TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
        TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
        TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
        TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
        TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
        TCA_FLOWER_KEY_TCP_DST,         /* be16 */
        TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
        TCA_FLOWER_KEY_UDP_DST,         /* be16 */
};
#endif
#ifndef HAVE_TC_VLAN_ID
enum {
        /* TCA_FLOWER_FLAGS, */
        TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
        TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
        TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
};
#endif
/*
 * For kernels < 4.2, BPF-related enums may not be defined.
 * Runtime checks will be carried out to gracefully report on TC messages that
 * are rejected by the kernel. Rejection may occur because:
 * 1. the enum is not defined
 * 2. the enum is defined, but the kernel is not configured to support BPF
 *    system calls, BPF classifiers or BPF actions.
 */
#ifndef HAVE_TC_BPF
enum {
        TCA_BPF_UNSPEC,
        TCA_BPF_ACT,
        TCA_BPF_POLICE,
        TCA_BPF_CLASSID,
        TCA_BPF_OPS_LEN,
        TCA_BPF_OPS,
};
#endif
#ifndef HAVE_TC_BPF_FD
enum {
        TCA_BPF_FD = TCA_BPF_OPS + 1,
        TCA_BPF_NAME,
};
#endif
#ifndef HAVE_TC_ACT_BPF
#define tc_gen \
        __u32                 index; \
        __u32                 capab; \
        int                   action; \
        int                   refcnt; \
        int                   bindcnt

struct tc_act_bpf {
        tc_gen;
};

enum {
        TCA_ACT_BPF_UNSPEC,
        TCA_ACT_BPF_TM,
        TCA_ACT_BPF_PARMS,
        TCA_ACT_BPF_OPS_LEN,
        TCA_ACT_BPF_OPS,
};

#endif
#ifndef HAVE_TC_ACT_BPF_FD
enum {
        TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
        TCA_ACT_BPF_NAME,
};
#endif

/* RSS key management */
enum bpf_rss_key_e {
        KEY_CMD_GET = 1,
        KEY_CMD_RELEASE,
        KEY_CMD_INIT,
        KEY_CMD_DEINIT,
};

enum key_status_e {
        KEY_STAT_UNSPEC,
        KEY_STAT_USED,
        KEY_STAT_AVAILABLE,
};

#define ISOLATE_HANDLE 1

struct rte_flow {
        LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
        struct rte_flow *remote_flow; /* associated remote flow */
        int bpf_fd[SEC_MAX]; /* list of BPF fds per ELF section */
        uint32_t key_idx; /* RSS rule key index into BPF map */
        struct nlmsg msg;
};

struct convert_data {
        uint16_t eth_type;
        uint16_t ip_proto;
        uint8_t vlan;
        struct rte_flow *flow;
};

struct remote_rule {
        struct rte_flow_attr attr;
        struct rte_flow_item items[2];
        struct rte_flow_action actions[2];
        int mirred;
};

struct action_data {
        char id[16];

        union {
                struct tc_gact gact;
                struct tc_mirred mirred;
                struct skbedit {
                        struct tc_skbedit skbedit;
                        uint16_t queue;
                } skbedit;
                struct bpf {
                        struct tc_act_bpf bpf;
                        int bpf_fd;
                        const char *annotation;
                } bpf;
        };
};

static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
static int
tap_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error);

static struct rte_flow *
tap_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

static void
tap_flow_free(struct pmd_internals *pmd,
        struct rte_flow *flow);

static int
tap_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error);

static int
tap_flow_isolate(struct rte_eth_dev *dev,
                 int set,
                 struct rte_flow_error *error);

static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
static int rss_enable(struct pmd_internals *pmd,
                        const struct rte_flow_attr *attr,
                        struct rte_flow_error *error);
static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
                        const struct rte_flow_action_rss *rss,
                        struct rte_flow_error *error);

static const struct rte_flow_ops tap_flow_ops = {
        .validate = tap_flow_validate,
        .create = tap_flow_create,
        .destroy = tap_flow_destroy,
        .flush = tap_flow_flush,
        .isolate = tap_flow_isolate,
};

/* Static initializer for items. */
#define ITEMS(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }

/* Structure to generate a simple graph of layers supported by the NIC. */
struct tap_flow_items {
        /* Bit-mask corresponding to what is supported for this item. */
        const void *mask;
        const unsigned int mask_sz; /* Bit-mask size in bytes. */
        /*
         * Bit-mask corresponding to the default mask, if none is provided
         * along with the item.
         */
        const void *default_mask;
        /**
         * Conversion function from rte_flow to netlink attributes.
         *
         * @param item
         *   rte_flow item to convert.
         * @param data
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item, void *data);
        /** List of possible following items. */
        const enum rte_flow_item_type *const items;
};

/* Graph of supported items and associated actions. */
static const struct tap_flow_items tap_flow_items[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .items = ITEMS(
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6),
                .mask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .type = -1,
                },
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .default_mask = &rte_flow_item_eth_mask,
                .convert = tap_flow_create_eth,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6),
                .mask = &(const struct rte_flow_item_vlan){
                        .tpid = -1,
                        /* DEI matching is not supported */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
                        .tci = 0xffef,
#else
                        .tci = 0xefff,
#endif
                },
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .default_mask = &rte_flow_item_vlan_mask,
                .convert = tap_flow_create_vlan,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .mask = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = -1,
                                .dst_addr = -1,
                                .next_proto_id = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .default_mask = &rte_flow_item_ipv4_mask,
                .convert = tap_flow_create_ipv4,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .mask = &(const struct rte_flow_item_ipv6){
                        .hdr = {
                                .src_addr = {
                                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                                },
                                .dst_addr = {
                                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                                },
                                .proto = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_ipv6),
                .default_mask = &rte_flow_item_ipv6_mask,
                .convert = tap_flow_create_ipv6,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .default_mask = &rte_flow_item_udp_mask,
                .convert = tap_flow_create_udp,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .default_mask = &rte_flow_item_tcp_mask,
                .convert = tap_flow_create_tcp,
        },
};

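/*
 * Illustrative note (not part of the upstream file): the .items arrays above
 * define which pattern item may follow which. A pattern such as
 * ETH -> IPV4 -> UDP -> END walks this graph successfully, whereas
 * ETH -> UDP -> END is rejected, since UDP is not listed among the items
 * allowed to follow ETH.
 */
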
/*
 *                TC rules, by growing priority
 *
 *        Remote netdevice                  Tap netdevice
 * +-------------+-------------+  +-------------+-------------+
 * |   Ingress   |   Egress    |  |   Ingress   |   Egress    |
 * |-------------|-------------|  |-------------|-------------|
 * |             |  \       /  |  |             |  REMOTE TX  | prio 1
 * |             |   \     /   |  |             |   \     /   | prio 2
 * |  EXPLICIT   |    \   /    |  |  EXPLICIT   |    \   /    |   .
 * |             |     \ /     |  |             |     \ /     |   .
 * |    RULES    |      X      |  |    RULES    |      X      |   .
 * |      .      |     / \     |  |      .      |     / \     |   .
 * |      .      |    /   \    |  |      .      |    /   \    |   .
 * |      .      |   /     \   |  |      .      |   /     \   |   .
 * |      .      |  /       \  |  |      .      |  /       \  |   .
 *
 *      ....           ....           ....           ....
 *
 * |      .      |  \       /  |  |      .      |  \       /  |   .
 * |      .      |   \     /   |  |      .      |   \     /   |   .
 * |             |    \   /    |  |             |    \   /    |
 * |  LOCAL_MAC  |     \ /     |  |    \   /    |     \ /     | last prio - 5
 * |   PROMISC   |      X      |  |     \ /     |      X      | last prio - 4
 * |   ALLMULTI  |     / \     |  |      X      |     / \     | last prio - 3
 * |  BROADCAST  |    /   \    |  |     / \     |    /   \    | last prio - 2
 * | BROADCASTV6 |   /     \   |  |    /   \    |   /     \   | last prio - 1
 * |     xx      |  /       \  |  |   ISOLATE   |  /       \  | last prio
 * +-------------+-------------+  +-------------+-------------+
 *
 * The implicit flow rules are stored in a list, with the last two mandatorily
 * being the ISOLATE and REMOTE_TX rules, e.g.:
 *
 * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
 *
 * That enables tap_flow_isolate() to remove implicit rules by popping the list
 * head and removing it as long as it applies to the remote netdevice. The
 * implicit rule for TX redirection is not removed, as isolate only concerns
 * incoming traffic.
 */

static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
        [TAP_REMOTE_LOCAL_MAC] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_REDIR,
        },
        [TAP_REMOTE_BROADCAST] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_BROADCASTV6] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_PROMISC] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_VOID,
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_ALLMULTI] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_TX] = {
                .attr = {
                        .group = 0,
                        .priority = TAP_REMOTE_TX,
                        .egress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_VOID,
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_ISOLATE] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_ISOLATE,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_VOID,
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
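
/*
 * A minimal sketch (hypothetical, assuming the implicit rule list is
 * reachable as pmd->implicit_flows) of the unwinding described above:
 * implicit rules are popped from the list head for as long as they apply
 * to the remote netdevice.
 *
 *      struct rte_flow *remote_flow = LIST_FIRST(&pmd->implicit_flows);
 *
 *      while (remote_flow) {
 *              LIST_REMOVE(remote_flow, next);
 *              tap_flow_free(pmd, remote_flow);
 *              remote_flow = LIST_FIRST(&pmd->implicit_flows);
 *      }
 */
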
/**
 * Make as many checks as possible on an Ethernet item, and if a flow is
 * provided, fill it appropriately with Ethernet info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_eth(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
        /* TC does not support eth_type masking. Only accept if exact match. */
        if (mask->type && mask->type != 0xffff)
                return -1;
        if (!spec)
                return 0;
        /* store eth_type for consistency if ipv4/6 pattern item comes next */
        if (spec->type & mask->type)
                info->eth_type = spec->type;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (!is_zero_ether_addr(&spec->dst)) {
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
                           &spec->dst.addr_bytes);
                tap_nlattr_add(&msg->nh,
                           TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
                           &mask->dst.addr_bytes);
        }
        if (!is_zero_ether_addr(&mask->src)) {
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
                           &spec->src.addr_bytes);
                tap_nlattr_add(&msg->nh,
                           TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
                           &mask->src.addr_bytes);
        }
        return 0;
}

/**
 * Make as many checks as possible on a VLAN item, and if a flow is provided,
 * fill it appropriately with VLAN info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
        /* TC does not support tpid masking. Only accept if exact match. */
        if (mask->tpid && mask->tpid != 0xffff)
                return -1;
        /* Double-tagging not supported. */
        if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
                return -1;
        info->vlan = 1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
#define VLAN_PRIO(tci) ((tci) >> 13)
#define VLAN_ID(tci) ((tci) & 0xfff)
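        /*
         * Example: a TCI of 0xa005 decomposes as prio = 0xa005 >> 13 = 5 and
         * vid = 0xa005 & 0xfff = 5; the DEI bit (0x1000) is never matched,
         * since the supported mask above excludes it.
         */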
        if (!spec)
                return 0;
        if (spec->tci) {
                uint16_t tci = ntohs(spec->tci) & mask->tci;
                uint16_t prio = VLAN_PRIO(tci);
                uint16_t vid = VLAN_ID(tci); /* 12-bit field, avoid truncation */

                if (prio)
                        tap_nlattr_add8(&msg->nh,
                                        TCA_FLOWER_KEY_VLAN_PRIO, prio);
                if (vid)
                        tap_nlattr_add16(&msg->nh,
                                         TCA_FLOWER_KEY_VLAN_ID, vid);
        }
        return 0;
}

/**
 * Make as many checks as possible on an IPv4 item, and if a flow is provided,
 * fill it appropriately with IPv4 info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
        /* check that previous eth type is compatible with ipv4 */
        if (info->eth_type && info->eth_type != htons(ETH_P_IP))
                return -1;
        /* store ip_proto for consistency if udp/tcp pattern item comes next */
        if (spec)
                info->ip_proto = spec->hdr.next_proto_id;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (!info->eth_type)
                info->eth_type = htons(ETH_P_IP);
        if (!spec)
                return 0;
        if (spec->hdr.dst_addr) {
                tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
                             spec->hdr.dst_addr);
                tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
                             mask->hdr.dst_addr);
        }
        if (spec->hdr.src_addr) {
                tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
                             spec->hdr.src_addr);
                tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                             mask->hdr.src_addr);
        }
        if (spec->hdr.next_proto_id)
                tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
                            spec->hdr.next_proto_id);
        return 0;
}

/**
 * Make as many checks as possible on an IPv6 item, and if a flow is provided,
 * fill it appropriately with IPv6 info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct rte_flow *flow = info->flow;
        uint8_t empty_addr[16] = { 0 };
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
        /* check that previous eth type is compatible with ipv6 */
        if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
                return -1;
        /* store ip_proto for consistency if udp/tcp pattern item comes next */
        if (spec)
                info->ip_proto = spec->hdr.proto;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (!info->eth_type)
                info->eth_type = htons(ETH_P_IPV6);
        if (!spec)
                return 0;
        if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
                           sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
                           sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
        }
        if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
                           sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
                tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                           sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
        }
        if (spec->hdr.proto)
                tap_nlattr_add8(&msg->nh,
                                TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
        return 0;
}

/**
 * Make as many checks as possible on a UDP item, and if a flow is provided,
 * fill it appropriately with UDP info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_udp(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
        /* check that previous ip_proto is compatible with udp */
        if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
                return -1;
        /* TC does not support UDP port masking. Only accept if exact match. */
        if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
            (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
                return -1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
        if (!spec)
                return 0;
        if (spec->hdr.dst_port & mask->hdr.dst_port)
                tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
                             spec->hdr.dst_port);
        if (spec->hdr.src_port & mask->hdr.src_port)
                tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
                             spec->hdr.src_port);
        return 0;
}

/**
 * Make as many checks as possible on a TCP item, and if a flow is provided,
 * fill it appropriately with TCP info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
        /* check that previous ip_proto is compatible with tcp */
        if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
                return -1;
        /* TC does not support TCP port masking. Only accept if exact match. */
        if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
            (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
                return -1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
        if (!spec)
                return 0;
        if (spec->hdr.dst_port & mask->hdr.dst_port)
                tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
                             spec->hdr.dst_port);
        if (spec->hdr.src_port & mask->hdr.src_port)
                tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
                             spec->hdr.src_port);
        return 0;
}

/**
 * Check support for a given item.
 *
 * @param[in] item
 *   Item specification.
 * @param size
 *   Bit-mask size in bytes.
 * @param[in] supported_mask
 *   Bit-mask covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param[in] default_mask
 *   Default bit-mask used if none is provided in \item.
 *
 * @return
 *   0 on success.
 */
static int
tap_flow_item_validate(const struct rte_flow_item *item,
                       unsigned int size,
                       const uint8_t *supported_mask,
                       const uint8_t *default_mask)
{
        int ret = 0;

        /* An empty layer is allowed, as long as all fields are NULL */
        if (!item->spec && (item->mask || item->last))
                return -1;
        /* Is the item spec compatible with what the NIC supports? */
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
                /* Is the default mask compatible with what the NIC supports? */
                for (i = 0; i < size; i++)
                        if ((default_mask[i] | supported_mask[i]) !=
                            supported_mask[i])
                                return -1;
        }
        /* Is the item last compatible with what the NIC supports? */
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
        }
        /* Is the item mask compatible with what the NIC supports? */
        if (item->mask) {
                unsigned int i;
                const uint8_t *spec = item->mask;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
        }
        /**
         * Once masked, are item spec and item last equal?
         * TC does not support ranges, so anything else is invalid.
         */
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = default_mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}
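
/*
 * Worked example for the spec/last check above: with a full 0xffff mask, a
 * TCP item whose spec and last dst_port are both 80 passes (memcmp() == 0),
 * while spec 80 / last 90 is rejected, since TC flower cannot express port
 * ranges.
 */
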
/**
 * Configure the kernel with a TC action and its parameters.
 * Handled actions: "gact", "mirred", "skbedit", "bpf"
 *
 * @param[in] flow
 *   Pointer to rte flow containing the netlink message
 *
 * @param[in, out] act_index
 *   Pointer to action sequence number in the TC command
 *
 * @param[in] adata
 *   Pointer to struct holding the action parameters
 *
 * @return
 *   -1 on failure, 0 on success
 */
static int
add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
{
        struct nlmsg *msg = &flow->msg;

        if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
                return -1;

        tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
                                strlen(adata->id) + 1, adata->id);
        if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
                return -1;
        if (strcmp("gact", adata->id) == 0) {
                tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
                           &adata->gact);
        } else if (strcmp("mirred", adata->id) == 0) {
                if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
                        adata->mirred.action = TC_ACT_PIPE;
                else /* REDIRECT */
                        adata->mirred.action = TC_ACT_STOLEN;
                tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
                           sizeof(adata->mirred),
                           &adata->mirred);
        } else if (strcmp("skbedit", adata->id) == 0) {
                tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
                           sizeof(adata->skbedit.skbedit),
                           &adata->skbedit.skbedit);
                tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
                             adata->skbedit.queue);
        } else if (strcmp("bpf", adata->id) == 0) {
                tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
                tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
                           strlen(adata->bpf.annotation) + 1,
                           adata->bpf.annotation);
                tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
                           sizeof(adata->bpf.bpf),
                           &adata->bpf.bpf);
        } else {
                return -1;
        }
        tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
        tap_nlattr_nested_finish(msg); /* nested act_index */
        return 0;
}
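
/*
 * For reference, a sketch of the nested attribute layout produced above for
 * a "gact" action (illustrative, not an exhaustive dump):
 *
 *      [act_index]
 *          TCA_ACT_KIND = "gact"
 *          [TCA_ACT_OPTIONS]
 *              TCA_GACT_PARMS = struct tc_gact
 */
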
/**
 * Helper function to send a series of TC actions to the kernel
 *
 * @param[in] flow
 *   Pointer to rte flow containing the netlink message
 *
 * @param[in] nb_actions
 *   Number of actions in an array of action structs
 *
 * @param[in] data
 *   Pointer to an array of action structs
 *
 * @param[in] classifier_action
 *   The classifier on behalf of which the actions are configured
 *
 * @return
 *   -1 on failure, 0 on success
 */
static int
add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
            int classifier_action)
{
        struct nlmsg *msg = &flow->msg;
        size_t act_index = 1;
        int i;

        if (tap_nlattr_nested_start(msg, classifier_action) < 0)
                return -1;
        for (i = 0; i < nb_actions; i++)
                if (add_action(flow, &act_index, data + i) < 0)
                        return -1;
        tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
        return 0;
}

/**
 * Validate a flow supported by TC.
 * If flow param is not NULL, then also fill the netlink message inside.
 *
 * @param pmd
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 * @param[in] mirred
 *   If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
 *   redirection to the tap netdevice, and the TC rule will be configured
 *   on the remote netdevice in pmd.
 *   If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
 *   mirroring to the tap netdevice, and the TC rule will be configured
 *   on the remote netdevice in pmd. Matching packets will thus be duplicated.
 *   If set to 0, the standard behavior is to be used: set correct actions for
 *   the TC rule, and apply it on the tap netdevice.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_process(struct pmd_internals *pmd,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct rte_flow *flow,
                  int mirred)
{
        const struct tap_flow_items *cur_item = tap_flow_items;
        struct convert_data data = {
                .eth_type = 0,
                .ip_proto = 0,
                .flow = flow,
        };
        int action = 0; /* Only one action authorized for now */

        if (attr->group > MAX_GROUP) {
                rte_flow_error_set(
                        error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                        NULL, "group value too big: cannot exceed 15");
                return -rte_errno;
        }
        if (attr->priority > MAX_PRIORITY) {
                rte_flow_error_set(
                        error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        NULL, "priority value too big");
                return -rte_errno;
        } else if (flow) {
                uint16_t group = attr->group << GROUP_SHIFT;
                uint16_t prio = group | (attr->priority +
                                RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
                flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
                                                 flow->msg.t.tcm_info);
        }
        if (flow) {
                if (mirred) {
                        /*
                         * If attr->ingress, the rule applies on remote ingress
                         * to match incoming packets
                         * If attr->egress, the rule applies on tap ingress (as
                         * seen from the kernel) to deal with packets going out
                         * from the DPDK app.
                         */
                        flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
                } else {
                        /* Standard rule on tap egress (kernel standpoint). */
                        flow->msg.t.tcm_parent =
                                TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
                }
                /* use flower filter type */
                tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
                if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
                        goto exit_item_not_supported;
        }
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct tap_flow_items *token = NULL;
                unsigned int i;
                int err = 0;

                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                for (i = 0;
                     cur_item->items &&
                     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
                     ++i) {
                        if (cur_item->items[i] == items->type) {
                                token = &tap_flow_items[items->type];
                                break;
                        }
                }
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
                err = tap_flow_item_validate(
                        items, cur_item->mask_sz,
                        (const uint8_t *)cur_item->mask,
                        (const uint8_t *)cur_item->default_mask);
                if (err)
                        goto exit_item_not_supported;
                if (flow && cur_item->convert) {
                        err = cur_item->convert(items, &data);
                        if (err)
                                goto exit_item_not_supported;
                }
        }
        if (flow) {
                if (data.vlan) {
                        tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
                                     htons(ETH_P_8021Q));
                        tap_nlattr_add16(&flow->msg.nh,
                                     TCA_FLOWER_KEY_VLAN_ETH_TYPE,
                                     data.eth_type ?
                                     data.eth_type : htons(ETH_P_ALL));
                } else if (data.eth_type) {
                        tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
                                     data.eth_type);
                }
        }
        if (mirred && flow) {
                struct action_data adata = {
                        .id = "mirred",
                        .mirred = {
                                .eaction = mirred,
                        },
                };

                /*
                 * If attr->egress && mirred, then this is a special
                 * case where the rule must be applied on the tap, to
                 * redirect packets coming from the DPDK App, out
                 * through the remote netdevice.
                 */
                adata.mirred.ifindex = attr->ingress ? pmd->if_index :
                        pmd->remote_if_index;
                if (mirred == TCA_EGRESS_MIRROR)
                        adata.mirred.action = TC_ACT_PIPE;
                else
                        adata.mirred.action = TC_ACT_STOLEN;
                if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
                        goto exit_action_not_supported;
                else
                        goto end;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                int err = 0;

                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (flow) {
                                struct action_data adata = {
                                        .id = "gact",
                                        .gact = {
                                                .action = TC_ACT_SHOT,
                                        },
                                };

                                err = add_actions(flow, 1, &adata,
                                                  TCA_FLOWER_ACT);
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (flow) {
                                struct action_data adata = {
                                        .id = "gact",
                                        .gact = {
                                                /* continue */
                                                .action = TC_ACT_UNSPEC,
                                        },
                                };

                                err = add_actions(flow, 1, &adata,
                                                  TCA_FLOWER_ACT);
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (!queue ||
                            (queue->index > pmd->dev->data->nb_rx_queues - 1))
                                goto exit_action_not_supported;
                        if (flow) {
                                struct action_data adata = {
                                        .id = "skbedit",
                                        .skbedit = {
                                                .skbedit = {
                                                        .action = TC_ACT_PIPE,
                                                },
                                                .queue = queue->index,
                                        },
                                };

                                err = add_actions(flow, 1, &adata,
                                        TCA_FLOWER_ACT);
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;

                        if (action++)
                                goto exit_action_not_supported;

                        if (!pmd->rss_enabled) {
                                err = rss_enable(pmd, attr, error);
                                if (err)
                                        goto exit_action_not_supported;
                        }
                        if (flow && rss)
                                err = rss_add_actions(flow, pmd, rss, error);
                } else {
                        goto exit_action_not_supported;
                }
                if (err)
                        goto exit_action_not_supported;
        }
end:
        if (flow)
                tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
        return 0;
exit_item_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                           items, "item not supported");
        return -rte_errno;
exit_action_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "action not supported");
        return -rte_errno;
}

/**
 * Validate a flow.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
tap_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct pmd_internals *pmd = dev->data->dev_private;

        return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
}

/**
 * Set a unique handle in a flow.
 *
 * The kernel supports TC rules with equal priority, as long as they use the
 * same matching fields (e.g.: dst mac and ipv4) with different values (and
 * full mask to ensure no collision is possible).
 * In those rules, the handle (uint32_t) is the part that would specifically
 * identify each rule.
 *
 * On 32-bit architectures, the handle can simply be the flow's pointer address.
 * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
 * unique handle.
 *
 * @param[in, out] flow
 *   The flow that needs its handle set.
 */
static void
tap_flow_set_handle(struct rte_flow *flow)
{
        uint32_t handle = 0;

        if (sizeof(flow) > 4)
                handle = rte_jhash(&flow, sizeof(flow), 1);
        else
                handle = (uintptr_t)flow;
        /* must be at least 1 to avoid letting the kernel choose one for us */
        if (!handle)
                handle = 1;
        flow->msg.t.tcm_handle = handle;
}
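
/*
 * Example (hypothetical pointer value): on a 64-bit host, a flow allocated
 * at 0x7f8a12345678 gets handle = rte_jhash(&flow, 8, 1); on a 32-bit host
 * the pointer value itself is used. Either way, a zero result is bumped to 1
 * so that the kernel does not pick a handle on its own.
 */
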
/**
 * Free the flow's open file descriptors and allocated memory.
 *
 * @param[in] pmd
 *   Pointer to private structure.
 * @param[in] flow
 *   Pointer to the flow to free.
 *
 */
static void
tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
{
        int i;

        if (!flow)
                return;

        if (pmd->rss_enabled) {
                /* Close flow BPF file descriptors */
                for (i = 0; i < SEC_MAX; i++)
                        if (flow->bpf_fd[i] != 0) {
                                close(flow->bpf_fd[i]);
                                flow->bpf_fd[i] = 0;
                        }

                /* Release the map key for this RSS rule */
                bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
                flow->key_idx = 0;
        }

        /* Free flow allocated memory */
        rte_free(flow);
}

1321 /**
1322  * Create a flow.
1323  *
1324  * @see rte_flow_create()
1325  * @see rte_flow_ops
1326  */
1327 static struct rte_flow *
1328 tap_flow_create(struct rte_eth_dev *dev,
1329                 const struct rte_flow_attr *attr,
1330                 const struct rte_flow_item items[],
1331                 const struct rte_flow_action actions[],
1332                 struct rte_flow_error *error)
1333 {
1334         struct pmd_internals *pmd = dev->data->dev_private;
1335         struct rte_flow *remote_flow = NULL;
1336         struct rte_flow *flow = NULL;
1337         struct nlmsg *msg = NULL;
1338         int err;
1339
1340         if (!pmd->if_index) {
1341                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1342                                    NULL,
1343                                    "can't create rule, ifindex not found");
1344                 goto fail;
1345         }
1346         /*
1347          * No rules configured through standard rte_flow should be set on the
1348          * priorities used by implicit rules.
1349          */
1350         if ((attr->group == MAX_GROUP) &&
1351             attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1352                 rte_flow_error_set(
1353                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1354                         NULL, "priority value too big");
1355                 goto fail;
1356         }
1357         flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1358         if (!flow) {
1359                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1360                                    NULL, "cannot allocate memory for rte_flow");
1361                 goto fail;
1362         }
1363         msg = &flow->msg;
1364         tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1365                     NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1366         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1367         tap_flow_set_handle(flow);
1368         if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1369                 goto fail;
1370         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1371         if (err < 0) {
1372                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1373                                    NULL, "couldn't send request to kernel");
1374                 goto fail;
1375         }
1376         err = tap_nl_recv_ack(pmd->nlsk_fd);
1377         if (err < 0) {
1378                 RTE_LOG(ERR, PMD,
1379                         "Kernel refused TC filter rule creation (%d): %s\n",
1380                         errno, strerror(errno));
1381                 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1382                                    NULL,
1383                                    "overlapping rules or Kernel too old for flower support");
1384                 goto fail;
1385         }
1386         LIST_INSERT_HEAD(&pmd->flows, flow, next);
1387         /*
1388          * If a remote device is configured, a TC rule with identical items for
1389          * matching must be set on that device, with a single action: redirect
1390          * to the local pmd->if_index.
1391          */
1392         if (pmd->remote_if_index) {
1393                 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1394                 if (!remote_flow) {
1395                         rte_flow_error_set(
1396                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1397                                 "cannot allocate memory for rte_flow");
1398                         goto fail;
1399                 }
1400                 msg = &remote_flow->msg;
1401                 /* set the rule if_index for the remote netdevice */
1402                 tc_init_msg(
1403                         msg, pmd->remote_if_index, RTM_NEWTFILTER,
1404                         NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1405                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1406                 tap_flow_set_handle(remote_flow);
1407                 if (priv_flow_process(pmd, attr, items, NULL,
1408                                       error, remote_flow, TCA_EGRESS_REDIR)) {
1409                         rte_flow_error_set(
1410                                 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1411                                 NULL, "rte flow rule validation failed");
1412                         goto fail;
1413                 }
1414                 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1415                 if (err < 0) {
1416                         rte_flow_error_set(
1417                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1418                                 NULL, "couldn't send request to kernel");
1419                         goto fail;
1420                 }
1421                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1422                 if (err < 0) {
1423                         RTE_LOG(ERR, PMD,
1424                                 "Kernel refused TC filter rule creation (%d): %s\n",
1425                                 errno, strerror(errno));
1426                         rte_flow_error_set(
1427                                 error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1428                                 NULL,
1429                                 "overlapping rules or Kernel too old for flower support");
1430                         goto fail;
1431                 }
1432                 flow->remote_flow = remote_flow;
1433         }
1434         return flow;
1435 fail:
1436         if (remote_flow)
1437                 rte_free(remote_flow);
1438         if (flow)
1439                 tap_flow_free(pmd, flow);
1440         return NULL;
1441 }
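
/*
 * Usage sketch (application side, editorial addition): how the create path
 * above is typically reached through the public rte_flow API, here steering
 * all IPv4 packets to Rx queue 1 of a tap port. "port_id" and the function
 * name are illustrative.
 */
static __rte_unused struct rte_flow *
example_ipv4_to_queue1(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error flow_error;

        return rte_flow_create(port_id, &attr, pattern, actions, &flow_error);
}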
1442
1443 /**
1444  * Destroy a flow using a pointer to pmd_internals.
1445  *
1446  * @param[in, out] pmd
1447  *   Pointer to private structure.
1448  * @param[in] flow
1449  *   Pointer to the flow to destroy.
1450  * @param[in, out] error
1451  *   Pointer to the flow error handler
1452  *
1453  * @return 0 if the flow could be destroyed, -1 otherwise.
1454  */
1455 static int
1456 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1457                      struct rte_flow *flow,
1458                      struct rte_flow_error *error)
1459 {
1460         struct rte_flow *remote_flow = flow->remote_flow;
1461         int ret = 0;
1462
1463         LIST_REMOVE(flow, next);
1464         flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1465         flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1466
1467         ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1468         if (ret < 0) {
1469                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1470                                    NULL, "couldn't send request to kernel");
1471                 goto end;
1472         }
1473         ret = tap_nl_recv_ack(pmd->nlsk_fd);
1474         /* If errno is ENOENT, the rule is already no longer in the kernel. */
1475         if (ret < 0 && errno == ENOENT)
1476                 ret = 0;
1477         if (ret < 0) {
1478                 RTE_LOG(ERR, PMD,
1479                         "Kernel refused TC filter rule deletion (%d): %s\n",
1480                         errno, strerror(errno));
1481                 rte_flow_error_set(
1482                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1483                         "couldn't receive kernel ack to our request");
1484                 goto end;
1485         }
1486
1487         if (remote_flow) {
1488                 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1489                 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1490
1491                 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1492                 if (ret < 0) {
1493                         rte_flow_error_set(
1494                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1495                                 NULL, "couldn't send request to kernel");
1496                         goto end;
1497                 }
1498                 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1499                 if (ret < 0 && errno == ENOENT)
1500                         ret = 0;
1501                 if (ret < 0) {
1502                         RTE_LOG(ERR, PMD,
1503                                 "Kernel refused TC filter rule deletion (%d): %s\n",
1504                                 errno, strerror(errno));
1505                         rte_flow_error_set(
1506                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1507                                 NULL, "couldn't receive kernel ack to our request");
1508                         goto end;
1509                 }
1510         }
1511 end:
1512         if (remote_flow)
1513                 rte_free(remote_flow);
1514         tap_flow_free(pmd, flow);
1515         return ret;
1516 }
1517
1518 /**
1519  * Destroy a flow.
1520  *
1521  * @see rte_flow_destroy()
1522  * @see rte_flow_ops
1523  */
1524 static int
1525 tap_flow_destroy(struct rte_eth_dev *dev,
1526                  struct rte_flow *flow,
1527                  struct rte_flow_error *error)
1528 {
1529         struct pmd_internals *pmd = dev->data->dev_private;
1530
1531         return tap_flow_destroy_pmd(pmd, flow, error);
1532 }
1533
1534 /**
1535  * Enable/disable flow isolation.
1536  *
1537  * @see rte_flow_isolate()
1538  * @see rte_flow_ops
1539  */
1540 static int
1541 tap_flow_isolate(struct rte_eth_dev *dev,
1542                  int set,
1543                  struct rte_flow_error *error __rte_unused)
1544 {
1545         struct pmd_internals *pmd = dev->data->dev_private;
1546
1547         if (set)
1548                 pmd->flow_isolate = 1;
1549         else
1550                 pmd->flow_isolate = 0;
1551         /*
1552          * If the netdevice is there, set up the appropriate flow rules now.
1553          * Otherwise they will be set when the netdevice is brought up (tun_alloc).
1554          */
1555         if (!pmd->rxq[0].fd)
1556                 return 0;
1557         if (set) {
1558                 struct rte_flow *flow;
1559
1560                 while (1) {
1561                         flow = LIST_FIRST(&pmd->implicit_flows);
1562                         if (!flow)
1563                                 break;
1564                         /*
1565                          * Remove all implicit rules on the remote.
1566                          * Keep the local rule to redirect packets on TX.
1567                          * Keep also the last implicit local rule: ISOLATE.
1568                          */
1569                         if (flow->msg.t.tcm_ifindex == pmd->if_index)
1570                                 break;
1571                         if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
1572                                 goto error;
1573                 }
1574                 /* Switch the TC rule according to pmd->flow_isolate */
1575                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1576                         goto error;
1577         } else {
1578                 /* Switch the TC rule according to pmd->flow_isolate */
1579                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1580                         goto error;
1581                 if (!pmd->remote_if_index)
1582                         return 0;
1583                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1584                         goto error;
1585                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1586                         goto error;
1587                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1588                         goto error;
1589                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1590                         goto error;
1591                 if (dev->data->promiscuous &&
1592                     tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1593                         goto error;
1594                 if (dev->data->all_multicast &&
1595                     tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1596                         goto error;
1597         }
1598         return 0;
1599 error:
1600         pmd->flow_isolate = 0;
1601         return rte_flow_error_set(
1602                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1603                 "TC rule creation failed");
1604 }
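
/*
 * Usage sketch (editorial addition): applications usually turn isolated mode
 * on right after probing the port, before configuring it, so that only
 * explicit rte_flow rules receive traffic. "port_id" is illustrative.
 */
static __rte_unused int
example_enable_isolation(uint16_t port_id)
{
        struct rte_flow_error flow_error;

        return rte_flow_isolate(port_id, 1 /* set */, &flow_error);
}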
1605
1606 /**
1607  * Destroy all flows.
1608  *
1609  * @see rte_flow_flush()
1610  * @see rte_flow_ops
1611  */
1612 int
1613 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1614 {
1615         struct pmd_internals *pmd = dev->data->dev_private;
1616         struct rte_flow *flow;
1617
1618         while (!LIST_EMPTY(&pmd->flows)) {
1619                 flow = LIST_FIRST(&pmd->flows);
1620                 if (tap_flow_destroy(dev, flow, error) < 0)
1621                         return -1;
1622         }
1623         return 0;
1624 }
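
/*
 * Usage sketch (editorial addition): tear down every rte_flow rule on a port
 * in one call, e.g. before closing the device, rather than calling
 * rte_flow_destroy() per rule.
 */
static __rte_unused int
example_flush_port(uint16_t port_id)
{
        struct rte_flow_error flow_error;

        return rte_flow_flush(port_id, &flow_error);
}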
1625
1626 /**
1627  * Add an implicit flow rule on the remote device to make sure traffic gets to
1628  * the tap netdevice from there.
1629  *
1630  * @param pmd
1631  *   Pointer to private structure.
1632  * @param[in] idx
1633  *   The idx in the implicit_rte_flows array specifying which rule to apply.
1634  *
1635  * @return -1 if the rule couldn't be applied, 0 otherwise.
1636  */
1637 int tap_flow_implicit_create(struct pmd_internals *pmd,
1638                              enum implicit_rule_index idx)
1639 {
1640         uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1641         struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1642         struct rte_flow_action isolate_actions[2] = {
1643                 [1] = {
1644                         .type = RTE_FLOW_ACTION_TYPE_END,
1645                 },
1646         };
1647         struct rte_flow_item *items = implicit_rte_flows[idx].items;
1648         struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1649         struct rte_flow_item_eth eth_local = { .type = 0 };
1650         uint16_t if_index = pmd->remote_if_index;
1651         struct rte_flow *remote_flow = NULL;
1652         struct nlmsg *msg = NULL;
1653         int err = 0;
1654         struct rte_flow_item items_local[2] = {
1655                 [0] = {
1656                         .type = items[0].type,
1657                         .spec = &eth_local,
1658                         .mask = items[0].mask,
1659                 },
1660                 [1] = {
1661                         .type = items[1].type,
1662                 }
1663         };
1664
1665         remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1666         if (!remote_flow) {
1667                 RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
1668                 goto fail;
1669         }
1670         msg = &remote_flow->msg;
1671         if (idx == TAP_REMOTE_TX) {
1672                 if_index = pmd->if_index;
1673         } else if (idx == TAP_ISOLATE) {
1674                 if_index = pmd->if_index;
1675                 /* Not exclusive (no NLM_F_EXCL): this rule can be changed later. */
1676                 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1677                 isolate_actions[0].type = pmd->flow_isolate ?
1678                         RTE_FLOW_ACTION_TYPE_DROP :
1679                         RTE_FLOW_ACTION_TYPE_PASSTHRU;
1680                 actions = isolate_actions;
1681         } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1682                 /*
1683                  * The MAC address cannot be set in implicit_rte_flows[] as it is not
1684                  * known at compile time.
1685                  */
1686                 memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1687                 items = items_local;
1688         }
1689         tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1690         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1691         /*
1692          * The ISOLATE rule is always present and must have a static handle, as
1693          * the action is changed depending on whether the feature is enabled
1694          * (DROP) or disabled (PASSTHRU).
1695          */
1696         if (idx == TAP_ISOLATE)
1697                 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1698         else
1699                 tap_flow_set_handle(remote_flow);
1700         if (priv_flow_process(pmd, attr, items, actions, NULL,
1701                               remote_flow, implicit_rte_flows[idx].mirred)) {
1702                 RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
1703                 goto fail;
1704         }
1705         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1706         if (err < 0) {
1707                 RTE_LOG(ERR, PMD, "Failure sending nl request\n");
1708                 goto fail;
1709         }
1710         err = tap_nl_recv_ack(pmd->nlsk_fd);
1711         if (err < 0) {
1712                 RTE_LOG(ERR, PMD,
1713                         "Kernel refused TC filter rule creation (%d): %s\n",
1714                         errno, strerror(errno));
1715                 goto fail;
1716         }
1717         LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1718         return 0;
1719 fail:
1720         if (remote_flow)
1721                 rte_free(remote_flow);
1722         return -1;
1723 }
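
/*
 * Editorial sketch: roughly how the implicit remote rules above are applied
 * in sequence when a remote netdevice is configured; the real call sites
 * live in this PMD's device start/configuration paths.
 */
static __rte_unused int
example_apply_remote_implicit_rules(struct pmd_internals *pmd)
{
        if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
            tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
            tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
            tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
                return -1; /* on failure, caller should flush implicit rules */
        return 0;
}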
1724
1725 /**
1726  * Remove specific implicit flow rule on the remote device.
1727  *
1728  * @param[in, out] pmd
1729  *   Pointer to private structure.
1730  * @param[in] idx
1731  *   The idx in the implicit_rte_flows array specifying which rule to remove.
1732  *
1733  * @return -1 if the implicit rule couldn't be destroyed, 0 otherwise.
1734  */
1735 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1736                               enum implicit_rule_index idx)
1737 {
1738         struct rte_flow *remote_flow;
1739         int cur_prio = -1;
1740         int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1741
1742         for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1743              remote_flow;
1744              remote_flow = LIST_NEXT(remote_flow, next)) {
1745                 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1746                 if (cur_prio != idx_prio)
1747                         continue;
1748                 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1749         }
1750         return 0;
1751 }
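
/*
 * Editorial note with a worked example: tcm_info packs the 16-bit TC
 * priority in its upper half (see TC_H_MAKE(prio << 16, proto) in the
 * creation paths), so the lookup above just shifts it back down and masks
 * off the group bits.
 */
static __rte_unused uint16_t
example_prio_of(const struct rte_flow *flow)
{
        /* upper 16 bits = TC priority; PRIORITY_MASK drops the group bits */
        return (flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
}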
1752
1753 /**
1754  * Destroy all implicit flows.
1755  *
1756  * @see rte_flow_flush()
1757  */
1758 int
1759 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1760 {
1761         struct rte_flow *remote_flow;
1762
1763         while (!LIST_EMPTY(&pmd->implicit_flows)) {
1764                 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1765                 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1766                         return -1;
1767         }
1768         return 0;
1769 }
1770
1771 #define MAX_RSS_KEYS 256
1772 #define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
1773 #define SEC_NAME_CLS_Q "cls_q"
1774
1775 const char *sec_name[SEC_MAX] = {
1776         [SEC_L3_L4] = "l3_l4",
1777 };
1778
1779 /**
1780  * Enable RSS on tap: create TC rules for queuing.
1781  *
1782  * @param[in, out] pmd
1783  *   Pointer to private structure.
1784  *
1785  * @param[in] attr
1786  *   Pointer to the flow attributes; used to retrieve the flow group.
1787  *
1788  * @param[out] error
1789  *   Pointer to error reporting if not NULL.
1790  *
1791  * @return 0 on success, negative value on failure.
1792  */
1793 static int rss_enable(struct pmd_internals *pmd,
1794                         const struct rte_flow_attr *attr,
1795                         struct rte_flow_error *error)
1796 {
1797         struct rte_flow *rss_flow = NULL;
1798         struct nlmsg *msg = NULL;
1800         char annotation[64];
1801         int i;
1802         int err = 0;
1803
1804         /* Remove the locked memory limit: BPF resources count against it */
1805         struct rlimit memlock_limit = {
1806                 .rlim_cur = RLIM_INFINITY,
1807                 .rlim_max = RLIM_INFINITY,
1808         };
1809         setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1810
1811         /* Initialize the BPF RSS key repository */
1812         err = bpf_rss_key(KEY_CMD_INIT, NULL);
1813         if (err < 0) {
1814                 rte_flow_error_set(
1815                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1816                         "Failed to initialize BPF RSS keys");
1817
1818                 return -1;
1819         }
1820
1821         /*
1822          *  Create BPF RSS MAP
1823          */
1824         pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1825                                 sizeof(struct rss_key), /* value size */
1826                                 MAX_RSS_KEYS); /* max number of entries */
1827         if (pmd->map_fd < 0) {
1828                 RTE_LOG(ERR, PMD,
1829                         "Failed to create BPF map (%d): %s\n",
1830                                 errno, strerror(errno));
1831                 rte_flow_error_set(
1832                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1833                         "Kernel too old or not configured "
1834                         "to support BPF maps");
1835
1836                 return -ENOTSUP;
1837         }
1838
1839         /*
1840          * Add a rule per queue to match reclassified packets and direct them to
1841          * the correct queue.
1842          */
1843         for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1844                 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1845                 if (pmd->bpf_fd[i] < 0) {
1846                         RTE_LOG(ERR, PMD,
1847                                 "Failed to load BPF section %s for queue %d\n",
1848                                 SEC_NAME_CLS_Q, i);
1849                         rte_flow_error_set(
1850                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1851                                 NULL,
1852                                 "Kernel too old or not configured "
1853                                 "to support BPF programs loading");
1854
1855                         return -ENOTSUP;
1856                 }
1857
1858                 rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1859                 if (!rss_flow) {
1860                         RTE_LOG(ERR, PMD,
1861                                 "Cannot allocate memory for rte_flow\n");
1862                         return -1;
1863                 }
1864                 msg = &rss_flow->msg;
1865                 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1866                             NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1867                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1868                 tap_flow_set_handle(rss_flow);
1869                 uint16_t group = attr->group << GROUP_SHIFT;
1870                 uint16_t prio = group | (i + PRIORITY_OFFSET);
1871                 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
1872                 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1873
1874                 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1875                 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1876                         return -1;
1877                 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1878                 snprintf(annotation, sizeof(annotation), "[%s%d]",
1879                         SEC_NAME_CLS_Q, i);
1880                 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1881                            annotation);
1882                 /* Actions */
1883                 {
1884                         struct action_data adata = {
1885                                 .id = "skbedit",
1886                                 .skbedit = {
1887                                         .skbedit = {
1888                                                 .action = TC_ACT_PIPE,
1889                                         },
1890                                         .queue = i,
1891                                 },
1892                         };
1893                         if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1894                                 return -1;
1895                 }
1896                 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1897
1898                 /* Netlink message is now ready to be sent */
1899                 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1900                         return -1;
1901                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1902                 if (err < 0) {
1903                         RTE_LOG(ERR, PMD,
1904                                 "Kernel refused TC filter rule creation (%d): %s\n",
1905                                 errno, strerror(errno));
1906                         return err;
1907                 }
1908                 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1909         }
1910
1911         pmd->rss_enabled = 1;
1912         return err;
1913 }
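
/*
 * Editorial sketch: the per-queue classifier priority composed in the loop
 * above, spelled out. For group 0 and queue 2 this yields priority
 * (0 << GROUP_SHIFT) | (2 + PRIORITY_OFFSET) in the upper 16 bits of
 * tcm_info, with the ETH_P_ALL protocol in the lower 16 bits.
 */
static __rte_unused uint32_t
example_cls_q_tcm_info(uint16_t flow_group, int queue)
{
        uint16_t prio = (flow_group << GROUP_SHIFT) | (queue + PRIORITY_OFFSET);

        return TC_H_MAKE(prio << 16, htons(ETH_P_ALL));
}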
1914
1915 /**
1916  * Manage the BPF RSS key repository with operations: init, get, release, deinit
1917  *
1918  * @param[in] cmd
1919  *   Command on RSS keys: init, get, release, deinit
1920  *
1921  * @param[in, out] key_idx
1922  *   Pointer to RSS Key index (out for get command, in for release command)
1923  *
1924  * @return -1 if couldn't get, release or init the RSS keys, 0 otherwise.
1925  */
1926 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1927 {
1928         __u32 i;
1929         int err = 0;
1930         static __u32 num_used_keys;
1931         static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1932         static __u32 rss_keys_initialized;
1933
1934         switch (cmd) {
1935         case KEY_CMD_GET:
1936                 if (!rss_keys_initialized) {
1937                         err = -1;
1938                         break;
1939                 }
1940
1941                 if (num_used_keys == RTE_DIM(rss_keys)) {
1942                         err = -1;
1943                         break;
1944                 }
1945
1946                 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1947                 while (rss_keys[*key_idx] == KEY_STAT_USED)
1948                         *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1949
1950                 rss_keys[*key_idx] = KEY_STAT_USED;
1951
1952                 /*
1953                  * Add an offset to key_idx in order to handle the case of a
1954                  * mixture of RSS and non-RSS flows.
1955                  * If a non-RSS flow is destroyed, it has an eBPF map
1956                  * index of 0 (initialized on flow creation) and might
1957                  * unintentionally remove RSS entry 0 from the eBPF map.
1958                  * To avoid this issue, add an offset to the real index
1959                  * during a KEY_CMD_GET operation and subtract this offset
1960                  * during a KEY_CMD_RELEASE operation in order to restore
1961                  * the real index.
1962                  */
1963                 *key_idx += KEY_IDX_OFFSET;
1964                 num_used_keys++;
1965                 break;
1966
1967         case KEY_CMD_RELEASE:
1968                 if (!rss_keys_initialized)
1969                         break;
1970
1971                 /*
1972                  * Subtract the offset to restore the real key index.
1973                  * If a non-RSS flow falsely tries to release map
1974                  * entry 0, the offset subtraction will calculate the real
1975                  * map index as an out-of-range value and the release operation
1976                  * will be silently ignored.
1977                  */
1978                 __u32 key = *key_idx - KEY_IDX_OFFSET;
1979                 if (key >= RTE_DIM(rss_keys))
1980                         break;
1981
1982                 if (rss_keys[key] == KEY_STAT_USED) {
1983                         rss_keys[key] = KEY_STAT_AVAILABLE;
1984                         num_used_keys--;
1985                 }
1986                 break;
1987
1988         case KEY_CMD_INIT:
1989                 for (i = 0; i < RTE_DIM(rss_keys); i++)
1990                         rss_keys[i] = KEY_STAT_AVAILABLE;
1991
1992                 rss_keys_initialized = 1;
1993                 num_used_keys = 0;
1994                 break;
1995
1996         case KEY_CMD_DEINIT:
1997                 for (i = 0; i < RTE_DIM(rss_keys); i++)
1998                         rss_keys[i] = KEY_STAT_UNSPEC;
1999
2000                 rss_keys_initialized = 0;
2001                 num_used_keys = 0;
2002                 break;
2003
2004         default:
2005                 break;
2006         }
2007
2008         return err;
2009 }
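
/*
 * Editorial sketch of the offset scheme above, with MAX_RSS_KEYS = 256 and
 * thus KEY_IDX_OFFSET = 768: KEY_CMD_GET hands out indices 768..1023, and a
 * stale key_idx of 0 (a non-RSS flow) underflows on KEY_CMD_RELEASE to a
 * value far beyond RTE_DIM(rss_keys), so it is silently ignored.
 */
static __rte_unused void
example_rss_key_cycle(void)
{
        __u32 idx = 0;

        bpf_rss_key(KEY_CMD_INIT, NULL);    /* all 256 keys become available */
        bpf_rss_key(KEY_CMD_GET, &idx);     /* idx = 0 + KEY_IDX_OFFSET = 768 */
        bpf_rss_key(KEY_CMD_RELEASE, &idx); /* frees slot 768 - 768 = 0 */
}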
2010
2011 /**
2012  * Add RSS hash calculations and queue selection
2013  *
2014  * @param[in, out] flow  Pointer to the flow to add RSS actions to.
2015  * @param[in, out] pmd   Pointer to internal structure; used to get the RSS map fd.
2016  *
2017  * @param[in] rss
2018  *   Pointer to RSS flow actions
2019  *
2020  * @param[out] error
2021  *   Pointer to error reporting if not NULL.
2022  *
2023  * @return 0 on success, negative value on failure
2024  */
2025 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
2026                            const struct rte_flow_action_rss *rss,
2027                            struct rte_flow_error *error)
2028 {
2030         int i;
2031         int err;
2032         struct rss_key rss_entry = { .hash_fields = 0,
2033                                      .key_size = 0 };
2034
2035         /* Get a new map key for a new RSS rule */
2036         err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
2037         if (err < 0) {
2038                 rte_flow_error_set(
2039                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2040                         "Failed to get BPF RSS key");
2041
2042                 return -1;
2043         }
2044
2045         /* Update RSS map entry with queues */
        if (rss->num > RTE_DIM(rss_entry.queues)) {
                rte_flow_error_set(
                        error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                        "too many queues for RSS context");
                return -1;
        }
2046         rss_entry.nb_queues = rss->num;
2047         for (i = 0; i < rss->num; i++)
2048                 rss_entry.queues[i] = rss->queue[i];
2049         rss_entry.hash_fields =
2050                 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
2051
2052         /* Add this RSS entry to map */
2053         err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2054                                 &flow->key_idx, &rss_entry);
2055
2056         if (err) {
2057                 RTE_LOG(ERR, PMD,
2058                         "Failed to update BPF map entry #%u (%d): %s\n",
2059                         flow->key_idx, errno, strerror(errno));
2060                 rte_flow_error_set(
2061                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2062                         "Kernel too old or not configured "
2063                         "to support BPF maps updates");
2064
2065                 return -ENOTSUP;
2066         }
2067
2069         /*
2070          * Load the BPF program that computes the L3/L4 hash for this
2071          * key_idx. Note: a BPF program is limited to 4096 instructions.
2072          */
2073         flow->bpf_fd[SEC_L3_L4] =
2074                 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2075         if (flow->bpf_fd[SEC_L3_L4] < 0) {
2076                 RTE_LOG(ERR, PMD,
2077                         "Failed to load BPF section %s (%d): %s\n",
2078                                 sec_name[SEC_L3_L4], errno, strerror(errno));
2079                 rte_flow_error_set(
2080                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2081                         "Kernel too old or not configured "
2082                         "to support BPF program loading");
2083
2084                 return -ENOTSUP;
2085         }
2086
2087         /* Actions */
2088         {
2089                 struct action_data adata[] = {
2090                         {
2091                                 .id = "bpf",
2092                                 .bpf = {
2093                                         .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2094                                         .annotation = sec_name[SEC_L3_L4],
2095                                         .bpf = {
2096                                                 .action = TC_ACT_PIPE,
2097                                         },
2098                                 },
2099                         },
2100                 };
2101
2102                 if (add_actions(flow, RTE_DIM(adata), adata,
2103                         TCA_FLOWER_ACT) < 0)
2104                         return -1;
2105         }
2106
2107         return 0;
2108 }
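
/*
 * Usage sketch (application side, editorial addition): an RSS rule over
 * queues 0-3 with the pre-18.05 rte_flow_action_rss layout read by
 * rss_add_actions() above (->num and the flexible ->queue[] array;
 * ->rss_conf is ignored by this PMD, which sets its own hash fields).
 */
static __rte_unused struct rte_flow *
example_create_rss_rule(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[2];
        struct rte_flow_action_rss *rss;
        struct rte_flow_error flow_error;
        struct rte_flow *rule;
        uint16_t q;

        /* queue[] is a flexible array member: allocate it along with rss */
        rss = rte_zmalloc(__func__, sizeof(*rss) + 4 * sizeof(uint16_t), 0);
        if (!rss)
                return NULL;
        rss->num = 4;
        for (q = 0; q < rss->num; q++)
                rss->queue[q] = q;
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss,
        };
        actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
        rule = rte_flow_create(port_id, &attr, pattern, actions, &flow_error);
        rte_free(rss); /* the PMD has consumed the queue list by now */
        return rule;
}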
2109
2110 /**
2111  * Manage filter operations.
2112  *
2113  * @param dev
2114  *   Pointer to Ethernet device structure.
2115  * @param filter_type
2116  *   Filter type.
2117  * @param filter_op
2118  *   Operation to perform.
2119  * @param arg
2120  *   Pointer to operation-specific structure.
2121  *
2122  * @return
2123  *   0 on success, negative errno value on failure.
2124  */
2125 int
2126 tap_dev_filter_ctrl(struct rte_eth_dev *dev,
2127                     enum rte_filter_type filter_type,
2128                     enum rte_filter_op filter_op,
2129                     void *arg)
2130 {
2131         switch (filter_type) {
2132         case RTE_ETH_FILTER_GENERIC:
2133                 if (filter_op != RTE_ETH_FILTER_GET)
2134                         return -EINVAL;
2135                 *(const void **)arg = &tap_flow_ops;
2136                 return 0;
2137         default:
2138                 RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
2139                         (void *)dev, filter_type);
2140         }
2141         return -EINVAL;
2142 }
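
/*
 * Editorial sketch: essentially what the rte_flow layer does to reach
 * tap_flow_ops through the entry point above; the GENERIC/GET pair returns
 * a pointer to the driver's rte_flow_ops in *arg.
 */
static __rte_unused const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
        const struct rte_flow_ops *ops = NULL;

        if (tap_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;
        return ops;
}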
2143