net/mlx5: fix VXLAN port registration race condition
drivers/net/mlx5/mlx5_flow_tcf.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <assert.h>
#include <errno.h>
#include <libmnl/libmnl.h>
#include <linux/gen_stats.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <linux/pkt_cls.h>
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/tc_act/tc_gact.h>
#include <linux/tc_act/tc_mirred.h>
#include <netinet/in.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_common.h>

#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_autoconf.h"

#ifdef HAVE_TC_ACT_VLAN

#include <linux/tc_act/tc_vlan.h>

#else /* HAVE_TC_ACT_VLAN */

#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
#define TCA_VLAN_PARMS 2
#define TCA_VLAN_PUSH_VLAN_ID 3
#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
#define TCA_VLAN_PAD 5
#define TCA_VLAN_PUSH_VLAN_PRIORITY 6

struct tc_vlan {
        tc_gen;
        int v_action;
};

#endif /* HAVE_TC_ACT_VLAN */

#ifdef HAVE_TC_ACT_PEDIT

#include <linux/tc_act/tc_pedit.h>
#else /* HAVE_TC_ACT_PEDIT */

enum {
        TCA_PEDIT_UNSPEC,
        TCA_PEDIT_TM,
        TCA_PEDIT_PARMS,
        TCA_PEDIT_PAD,
        TCA_PEDIT_PARMS_EX,
        TCA_PEDIT_KEYS_EX,
        TCA_PEDIT_KEY_EX,
        __TCA_PEDIT_MAX
};

enum {
        TCA_PEDIT_KEY_EX_HTYPE = 1,
        TCA_PEDIT_KEY_EX_CMD = 2,
        __TCA_PEDIT_KEY_EX_MAX
};

enum pedit_header_type {
        TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0,
        TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1,
        TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2,
        TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3,
        TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4,
        TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
        __PEDIT_HDR_TYPE_MAX,
};

enum pedit_cmd {
        TCA_PEDIT_KEY_EX_CMD_SET = 0,
        TCA_PEDIT_KEY_EX_CMD_ADD = 1,
        __PEDIT_CMD_MAX,
};

struct tc_pedit_key {
        __u32 mask; /* AND */
        __u32 val; /* XOR */
        __u32 off; /* offset */
        __u32 at;
        __u32 offmask;
        __u32 shift;
};

__extension__
struct tc_pedit_sel {
        tc_gen;
        unsigned char nkeys;
        unsigned char flags;
        struct tc_pedit_key keys[0];
};

#endif /* HAVE_TC_ACT_PEDIT */

#ifdef HAVE_TC_ACT_TUNNEL_KEY

#include <linux/tc_act/tc_tunnel_key.h>

#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT
#define TCA_TUNNEL_KEY_ENC_DST_PORT 9
#endif

#ifndef HAVE_TCA_TUNNEL_KEY_NO_CSUM
#define TCA_TUNNEL_KEY_NO_CSUM 10
#endif

#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TOS
#define TCA_TUNNEL_KEY_ENC_TOS 12
#endif

#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TTL
#define TCA_TUNNEL_KEY_ENC_TTL 13
#endif

#else /* HAVE_TC_ACT_TUNNEL_KEY */

#define TCA_ACT_TUNNEL_KEY 17
#define TCA_TUNNEL_KEY_ACT_SET 1
#define TCA_TUNNEL_KEY_ACT_RELEASE 2
#define TCA_TUNNEL_KEY_PARMS 2
#define TCA_TUNNEL_KEY_ENC_IPV4_SRC 3
#define TCA_TUNNEL_KEY_ENC_IPV4_DST 4
#define TCA_TUNNEL_KEY_ENC_IPV6_SRC 5
#define TCA_TUNNEL_KEY_ENC_IPV6_DST 6
#define TCA_TUNNEL_KEY_ENC_KEY_ID 7
#define TCA_TUNNEL_KEY_ENC_DST_PORT 9
#define TCA_TUNNEL_KEY_NO_CSUM 10
#define TCA_TUNNEL_KEY_ENC_TOS 12
#define TCA_TUNNEL_KEY_ENC_TTL 13

struct tc_tunnel_key {
        tc_gen;
        int t_action;
};

#endif /* HAVE_TC_ACT_TUNNEL_KEY */

/* Normally found in linux/netlink.h. */
#ifndef NETLINK_CAP_ACK
#define NETLINK_CAP_ACK 10
#endif

/* Normally found in linux/pkt_sched.h. */
#ifndef TC_H_MIN_INGRESS
#define TC_H_MIN_INGRESS 0xfff2u
#endif

/* Normally found in linux/pkt_cls.h. */
#ifndef TCA_CLS_FLAGS_SKIP_SW
#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
#endif
#ifndef TCA_CLS_FLAGS_IN_HW
#define TCA_CLS_FLAGS_IN_HW (1 << 2)
#endif
#ifndef HAVE_TCA_CHAIN
#define TCA_CHAIN 11
#endif
#ifndef HAVE_TCA_FLOWER_ACT
#define TCA_FLOWER_ACT 3
#endif
#ifndef HAVE_TCA_FLOWER_FLAGS
#define TCA_FLOWER_FLAGS 22
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
#define TCA_FLOWER_KEY_ETH_TYPE 8
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
#define TCA_FLOWER_KEY_ETH_DST 4
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
#define TCA_FLOWER_KEY_ETH_DST_MASK 5
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
#define TCA_FLOWER_KEY_ETH_SRC 6
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
#define TCA_FLOWER_KEY_IP_PROTO 9
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
#define TCA_FLOWER_KEY_IPV4_SRC 10
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
#define TCA_FLOWER_KEY_IPV4_DST 12
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
#define TCA_FLOWER_KEY_IPV6_SRC 14
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
#define TCA_FLOWER_KEY_IPV6_DST 16
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
#define TCA_FLOWER_KEY_TCP_SRC 18
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
#define TCA_FLOWER_KEY_TCP_DST 19
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
#define TCA_FLOWER_KEY_TCP_DST_MASK 36
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
#define TCA_FLOWER_KEY_UDP_SRC 20
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
#define TCA_FLOWER_KEY_UDP_DST 21
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
#define TCA_FLOWER_KEY_UDP_DST_MASK 38
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
#define TCA_FLOWER_KEY_VLAN_ID 23
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
#define TCA_FLOWER_KEY_VLAN_PRIO 24
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_KEY_ID
#define TCA_FLOWER_KEY_ENC_KEY_ID 26
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC
#define TCA_FLOWER_KEY_ENC_IPV4_SRC 27
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
#define TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK 28
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST
#define TCA_FLOWER_KEY_ENC_IPV4_DST 29
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
#define TCA_FLOWER_KEY_ENC_IPV4_DST_MASK 30
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC
#define TCA_FLOWER_KEY_ENC_IPV6_SRC 31
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
#define TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK 32
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST
#define TCA_FLOWER_KEY_ENC_IPV6_DST 33
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
#define TCA_FLOWER_KEY_ENC_IPV6_DST_MASK 34
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT 43
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK 44
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT
#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT 45
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK 46
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS
#define TCA_FLOWER_KEY_TCP_FLAGS 71
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS
#define TCA_FLOWER_KEY_IP_TOS 73
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS_MASK
#define TCA_FLOWER_KEY_IP_TOS_MASK 74
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL
#define TCA_FLOWER_KEY_IP_TTL 75
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK
#define TCA_FLOWER_KEY_IP_TTL_MASK 76
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS
#define TCA_FLOWER_KEY_ENC_IP_TOS 80
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK
#define TCA_FLOWER_KEY_ENC_IP_TOS_MASK 81
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL
#define TCA_FLOWER_KEY_ENC_IP_TTL 82
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK
#define TCA_FLOWER_KEY_ENC_IP_TTL_MASK 83
#endif

#ifndef HAVE_TC_ACT_GOTO_CHAIN
#define TC_ACT_GOTO_CHAIN 0x20000000
#endif

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN 16
#endif

#ifndef IPV4_ADDR_LEN
#define IPV4_ADDR_LEN 4
#endif

#ifndef TP_PORT_LEN
#define TP_PORT_LEN 2 /* Transport Port (UDP/TCP) Length */
#endif

#ifndef TTL_LEN
#define TTL_LEN 1
#endif

#ifndef TCA_ACT_MAX_PRIO
#define TCA_ACT_MAX_PRIO 32
#endif

/** Parameters of VXLAN devices created by driver. */
#define MLX5_VXLAN_DEFAULT_VNI  1
#define MLX5_VXLAN_DEVICE_PFX "vmlx_"
/**
 * Timeout in milliseconds to wait for the VXLAN UDP offloaded port
 * registration to complete within the mlx5 driver.
 */
#define MLX5_VXLAN_WAIT_PORT_REG_MS 250

/** Tunnel action type, used for @p type in header structure. */
enum flow_tcf_tunact_type {
        FLOW_TCF_TUNACT_VXLAN_DECAP,
        FLOW_TCF_TUNACT_VXLAN_ENCAP,
};

/** Flags used for @p mask in tunnel action encap descriptors. */
#define FLOW_TCF_ENCAP_ETH_SRC (1u << 0)
#define FLOW_TCF_ENCAP_ETH_DST (1u << 1)
#define FLOW_TCF_ENCAP_IPV4_SRC (1u << 2)
#define FLOW_TCF_ENCAP_IPV4_DST (1u << 3)
#define FLOW_TCF_ENCAP_IPV6_SRC (1u << 4)
#define FLOW_TCF_ENCAP_IPV6_DST (1u << 5)
#define FLOW_TCF_ENCAP_UDP_SRC (1u << 6)
#define FLOW_TCF_ENCAP_UDP_DST (1u << 7)
#define FLOW_TCF_ENCAP_VXLAN_VNI (1u << 8)
#define FLOW_TCF_ENCAP_IP_TTL (1u << 9)
#define FLOW_TCF_ENCAP_IP_TOS (1u << 10)

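/*
 * Illustrative example (an assumption for clarity, not a combination
 * mandated by the driver): for a typical IPv4/UDP VXLAN encapsulation
 * descriptor the @p mask field would carry a combination such as
 * FLOW_TCF_ENCAP_ETH_DST | FLOW_TCF_ENCAP_IPV4_SRC |
 * FLOW_TCF_ENCAP_IPV4_DST | FLOW_TCF_ENCAP_UDP_DST |
 * FLOW_TCF_ENCAP_VXLAN_VNI, recording which encapsulation header
 * fields were explicitly specified by the application.
 */
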
/**
 * Structure for holding netlink context.
 * Note the size of the message buffer which is MNL_SOCKET_BUFFER_SIZE.
 * Using this (8KB) buffer size ensures that netlink messages will never be
 * truncated.
 */
struct mlx5_flow_tcf_context {
        struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
        uint32_t seq; /* Message sequence number. */
        uint32_t buf_size; /* Message buffer size. */
        uint8_t *buf; /* Message buffer. */
};
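
/*
 * Context setup sketch (illustrative only; a minimal sketch using the
 * libmnl API, not a verbatim excerpt of the driver code):
 *
 *   struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
 *
 *   if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
 *           return NULL;
 *   ctx->nl = nl;
 *   ctx->buf_size = MNL_SOCKET_BUFFER_SIZE;
 *   ctx->buf = rte_zmalloc(__func__, ctx->buf_size, 0);
 */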

/**
 * Neigh rule structure. The neigh rule is applied via Netlink to
 * the outer tunnel iface in order to provide the destination MAC
 * address for the VXLAN encapsulation. The neigh rule is implicitly
 * related to the Flow itself and can be shared by multiple Flows.
 */
struct tcf_neigh_rule {
        LIST_ENTRY(tcf_neigh_rule) next;
        uint32_t refcnt;
        struct ether_addr eth;
        uint16_t mask;
        union {
                struct {
                        rte_be32_t dst;
                } ipv4;
                struct {
                        uint8_t dst[IPV6_ADDR_LEN];
                } ipv6;
        };
};

/**
 * Local rule structure. The local rule is applied via Netlink to
 * outer tunnel iface in order to provide local and peer IP addresses
 * of the VXLAN tunnel for encapsulation. The local rule is implicitly
 * related to the Flow itself and can be shared by multiple Flows.
 */
struct tcf_local_rule {
        LIST_ENTRY(tcf_local_rule) next;
        uint32_t refcnt;
        uint16_t mask;
        union {
                struct {
                        rte_be32_t dst;
                        rte_be32_t src;
                } ipv4;
                struct {
                        uint8_t dst[IPV6_ADDR_LEN];
                        uint8_t src[IPV6_ADDR_LEN];
                } ipv6;
        };
};

/** Outer interface VXLAN encapsulation rules container. */
struct tcf_irule {
        LIST_ENTRY(tcf_irule) next;
        LIST_HEAD(, tcf_neigh_rule) neigh;
        LIST_HEAD(, tcf_local_rule) local;
        uint32_t refcnt;
        unsigned int ifouter; /**< Own interface index. */
};

/** VXLAN virtual netdev. */
struct tcf_vtep {
        LIST_ENTRY(tcf_vtep) next;
        uint32_t refcnt;
        unsigned int ifindex; /**< Own interface index. */
        uint16_t port;
        uint32_t created:1; /**< Actually created by PMD. */
        uint32_t waitreg:1; /**< Wait for VXLAN UDP port registration. */
};

/** Tunnel descriptor header, common for all tunnel types. */
struct flow_tcf_tunnel_hdr {
        uint32_t type; /**< Tunnel action type. */
        struct tcf_vtep *vtep; /**< Virtual tunnel endpoint device. */
        unsigned int ifindex_org; /**< Original dst/src interface */
        unsigned int *ifindex_ptr; /**< Interface ptr in message. */
};

struct flow_tcf_vxlan_decap {
        struct flow_tcf_tunnel_hdr hdr;
        uint16_t udp_port;
};

struct flow_tcf_vxlan_encap {
        struct flow_tcf_tunnel_hdr hdr;
        struct tcf_irule *iface;
        uint32_t mask;
        uint8_t ip_tos;
        uint8_t ip_ttl_hop;
        struct {
                struct ether_addr dst;
                struct ether_addr src;
        } eth;
        union {
                struct {
                        rte_be32_t dst;
                        rte_be32_t src;
                } ipv4;
                struct {
                        uint8_t dst[IPV6_ADDR_LEN];
                        uint8_t src[IPV6_ADDR_LEN];
                } ipv6;
        };
        struct {
                rte_be16_t src;
                rte_be16_t dst;
        } udp;
        struct {
                uint8_t vni[3];
        } vxlan;
};

/** Structure used when extracting the values of flow counters
 * from a netlink message.
 */
struct flow_tcf_stats_basic {
        bool valid;
        struct gnet_stats_basic counters;
};

/** Empty masks for known item types. */
static const union {
        struct rte_flow_item_port_id port_id;
        struct rte_flow_item_eth eth;
        struct rte_flow_item_vlan vlan;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_vxlan vxlan;
} flow_tcf_mask_empty = {
        {0},
};

/** Supported masks for known item types. */
static const struct {
        struct rte_flow_item_port_id port_id;
        struct rte_flow_item_eth eth;
        struct rte_flow_item_vlan vlan;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_vxlan vxlan;
} flow_tcf_mask_supported = {
        .port_id = {
                .id = 0xffffffff,
        },
        .eth = {
                .type = RTE_BE16(0xffff),
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        },
        .vlan = {
                /* PCP and VID only, no DEI. */
                .tci = RTE_BE16(0xefff),
                .inner_type = RTE_BE16(0xffff),
        },
        .ipv4.hdr = {
                .next_proto_id = 0xff,
                .time_to_live = 0xff,
                .type_of_service = 0xff,
                .src_addr = RTE_BE32(0xffffffff),
                .dst_addr = RTE_BE32(0xffffffff),
        },
        .ipv6.hdr = {
                .proto = 0xff,
                .vtc_flow = RTE_BE32(0xfful << IPV6_HDR_FL_SHIFT),
                .hop_limits = 0xff,
                .src_addr =
                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                .dst_addr =
                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                        "\xff\xff\xff\xff\xff\xff\xff\xff",
        },
        .tcp.hdr = {
                .src_port = RTE_BE16(0xffff),
                .dst_port = RTE_BE16(0xffff),
                .tcp_flags = 0xff,
        },
        .udp.hdr = {
                .src_port = RTE_BE16(0xffff),
                .dst_port = RTE_BE16(0xffff),
        },
        .vxlan = {
                .vni = "\xff\xff\xff",
        },
};

#define SZ_NLATTR_HDR MNL_ALIGN(sizeof(struct nlattr))
#define SZ_NLATTR_NEST SZ_NLATTR_HDR
#define SZ_NLATTR_DATA_OF(len) MNL_ALIGN(SZ_NLATTR_HDR + (len))
#define SZ_NLATTR_TYPE_OF(typ) SZ_NLATTR_DATA_OF(sizeof(typ))
#define SZ_NLATTR_STRZ_OF(str) SZ_NLATTR_DATA_OF(strlen(str) + 1)
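
/*
 * Size computation example (illustrative): with a 4-byte struct nlattr
 * header, SZ_NLATTR_TYPE_OF(uint16_t) is MNL_ALIGN(4 + 2) = 8 bytes
 * and SZ_NLATTR_STRZ_OF("pedit") is MNL_ALIGN(4 + 6) = 12 bytes.
 */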

#define PTOI_TABLE_SZ_MAX(dev) (mlx5_dev_to_port_id((dev)->device, NULL, 0) + 2)

/** DPDK port to network interface index (ifindex) conversion. */
struct flow_tcf_ptoi {
        uint16_t port_id; /**< DPDK port ID. */
        unsigned int ifindex; /**< Network interface index. */
};

/* Due to a limitation on driver/FW. */
#define MLX5_TCF_GROUP_ID_MAX 3

/*
 * Due to a limitation in the driver/FW, priority ranges from 1 to 16 in the
 * kernel. The priority in the rte_flow attribute starts from 0 and is
 * incremented by 1 during translation, e.g. a rule created with
 * attr->priority = 0 is installed with kernel TC priority 1. This is subject
 * to change: the max priority may be determined by trial-and-error, as in the
 * Verbs driver, once the restriction is lifted or the range is extended.
 */
#define MLX5_TCF_GROUP_PRIORITY_MAX 15

#define MLX5_TCF_FATE_ACTIONS \
        (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
         MLX5_FLOW_ACTION_JUMP)

#define MLX5_TCF_VLAN_ACTIONS \
        (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN | \
         MLX5_FLOW_ACTION_OF_SET_VLAN_VID | MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)

#define MLX5_TCF_VXLAN_ACTIONS \
        (MLX5_FLOW_ACTION_VXLAN_ENCAP | MLX5_FLOW_ACTION_VXLAN_DECAP)

#define MLX5_TCF_PEDIT_ACTIONS \
        (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST | \
         MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST | \
         MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST | \
         MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL | \
         MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)

#define MLX5_TCF_CONFIG_ACTIONS \
        (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_JUMP | \
         MLX5_FLOW_ACTION_OF_PUSH_VLAN | MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
         MLX5_FLOW_ACTION_OF_SET_VLAN_PCP | \
         (MLX5_TCF_PEDIT_ACTIONS & ~MLX5_FLOW_ACTION_DEC_TTL))

#define MAX_PEDIT_KEYS 128
#define SZ_PEDIT_KEY_VAL 4

#define NUM_OF_PEDIT_KEYS(sz) \
        (((sz) / SZ_PEDIT_KEY_VAL) + (((sz) % SZ_PEDIT_KEY_VAL) ? 1 : 0))

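/*
 * Example (illustrative): NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN) yields
 * 16 / 4 = 4 keys, while NUM_OF_PEDIT_KEYS(TTL_LEN) rounds the single
 * TTL byte up to one full 32-bit key.
 */
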
struct pedit_key_ex {
        enum pedit_header_type htype;
        enum pedit_cmd cmd;
};

struct pedit_parser {
        struct tc_pedit_sel sel;
        struct tc_pedit_key keys[MAX_PEDIT_KEYS];
        struct pedit_key_ex keys_ex[MAX_PEDIT_KEYS];
};

/**
 * Create space for using the implicitly created TC flow counter.
 *
 * @return
 *   A pointer to the counter data structure, NULL otherwise and
 *   rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_tcf_counter_new(void)
{
        struct mlx5_flow_counter *cnt;

        /*
         * E-Switch counters cannot be shared and their IDs are unknown.
         * Currently all are returned with ID 0. In the future it may be
         * better to switch to unique numbers.
         */
        struct mlx5_flow_counter tmpl = {
                .ref_cnt = 1,
        };
        cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
        if (!cnt) {
                rte_errno = ENOMEM;
                return NULL;
        }
        *cnt = tmpl;
        /* Implicit counter, do not add to list. */
        return cnt;
}

/**
 * Set pedit key of MAC address
 *
 * @param[in] actions
 *   pointer to action specification
 * @param[in,out] p_parser
 *   pointer to pedit_parser
 */
static void
flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,
                           struct pedit_parser *p_parser)
{
        int idx = p_parser->sel.nkeys;
        uint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
                                        offsetof(struct ether_hdr, s_addr) :
                                        offsetof(struct ether_hdr, d_addr);
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)actions->conf;

        p_parser->keys[idx].off = off;
        p_parser->keys[idx].mask = ~UINT32_MAX;
        p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
        p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
        memcpy(&p_parser->keys[idx].val,
                conf->mac_addr, SZ_PEDIT_KEY_VAL);
        idx++;
        p_parser->keys[idx].off = off + SZ_PEDIT_KEY_VAL;
        p_parser->keys[idx].mask = 0xFFFF0000;
        p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
        p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
        memcpy(&p_parser->keys[idx].val,
                conf->mac_addr + SZ_PEDIT_KEY_VAL,
                ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
        p_parser->sel.nkeys = (++idx);
}
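
/*
 * Illustrative note (an interpretation, not from the original source):
 * a pedit key rewrites one 32-bit word as (word & mask) ^ val, so a
 * 6-byte MAC address takes two keys: the first, with mask 0
 * (~UINT32_MAX), replaces address bytes 0-3 entirely; the second, with
 * mask 0xFFFF0000, rewrites address bytes 4-5 while preserving the two
 * trailing bytes of that word, which belong to the next header field.
 */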

/**
 * Set pedit key of decrease/set ttl
 *
 * @param[in] actions
 *   pointer to action specification
 * @param[in,out] p_parser
 *   pointer to pedit_parser
 * @param[in] item_flags
 *   flags of all items presented
 */
static void
flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,
                                struct pedit_parser *p_parser,
                                uint64_t item_flags)
{
        int idx = p_parser->sel.nkeys;

        p_parser->keys[idx].mask = 0xFFFFFF00;
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
                p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
                p_parser->keys[idx].off =
                        offsetof(struct ipv4_hdr, time_to_live);
        }
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
                p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
                p_parser->keys[idx].off =
                        offsetof(struct ipv6_hdr, hop_limits);
        }
        if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
                p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
                p_parser->keys[idx].val = 0x000000FF;
        } else {
                p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
                p_parser->keys[idx].val =
                        (__u32)((const struct rte_flow_action_set_ttl *)
                         actions->conf)->ttl_value;
        }
        p_parser->sel.nkeys = (++idx);
}
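
/*
 * Illustrative note (an interpretation, not from the original source):
 * for DEC_TTL the CMD_ADD key adds 0xFF to the TTL byte; with 8-bit
 * wrap-around this is equivalent to subtracting 1, so no dedicated
 * "decrement" pedit command is needed.
 */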

/**
 * Set pedit key of transport (TCP/UDP) port value
 *
 * @param[in] actions
 *   pointer to action specification
 * @param[in,out] p_parser
 *   pointer to pedit_parser
 * @param[in] item_flags
 *   flags of all items presented
 */
static void
flow_tcf_pedit_key_set_tp_port(const struct rte_flow_action *actions,
                                struct pedit_parser *p_parser,
                                uint64_t item_flags)
{
        int idx = p_parser->sel.nkeys;

        if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP;
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP;
        p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
        /* The offset of the src/dst port is the same for TCP and UDP. */
        p_parser->keys[idx].off =
                actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
                offsetof(struct tcp_hdr, src_port) :
                offsetof(struct tcp_hdr, dst_port);
        p_parser->keys[idx].mask = 0xFFFF0000;
        p_parser->keys[idx].val =
                (__u32)((const struct rte_flow_action_set_tp *)
                                actions->conf)->port;
        p_parser->sel.nkeys = (++idx);
}

/**
 * Set pedit key of ipv6 address
 *
 * @param[in] actions
 *   pointer to action specification
 * @param[in,out] p_parser
 *   pointer to pedit_parser
 */
static void
flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,
                                 struct pedit_parser *p_parser)
{
        int idx = p_parser->sel.nkeys;
        int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
        int off_base =
                actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
                offsetof(struct ipv6_hdr, src_addr) :
                offsetof(struct ipv6_hdr, dst_addr);
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)actions->conf;

        for (int i = 0; i < keys; i++, idx++) {
                p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
                p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
                p_parser->keys[idx].off = off_base + i * SZ_PEDIT_KEY_VAL;
                p_parser->keys[idx].mask = ~UINT32_MAX;
                memcpy(&p_parser->keys[idx].val,
                        conf->ipv6_addr + i * SZ_PEDIT_KEY_VAL,
                        SZ_PEDIT_KEY_VAL);
        }
        p_parser->sel.nkeys += keys;
}

/**
 * Set pedit key of ipv4 address
 *
 * @param[in] actions
 *   pointer to action specification
 * @param[in,out] p_parser
 *   pointer to pedit_parser
 */
static void
flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,
                                 struct pedit_parser *p_parser)
{
        int idx = p_parser->sel.nkeys;

        p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
        p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
        p_parser->keys[idx].off =
                actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
                offsetof(struct ipv4_hdr, src_addr) :
                offsetof(struct ipv4_hdr, dst_addr);
        p_parser->keys[idx].mask = ~UINT32_MAX;
        p_parser->keys[idx].val =
                ((const struct rte_flow_action_set_ipv4 *)
                 actions->conf)->ipv4_addr;
        p_parser->sel.nkeys = (++idx);
}

/**
 * Create the pedit's na attribute in the netlink message
 * on the pre-allocated message buffer.
 *
 * @param[in,out] nl
 *   pointer to pre-allocated netlink message buffer
 * @param[in,out] actions
 *   pointer to pointer of actions specification.
 * @param[in] item_flags
 *   flags of all items presented
 */
static void
flow_tcf_create_pedit_mnl_msg(struct nlmsghdr *nl,
                              const struct rte_flow_action **actions,
                              uint64_t item_flags)
{
        struct pedit_parser p_parser;
        struct nlattr *na_act_options;
        struct nlattr *na_pedit_keys;

        memset(&p_parser, 0, sizeof(p_parser));
        mnl_attr_put_strz(nl, TCA_ACT_KIND, "pedit");
        na_act_options = mnl_attr_nest_start(nl, TCA_ACT_OPTIONS);
        /* all modify header actions should be in one tc-pedit action */
        for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
                switch ((*actions)->type) {
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        flow_tcf_pedit_key_set_ipv4_addr(*actions, &p_parser);
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        flow_tcf_pedit_key_set_ipv6_addr(*actions, &p_parser);
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                        flow_tcf_pedit_key_set_tp_port(*actions,
                                                        &p_parser, item_flags);
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                        flow_tcf_pedit_key_set_dec_ttl(*actions,
                                                        &p_parser, item_flags);
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
                        flow_tcf_pedit_key_set_mac(*actions, &p_parser);
                        break;
                default:
                        goto pedit_mnl_msg_done;
                }
        }
pedit_mnl_msg_done:
        p_parser.sel.action = TC_ACT_PIPE;
        mnl_attr_put(nl, TCA_PEDIT_PARMS_EX,
                     sizeof(p_parser.sel) +
                     p_parser.sel.nkeys * sizeof(struct tc_pedit_key),
                     &p_parser);
        na_pedit_keys =
                mnl_attr_nest_start(nl, TCA_PEDIT_KEYS_EX | NLA_F_NESTED);
        for (int i = 0; i < p_parser.sel.nkeys; i++) {
                struct nlattr *na_pedit_key =
                        mnl_attr_nest_start(nl,
                                            TCA_PEDIT_KEY_EX | NLA_F_NESTED);
                mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_HTYPE,
                                 p_parser.keys_ex[i].htype);
                mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_CMD,
                                 p_parser.keys_ex[i].cmd);
                mnl_attr_nest_end(nl, na_pedit_key);
        }
        mnl_attr_nest_end(nl, na_pedit_keys);
        mnl_attr_nest_end(nl, na_act_options);
        (*actions)--;
}
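
/*
 * Resulting attribute layout sketch (derived from the code above):
 *
 *   TCA_ACT_KIND = "pedit"
 *   TCA_ACT_OPTIONS (nest)
 *     TCA_PEDIT_PARMS_EX (struct tc_pedit_sel + nkeys keys)
 *     TCA_PEDIT_KEYS_EX (nest)
 *       TCA_PEDIT_KEY_EX (nest) { HTYPE, CMD }   (one per key)
 */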

/**
 * Calculate the max memory size of one TC-pedit action.
 * One TC-pedit action can contain a set of keys, each defining
 * a rewrite element (an rte_flow action).
 *
 * @param[in,out] actions
 *   actions specification.
 * @param[in,out] action_flags
 *   actions flags
 * @return
 *   Max memory size of one TC-pedit action
 */
static int
flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,
                                uint64_t *action_flags)
{
        int pedit_size = 0;
        int keys = 0;
        uint64_t flags = 0;

        pedit_size += SZ_NLATTR_NEST + /* na_act_index. */
                      SZ_NLATTR_STRZ_OF("pedit") +
                      SZ_NLATTR_NEST; /* TCA_ACT_OPTIONS. */
        for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
                switch ((*actions)->type) {
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                        keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_IPV4_SRC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_IPV4_DST;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                        keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_IPV6_SRC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_IPV6_DST;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                        /* TCP is the same as UDP. */
                        keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_TP_SRC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                        /* TCP is the same as UDP. */
                        keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_TP_DST;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                        keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                        keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
                        flags |= MLX5_FLOW_ACTION_DEC_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                        keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
                        keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
                        flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
                        break;
                default:
                        goto get_pedit_action_size_done;
                }
        }
get_pedit_action_size_done:
        /* TCA_PEDIT_PARMS_EX */
        pedit_size +=
                SZ_NLATTR_DATA_OF(sizeof(struct tc_pedit_sel) +
                                  keys * sizeof(struct tc_pedit_key));
        pedit_size += SZ_NLATTR_NEST; /* TCA_PEDIT_KEYS */
        pedit_size += keys *
                      /* TCA_PEDIT_KEY_EX + HTYPE + CMD */
                      (SZ_NLATTR_NEST + SZ_NLATTR_DATA_OF(2) +
                       SZ_NLATTR_DATA_OF(2));
        (*action_flags) |= flags;
        (*actions)--;
        return pedit_size;
}

/**
 * Retrieve the mask for a pattern item.
 *
 * This function does basic sanity checks on a pattern item in order to
 * return the most appropriate mask for it.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask_default
 *   Default mask for pattern item as specified by the flow API.
 * @param[in] mask_supported
 *   Mask fields supported by the implementation.
 * @param[in] mask_empty
 *   Empty mask to return when there is no specification.
 * @param[in] mask_size
 *   Size of the mask to inspect, in bytes.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   Either @p item->mask or one of the mask parameters on success, NULL
 *   otherwise and rte_errno is set.
 */
static const void *
flow_tcf_item_mask(const struct rte_flow_item *item, const void *mask_default,
                   const void *mask_supported, const void *mask_empty,
                   size_t mask_size, struct rte_flow_error *error)
{
        const uint8_t *mask;
        size_t i;

        /* item->last and item->mask cannot exist without item->spec. */
        if (!item->spec && (item->mask || item->last)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "\"mask\" or \"last\" field provided without"
                                   " a corresponding \"spec\"");
                return NULL;
        }
        /* No spec, no mask, no problem. */
        if (!item->spec)
                return mask_empty;
        mask = item->mask ? item->mask : mask_default;
        assert(mask);
        /*
         * Single-pass check to make sure that:
         * - Mask is supported, no bits are set outside mask_supported.
         * - Both item->spec and item->last are included in mask.
         */
        for (i = 0; i != mask_size; ++i) {
                if (!mask[i])
                        continue;
                if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
                    ((const uint8_t *)mask_supported)[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                           "unsupported field found"
                                           " in \"mask\"");
                        return NULL;
                }
                if (item->last &&
                    (((const uint8_t *)item->spec)[i] & mask[i]) !=
                    (((const uint8_t *)item->last)[i] & mask[i])) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                           item->last,
                                           "range between \"spec\" and \"last\""
                                           " not comprised in \"mask\"");
                        return NULL;
                }
        }
        return mask;
}
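
/*
 * Usage sketch (illustrative, mirroring how validation code is
 * expected to call this helper for an IPv4 item):
 *
 *   mask.ipv4 = flow_tcf_item_mask
 *           (items, &rte_flow_item_ipv4_mask,
 *            &flow_tcf_mask_supported.ipv4,
 *            &flow_tcf_mask_empty.ipv4,
 *            sizeof(flow_tcf_mask_supported.ipv4),
 *            error);
 *   if (!mask.ipv4)
 *           return -rte_errno;
 */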

/**
 * Build a conversion table between port ID and ifindex.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ptoi
 *   Pointer to ptoi table.
 * @param[in] len
 *   Size of ptoi table provided.
 *
 * @return
 *   Size of ptoi table filled.
 */
static unsigned int
flow_tcf_build_ptoi_table(struct rte_eth_dev *dev, struct flow_tcf_ptoi *ptoi,
                          unsigned int len)
{
        unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
        uint16_t port_id[n + 1];
        unsigned int i;
        unsigned int own = 0;

        /* At least one port is needed when no switch domain is present. */
        if (!n) {
                n = 1;
                port_id[0] = dev->data->port_id;
        } else {
                n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
        }
        if (n > len)
                return 0;
        for (i = 0; i != n; ++i) {
                struct rte_eth_dev_info dev_info;

                rte_eth_dev_info_get(port_id[i], &dev_info);
                if (port_id[i] == dev->data->port_id)
                        own = i;
                ptoi[i].port_id = port_id[i];
                ptoi[i].ifindex = dev_info.if_index;
        }
        /* Ensure first entry of ptoi[] is the current device. */
        if (own) {
                ptoi[n] = ptoi[0];
                ptoi[0] = ptoi[own];
                ptoi[own] = ptoi[n];
        }
        /* An entry with zero ifindex terminates ptoi[]. */
        ptoi[n].port_id = 0;
        ptoi[n].ifindex = 0;
        return n;
}
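
/*
 * Usage sketch (illustrative): callers allocate the table on the stack
 * with one spare slot for the zero terminator:
 *
 *   struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
 *
 *   if (!flow_tcf_build_ptoi_table(dev, ptoi, PTOI_TABLE_SZ_MAX(dev)))
 *           return; (handle failure: table too small for the domain)
 */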

/**
 * Verify the @p attr will be correctly understood by the E-switch.
 *
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_tcf_validate_attributes(const struct rte_flow_attr *attr,
                             struct rte_flow_error *error)
{
        /*
         * Supported attributes: groups, some priorities and ingress only.
         * group is supported only if kernel supports chain. Don't care about
         * transfer as it is the caller's problem.
         */
        if (attr->group > MLX5_TCF_GROUP_ID_MAX)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                          "group ID larger than "
                                          RTE_STR(MLX5_TCF_GROUP_ID_MAX)
                                          " isn't supported");
        else if (attr->priority > MLX5_TCF_GROUP_PRIORITY_MAX)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr,
                                          "priority more than "
                                          RTE_STR(MLX5_TCF_GROUP_PRIORITY_MAX)
                                          " is not supported");
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "only ingress is supported");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "egress is not supported");
        return 0;
}

/**
 * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_ETH item for E-Switch.
 * The routine checks the L2 fields to be used in encapsulation header.
 *
 * @param[in] item
 *   Pointer to the item structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 **/
static int
flow_tcf_validate_vxlan_encap_eth(const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;

        if (!spec) {
                /*
                 * Specification for L2 addresses can be empty
                 * because these are optional and not required
                 * directly by the tc rule. The kernel tries to
                 * resolve them on its own.
                 */
                return 0;
        }
        if (!mask) {
                /* If mask is not specified use the default one. */
                mask = &rte_flow_item_eth_mask;
        }
        if (memcmp(&mask->dst,
                   &flow_tcf_mask_empty.eth.dst,
                   sizeof(flow_tcf_mask_empty.eth.dst))) {
                if (memcmp(&mask->dst,
                           &rte_flow_item_eth_mask.dst,
                           sizeof(rte_flow_item_eth_mask.dst)))
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                 "no support for partial mask on"
                                 " \"eth.dst\" field");
        }
        if (memcmp(&mask->src,
                   &flow_tcf_mask_empty.eth.src,
                   sizeof(flow_tcf_mask_empty.eth.src))) {
                if (memcmp(&mask->src,
                           &rte_flow_item_eth_mask.src,
                           sizeof(rte_flow_item_eth_mask.src)))
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                 "no support for partial mask on"
                                 " \"eth.src\" field");
        }
        if (mask->type != RTE_BE16(0x0000)) {
                if (mask->type != RTE_BE16(0xffff))
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                 "no support for partial mask on"
                                 " \"eth.type\" field");
                DRV_LOG(WARNING,
                        "outer ethernet type field"
                        " cannot be forced for vxlan"
                        " encapsulation, parameter ignored");
        }
        return 0;
}

/**
 * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV4 item for E-Switch.
 * The routine checks the IPv4 fields to be used in encapsulation header.
 *
 * @param[in] item
 *   Pointer to the item structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 **/
static int
flow_tcf_validate_vxlan_encap_ipv4(const struct rte_flow_item *item,
                                   struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;

        if (!spec) {
                /*
                 * Specification for IP addresses cannot be empty
                 * because it is required by tunnel_key parameter.
                 */
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "NULL outer ipv4 address"
                                          " specification for vxlan"
                                          " encapsulation");
        }
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
                if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                 "no support for partial mask on"
                                 " \"ipv4.hdr.dst_addr\" field"
                                 " for vxlan encapsulation");
                /* More IPv4 address validations can be put here. */
        } else {
                /*
                 * Kernel uses the destination IP address to determine
                 * the routing path and obtain the MAC destination
                 * address, so IP destination address must be
                 * specified in the tc rule.
                 */
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "outer ipv4 destination address"
                                          " must be specified for"
                                          " vxlan encapsulation");
        }
        if (mask->hdr.src_addr != RTE_BE32(0x00000000)) {
                if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                 "no support for partial mask on"
                                 " \"ipv4.hdr.src_addr\" field"
                                 " for vxlan encapsulation");
                /* More IPv4 address validations can be put here. */
        } else {
                /*
                 * Kernel uses the source IP address to select the
                 * interface for egress encapsulated traffic, so
                 * it must be specified in the tc rule.
                 */
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "outer ipv4 source address"
                                          " must be specified for"
                                          " vxlan encapsulation");
        }
        if (mask->hdr.type_of_service &&
            mask->hdr.type_of_service != 0xff)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                          "no support for partial mask on"
                                          " \"ipv4.hdr.type_of_service\" field"
                                          " for vxlan encapsulation");
        if (mask->hdr.time_to_live &&
            mask->hdr.time_to_live != 0xff)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                          "no support for partial mask on"
                                          " \"ipv4.hdr.time_to_live\" field"
                                          " for vxlan encapsulation");
        return 0;
}
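
/*
 * Illustrative example (assumed values, not from the original source)
 * of an IPv4 item accepted by the check above: both addresses fully
 * specified with full masks:
 *
 *   struct rte_flow_item_ipv4 spec = {
 *           .hdr = {
 *                   .src_addr = RTE_BE32(0xC0A80001), // 192.168.0.1
 *                   .dst_addr = RTE_BE32(0xC0A80002), // 192.168.0.2
 *           },
 *   };
 *   struct rte_flow_item_ipv4 mask = {
 *           .hdr = {
 *                   .src_addr = RTE_BE32(0xffffffff),
 *                   .dst_addr = RTE_BE32(0xffffffff),
 *           },
 *   };
 */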
1342
1343 /**
1344  * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV6 item for E-Switch.
1345  * The routine checks the IPv6 fields to be used in encapsulation header.
1346  *
1347  * @param[in] item
1348  *   Pointer to the item structure.
1349  * @param[out] error
1350  *   Pointer to the error structure.
1351  *
1352  * @return
1353  *   0 on success, a negative errno value otherwise and rte_errno is set.
1354  **/
1355 static int
1356 flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,
1357                                    struct rte_flow_error *error)
1358 {
1359         const struct rte_flow_item_ipv6 *spec = item->spec;
1360         const struct rte_flow_item_ipv6 *mask = item->mask;
1361         uint8_t msk6;
1362
1363         if (!spec) {
1364                 /*
1365                  * Specification for IP addresses cannot be empty
1366                  * because it is required by tunnel_key parameter.
1367                  */
1368                 return rte_flow_error_set(error, EINVAL,
1369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1370                                           "NULL outer ipv6 address"
1371                                           " specification for"
1372                                           " vxlan encapsulation");
1373         }
1374         if (!mask)
1375                 mask = &rte_flow_item_ipv6_mask;
1376         if (memcmp(&mask->hdr.dst_addr,
1377                    &flow_tcf_mask_empty.ipv6.hdr.dst_addr,
1378                    IPV6_ADDR_LEN)) {
1379                 if (memcmp(&mask->hdr.dst_addr,
1380                            &rte_flow_item_ipv6_mask.hdr.dst_addr,
1381                            IPV6_ADDR_LEN))
1382                         return rte_flow_error_set
1383                                         (error, ENOTSUP,
1384                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1385                                          "no support for partial mask on"
1386                                          " \"ipv6.hdr.dst_addr\" field"
1387                                          " for vxlan encapsulation");
1388                 /* More IPv6 address validations can be put here. */
1389         } else {
1390                 /*
1391                  * Kernel uses the destination IP address to determine
1392                  * the routing path and obtain the MAC destination
1393                  * address (heigh or gate), so IP destination address
1394                  * must be specified within the tc rule.
1395                  */
1396                 return rte_flow_error_set(error, EINVAL,
1397                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1398                                           "outer ipv6 destination address"
1399                                           " must be specified for"
1400                                           " vxlan encapsulation");
1401         }
1402         if (memcmp(&mask->hdr.src_addr,
1403                    &flow_tcf_mask_empty.ipv6.hdr.src_addr,
1404                    IPV6_ADDR_LEN)) {
1405                 if (memcmp(&mask->hdr.src_addr,
1406                            &rte_flow_item_ipv6_mask.hdr.src_addr,
1407                            IPV6_ADDR_LEN))
1408                         return rte_flow_error_set
1409                                         (error, ENOTSUP,
1410                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1411                                          "no support for partial mask on"
1412                                          " \"ipv6.hdr.src_addr\" field"
1413                                          " for vxlan encapsulation");
1414                 /* More L3 address validation can be put here. */
1415         } else {
1416                 /*
1417                  * Kernel uses the source IP address to select the
1418                  * interface for egress encapsulated traffic, so
1419                  * it must be specified in the tc rule.
1420                  */
1421                 return rte_flow_error_set(error, EINVAL,
1422                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1423                                           "outer L3 source address"
1424                                           " must be specified for"
1425                                           " vxlan encapsulation");
1426         }
1427         msk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
1428                 IPV6_HDR_TC_SHIFT) & 0xff;
1429         if (msk6 && msk6 != 0xff)
1430                 return rte_flow_error_set(error, ENOTSUP,
1431                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1432                                           "no support for partial mask on"
1433                                           " \"ipv6.hdr.vtc_flow.tos\" field"
1434                                           " for vxlan encapsulation");
1435         if (mask->hdr.hop_limits && mask->hdr.hop_limits != 0xff)
1436                 return rte_flow_error_set(error, ENOTSUP,
1437                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1438                                           "no support for partial mask on"
1439                                           " \"ipv6.hdr.hop_limits\" field"
1440                                           " for vxlan encapsulation");
1441         return 0;
1442 }
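
For illustration, a hypothetical caller-side IPv6 item that passes the checks
above could look as follows; the addresses and names are example values, not
part of the driver, and the default rte_flow_item_ipv6_mask provides exactly
the full-address, wildcarded-TOS/hop-limit mask this routine expects:

/* Hypothetical example: an outer IPv6 item accepted by
 * flow_tcf_validate_vxlan_encap_ipv6(). Not part of the driver.
 */
static const struct rte_flow_item_ipv6 example_encap_ipv6_spec = {
	.hdr = {
		.src_addr = { [0] = 0xfd, [15] = 0x01 }, /* fd00::1 */
		.dst_addr = { [0] = 0xfd, [15] = 0x02 }, /* fd00::2 */
	},
};

static const struct rte_flow_item example_encap_ipv6_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV6,
	.spec = &example_encap_ipv6_spec,
	/* Full src/dst address mask, vtc_flow and hop_limits zeroed. */
	.mask = &rte_flow_item_ipv6_mask,
};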
1443
1444 /**
1445  * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_UDP item for E-Switch.
1446  * The routine checks the UDP fields to be used in the encapsulation header.
1447  *
1448  * @param[in] item
1449  *   Pointer to the item structure.
1450  * @param[out] error
1451  *   Pointer to the error structure.
1452  *
1453  * @return
1454  *   0 on success, a negative errno value otherwise and rte_errno is set.
1455  **/
1456 static int
1457 flow_tcf_validate_vxlan_encap_udp(const struct rte_flow_item *item,
1458                                   struct rte_flow_error *error)
1459 {
1460         const struct rte_flow_item_udp *spec = item->spec;
1461         const struct rte_flow_item_udp *mask = item->mask;
1462
1463         if (!spec) {
1464                 /*
1465                  * Specification for UDP ports cannot be empty
1466                  * because it is required by the tunnel_key parameter.
1467                  */
1468                 return rte_flow_error_set(error, EINVAL,
1469                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1470                                           "NULL UDP port specification"
1471                                           " for vxlan encapsulation");
1472         }
1473         if (!mask)
1474                 mask = &rte_flow_item_udp_mask;
1475         if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
1476                 if (mask->hdr.dst_port != RTE_BE16(0xffff))
1477                         return rte_flow_error_set
1478                                         (error, ENOTSUP,
1479                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1480                                          "no support for partial mask on"
1481                                          " \"udp.hdr.dst_port\" field"
1482                                          " for vxlan encapsulation");
1483                 if (!spec->hdr.dst_port)
1484                         return rte_flow_error_set
1485                                         (error, EINVAL,
1486                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
1487                                          "outer UDP remote port cannot be"
1488                                          " 0 for vxlan encapsulation");
1489         } else {
1490                 return rte_flow_error_set(error, EINVAL,
1491                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1492                                           "outer UDP remote port"
1493                                           " must be specified for"
1494                                           " vxlan encapsulation");
1495         }
1496         if (mask->hdr.src_port != RTE_BE16(0x0000)) {
1497                 if (mask->hdr.src_port != RTE_BE16(0xffff))
1498                         return rte_flow_error_set
1499                                         (error, ENOTSUP,
1500                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1501                                          "no support for partial mask on"
1502                                          " \"udp.hdr.src_port\" field"
1503                                          " for vxlan encapsulation");
1504                 DRV_LOG(WARNING,
1505                         "outer UDP source port cannot be"
1506                         " forced for vxlan encapsulation,"
1507                         " parameter ignored");
1508         }
1509         return 0;
1510 }
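
Similarly, a hypothetical UDP item accepted by this routine: the destination
port carries a full mask and a non-zero value (4789, the IANA VXLAN port, is
just an example), while the source port stays wildcarded since tc assigns it:

/* Hypothetical example: an outer UDP item accepted by
 * flow_tcf_validate_vxlan_encap_udp(). Not part of the driver.
 */
static const struct rte_flow_item_udp example_encap_udp_spec = {
	.hdr = { .dst_port = RTE_BE16(4789) },
};

static const struct rte_flow_item_udp example_encap_udp_mask = {
	.hdr = { .dst_port = RTE_BE16(0xffff) }, /* No partial masks. */
};

static const struct rte_flow_item example_encap_udp_item = {
	.type = RTE_FLOW_ITEM_TYPE_UDP,
	.spec = &example_encap_udp_spec,
	.mask = &example_encap_udp_mask,
};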
1511
1512 /**
1513  * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_VXLAN item for E-Switch.
1514  * The routine checks the VNI field to be used in the encapsulation header.
1515  *
1516  * @param[in] item
1517  *   Pointer to the item structure.
1518  * @param[out] error
1519  *   Pointer to the error structure.
1520  *
1521  * @return
1522  *   0 on success, a negative errno value otherwise and rte_errno is set.
1523  **/
1524 static int
1525 flow_tcf_validate_vxlan_encap_vni(const struct rte_flow_item *item,
1526                                   struct rte_flow_error *error)
1527 {
1528         const struct rte_flow_item_vxlan *spec = item->spec;
1529         const struct rte_flow_item_vxlan *mask = item->mask;
1530
1531         if (!spec) {
1532         /* Outer VNI is required by the tunnel_key parameter. */
1533                 return rte_flow_error_set(error, EINVAL,
1534                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1535                                           "NULL VNI specification"
1536                                           " for vxlan encapsulation");
1537         }
1538         if (!mask)
1539                 mask = &rte_flow_item_vxlan_mask;
1540         if (!mask->vni[0] && !mask->vni[1] && !mask->vni[2])
1541                 return rte_flow_error_set(error, EINVAL,
1542                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1543                                           "outer VNI must be specified "
1544                                           "for vxlan encapsulation");
1545         if (mask->vni[0] != 0xff ||
1546             mask->vni[1] != 0xff ||
1547             mask->vni[2] != 0xff)
1548                 return rte_flow_error_set(error, ENOTSUP,
1549                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1550                                           "no support for partial mask on"
1551                                           " \"vxlan.vni\" field");
1552
1553         if (!spec->vni[0] && !spec->vni[1] && !spec->vni[2])
1554                 return rte_flow_error_set(error, EINVAL,
1555                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1556                                           "vxlan vni cannot be 0");
1557         return 0;
1558 }
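
A hypothetical VXLAN item for the VNI, again with example values: the mask
must cover all three VNI bytes and the VNI itself must be non-zero, which the
default rte_flow_item_vxlan_mask and a VNI of 42 satisfy:

/* Hypothetical example: a VXLAN item accepted by
 * flow_tcf_validate_vxlan_encap_vni(). Not part of the driver.
 */
static const struct rte_flow_item_vxlan example_encap_vxlan_spec = {
	.vni = { 0x00, 0x00, 0x2a }, /* VNI 42 in network byte order. */
};

static const struct rte_flow_item example_encap_vxlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	.spec = &example_encap_vxlan_spec,
	.mask = &rte_flow_item_vxlan_mask, /* Full vni = 0xffffff mask. */
};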
1559
1560 /**
1561  * Validate VXLAN_ENCAP action item list for E-Switch.
1562  * The routine checks the items to be used in the encapsulation header.
1563  *
1564  * @param[in] action
1565  *   Pointer to the VXLAN_ENCAP action structure.
1566  * @param[out] error
1567  *   Pointer to the error structure.
1568  *
1569  * @return
1570  *   0 on success, a negative errno value otherwise and rte_errno is set.
1571  **/
1572 static int
1573 flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
1574                               struct rte_flow_error *error)
1575 {
1576         const struct rte_flow_item *items;
1577         int ret;
1578         uint32_t item_flags = 0;
1579
1580         if (!action->conf)
1581                 return rte_flow_error_set(error, EINVAL,
1582                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1583                                           "Missing vxlan tunnel"
1584                                           " action configuration");
1585         items = ((const struct rte_flow_action_vxlan_encap *)
1586                                         action->conf)->definition;
1587         if (!items)
1588                 return rte_flow_error_set(error, EINVAL,
1589                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1590                                           "Missing vxlan tunnel"
1591                                           " encapsulation parameters");
1592         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1593                 switch (items->type) {
1594                 case RTE_FLOW_ITEM_TYPE_VOID:
1595                         break;
1596                 case RTE_FLOW_ITEM_TYPE_ETH:
1597                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1598                                                           error);
1599                         if (ret < 0)
1600                                 return ret;
1601                         ret = flow_tcf_validate_vxlan_encap_eth(items, error);
1602                         if (ret < 0)
1603                                 return ret;
1604                         item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
1605                         break;
1607                 case RTE_FLOW_ITEM_TYPE_IPV4:
1608                         ret = mlx5_flow_validate_item_ipv4
1609                                         (items, item_flags,
1610                                          &flow_tcf_mask_supported.ipv4, error);
1611                         if (ret < 0)
1612                                 return ret;
1613                         ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
1614                         if (ret < 0)
1615                                 return ret;
1616                         item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1617                         break;
1618                 case RTE_FLOW_ITEM_TYPE_IPV6:
1619                         ret = mlx5_flow_validate_item_ipv6
1620                                         (items, item_flags,
1621                                          &flow_tcf_mask_supported.ipv6, error);
1622                         if (ret < 0)
1623                                 return ret;
1624                         ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
1625                         if (ret < 0)
1626                                 return ret;
1627                         item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1628                         break;
1629                 case RTE_FLOW_ITEM_TYPE_UDP:
1630                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1631                                                           0xff, error);
1632                         if (ret < 0)
1633                                 return ret;
1634                         ret = flow_tcf_validate_vxlan_encap_udp(items, error);
1635                         if (ret < 0)
1636                                 return ret;
1637                         item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
1638                         break;
1639                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1640                         ret = mlx5_flow_validate_item_vxlan(items,
1641                                                             item_flags, error);
1642                         if (ret < 0)
1643                                 return ret;
1644                         ret = flow_tcf_validate_vxlan_encap_vni(items, error);
1645                         if (ret < 0)
1646                                 return ret;
1647                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
1648                         break;
1649                 default:
1650                         return rte_flow_error_set
1651                                         (error, ENOTSUP,
1652                                          RTE_FLOW_ERROR_TYPE_ITEM, items,
1653                                          "vxlan encap item not supported");
1654                 }
1655         }
1656         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
1657                 return rte_flow_error_set(error, EINVAL,
1658                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1659                                           "no outer IP layer found"
1660                                           " for vxlan encapsulation");
1661         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1662                 return rte_flow_error_set(error, EINVAL,
1663                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1664                                           "no outer UDP layer found"
1665                                           " for vxlan encapsulation");
1666         if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
1667                 return rte_flow_error_set(error, EINVAL,
1668                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1669                                           "no VXLAN VNI found"
1670                                           " for vxlan encapsulation");
1671         return 0;
1672 }
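
Putting the sketches together, a hypothetical definition list that satisfies
this validator needs outer L2, one outer L3, outer UDP and a VNI, terminated
by an END item. The example below reuses the example_encap_* objects from the
sketches above and adds an Ethernet spec; all names and values are
illustrative, not part of the driver:

/* Hypothetical example: a VXLAN_ENCAP configuration that passes
 * flow_tcf_validate_vxlan_encap(). Not part of the driver.
 */
static const struct rte_flow_item_eth example_encap_eth_spec = {
	.dst = { .addr_bytes = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 } },
	.src = { .addr_bytes = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x02 } },
};

static struct rte_flow_item example_encap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &example_encap_eth_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
	  .spec = &example_encap_ipv6_spec,
	  .mask = &rte_flow_item_ipv6_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,
	  .spec = &example_encap_udp_spec,
	  .mask = &example_encap_udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &example_encap_vxlan_spec,
	  .mask = &rte_flow_item_vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action_vxlan_encap example_encap_conf = {
	.definition = example_encap_pattern,
};
/* Used as { .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *           .conf = &example_encap_conf } in an action list.
 */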
1673
1674 /**
1675  * Validate outer RTE_FLOW_ITEM_TYPE_UDP item if tunnel item
1676  * RTE_FLOW_ITEM_TYPE_VXLAN is present in item list.
1677  *
1678  * @param[in] udp
1679  *   Outer UDP layer item (if any, NULL otherwise).
1680  * @param[out] error
1681  *   Pointer to the error structure.
1682  *
1683  * @return
1684  *   0 on success, a negative errno value otherwise and rte_errno is set.
1685  **/
1686 static int
1687 flow_tcf_validate_vxlan_decap_udp(const struct rte_flow_item *udp,
1688                                   struct rte_flow_error *error)
1689 {
1690         const struct rte_flow_item_udp *spec = udp->spec;
1691         const struct rte_flow_item_udp *mask = udp->mask;
1692
1693         if (!spec)
1694                 /*
1695                  * Specification for UDP ports cannot be empty
1696                  * because it is required as a decap parameter.
1697                  */
1698                 return rte_flow_error_set(error, EINVAL,
1699                                           RTE_FLOW_ERROR_TYPE_ITEM, udp,
1700                                           "NULL UDP port specification"
1701                                           " for VXLAN decapsulation");
1702         if (!mask)
1703                 mask = &rte_flow_item_udp_mask;
1704         if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
1705                 if (mask->hdr.dst_port != RTE_BE16(0xffff))
1706                         return rte_flow_error_set
1707                                         (error, ENOTSUP,
1708                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1709                                          "no support for partial mask on"
1710                                          " \"udp.hdr.dst_port\" field");
1711                 if (!spec->hdr.dst_port)
1712                         return rte_flow_error_set
1713                                         (error, EINVAL,
1714                                          RTE_FLOW_ERROR_TYPE_ITEM, udp,
1715                                          "zero decap local UDP port");
1716         } else {
1717                 return rte_flow_error_set(error, EINVAL,
1718                                           RTE_FLOW_ERROR_TYPE_ITEM, udp,
1719                                           "outer UDP destination port must be "
1720                                           "specified for vxlan decapsulation");
1721         }
1722         if (mask->hdr.src_port != RTE_BE16(0x0000)) {
1723                 if (mask->hdr.src_port != RTE_BE16(0xffff))
1724                         return rte_flow_error_set
1725                                         (error, ENOTSUP,
1726                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1727                                          "no support for partial mask on"
1728                                          " \"udp.hdr.src_port\" field");
1729                 DRV_LOG(WARNING,
1730                         "outer UDP local port cannot be "
1731                         "forced for VXLAN decapsulation, "
1732                         "parameter ignored");
1733         }
1734         return 0;
1735 }
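
The decapsulation counterpart, as a hypothetical sketch with example names
and port: the pinned destination port is the local one the VXLAN device
listens on, and this UDP item has to precede the VXLAN item in the pattern
so it is recorded as the outer UDP layer:

/* Hypothetical example: the outer UDP item of a VXLAN decap pattern,
 * accepted by flow_tcf_validate_vxlan_decap_udp(). Not part of the driver.
 */
static const struct rte_flow_item_udp example_decap_udp_spec = {
	.hdr = { .dst_port = RTE_BE16(4789) }, /* Local VXLAN UDP port. */
};

static const struct rte_flow_item_udp example_decap_udp_mask = {
	.hdr = { .dst_port = RTE_BE16(0xffff) }, /* Full mask required. */
};

static const struct rte_flow_item example_decap_udp_item = {
	.type = RTE_FLOW_ITEM_TYPE_UDP,
	.spec = &example_decap_udp_spec,
	.mask = &example_decap_udp_mask,
};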
1736
1737 /**
1738  * Validate flow for E-Switch.
1739  *
1740  * @param[in] priv
1741  *   Pointer to the priv structure.
1742  * @param[in] attr
1743  *   Pointer to the flow attributes.
1744  * @param[in] items
1745  *   Pointer to the list of items.
1746  * @param[in] actions
1747  *   Pointer to the list of actions.
1748  * @param[out] error
1749  *   Pointer to the error structure.
1750  *
1751  * @return
1752  *   0 on success, a negative errno value otherwise and rte_errno is set.
1753  */
1754 static int
1755 flow_tcf_validate(struct rte_eth_dev *dev,
1756                   const struct rte_flow_attr *attr,
1757                   const struct rte_flow_item items[],
1758                   const struct rte_flow_action actions[],
1759                   struct rte_flow_error *error)
1760 {
1761         union {
1762                 const struct rte_flow_item_port_id *port_id;
1763                 const struct rte_flow_item_eth *eth;
1764                 const struct rte_flow_item_vlan *vlan;
1765                 const struct rte_flow_item_ipv4 *ipv4;
1766                 const struct rte_flow_item_ipv6 *ipv6;
1767                 const struct rte_flow_item_tcp *tcp;
1768                 const struct rte_flow_item_udp *udp;
1769                 const struct rte_flow_item_vxlan *vxlan;
1770         } spec, mask;
1771         union {
1772                 const struct rte_flow_action_port_id *port_id;
1773                 const struct rte_flow_action_jump *jump;
1774                 const struct rte_flow_action_of_push_vlan *of_push_vlan;
1775                 const struct rte_flow_action_of_set_vlan_vid *
1776                         of_set_vlan_vid;
1777                 const struct rte_flow_action_of_set_vlan_pcp *
1778                         of_set_vlan_pcp;
1779                 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1780                 const struct rte_flow_action_set_ipv4 *set_ipv4;
1781                 const struct rte_flow_action_set_ipv6 *set_ipv6;
1782         } conf;
1783         const struct rte_flow_item *outer_udp = NULL;
1784         rte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);
1785         rte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);
1786         rte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);
1787         uint64_t item_flags = 0;
1788         uint64_t action_flags = 0;
1789         uint8_t next_protocol = 0xff;
1790         unsigned int tcm_ifindex = 0;
1791         uint8_t pedit_validated = 0;
1792         struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
1793         struct rte_eth_dev *port_id_dev = NULL;
1794         bool in_port_id_set = false;
1795         int ret;
1796
1797         claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
1798                                                 PTOI_TABLE_SZ_MAX(dev)));
1799         ret = flow_tcf_validate_attributes(attr, error);
1800         if (ret < 0)
1801                 return ret;
1802         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1803                 unsigned int i;
1804                 uint64_t current_action_flag = 0;
1805
1806                 switch (actions->type) {
1807                 case RTE_FLOW_ACTION_TYPE_VOID:
1808                         break;
1809                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
1810                         current_action_flag = MLX5_FLOW_ACTION_PORT_ID;
1811                         if (!actions->conf)
1812                                 break;
1813                         conf.port_id = actions->conf;
1814                         if (conf.port_id->original)
1815                                 i = 0;
1816                         else
1817                                 for (i = 0; ptoi[i].ifindex; ++i)
1818                                         if (ptoi[i].port_id == conf.port_id->id)
1819                                                 break;
1820                         if (!ptoi[i].ifindex)
1821                                 return rte_flow_error_set
1822                                         (error, ENODEV,
1823                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1824                                          conf.port_id,
1825                                          "missing data to convert port ID to"
1826                                          " ifindex");
1827                         port_id_dev = &rte_eth_devices[conf.port_id->id];
1828                         break;
1829                 case RTE_FLOW_ACTION_TYPE_JUMP:
1830                         current_action_flag = MLX5_FLOW_ACTION_JUMP;
1831                         if (!actions->conf)
1832                                 break;
1833                         conf.jump = actions->conf;
1834                         if (attr->group >= conf.jump->group)
1835                                 return rte_flow_error_set
1836                                         (error, ENOTSUP,
1837                                          RTE_FLOW_ERROR_TYPE_ACTION,
1838                                          actions,
1839                                          "can only jump to a higher group");
1840                         break;
1841                 case RTE_FLOW_ACTION_TYPE_DROP:
1842                         current_action_flag = MLX5_FLOW_ACTION_DROP;
1843                         break;
1844                 case RTE_FLOW_ACTION_TYPE_COUNT:
1845                         break;
1846                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
1847                         current_action_flag = MLX5_FLOW_ACTION_OF_POP_VLAN;
1848                         break;
1849                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
1850                         rte_be16_t ethertype;
1851
1852                         current_action_flag = MLX5_FLOW_ACTION_OF_PUSH_VLAN;
1853                         if (!actions->conf)
1854                                 break;
1855                         conf.of_push_vlan = actions->conf;
1856                         ethertype = conf.of_push_vlan->ethertype;
1857                         if (ethertype != RTE_BE16(ETH_P_8021Q) &&
1858                             ethertype != RTE_BE16(ETH_P_8021AD))
1859                                 return rte_flow_error_set
1860                                         (error, EINVAL,
1861                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
1862                                          "vlan push TPID must be "
1863                                          "802.1Q or 802.1AD");
1864                         break;
1865                 }
1866                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
1867                         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1868                                 return rte_flow_error_set
1869                                         (error, ENOTSUP,
1870                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
1871                                          "vlan modify is not supported,"
1872                                          " set action must follow push action");
1873                         current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
1874                         break;
1875                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
1876                         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1877                                 return rte_flow_error_set
1878                                         (error, ENOTSUP,
1879                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
1880                                          "vlan modify is not supported,"
1881                                          " set action must follow push action");
1882                         current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
1883                         break;
1884                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1885                         current_action_flag = MLX5_FLOW_ACTION_VXLAN_DECAP;
1886                         break;
1887                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1888                         ret = flow_tcf_validate_vxlan_encap(actions, error);
1889                         if (ret < 0)
1890                                 return ret;
1891                         current_action_flag = MLX5_FLOW_ACTION_VXLAN_ENCAP;
1892                         break;
1893                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
1894                         current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_SRC;
1895                         break;
1896                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
1897                         current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_DST;
1898                         break;
1899                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
1900                         current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_SRC;
1901                         break;
1902                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
1903                         current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_DST;
1904                         break;
1905                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
1906                         current_action_flag = MLX5_FLOW_ACTION_SET_TP_SRC;
1907                         break;
1908                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
1909                         current_action_flag = MLX5_FLOW_ACTION_SET_TP_DST;
1910                         break;
1911                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
1912                         current_action_flag = MLX5_FLOW_ACTION_SET_TTL;
1913                         break;
1914                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
1915                         current_action_flag = MLX5_FLOW_ACTION_DEC_TTL;
1916                         break;
1917                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
1918                         current_action_flag = MLX5_FLOW_ACTION_SET_MAC_SRC;
1919                         break;
1920                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
1921                         current_action_flag = MLX5_FLOW_ACTION_SET_MAC_DST;
1922                         break;
1923                 default:
1924                         return rte_flow_error_set(error, ENOTSUP,
1925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1926                                                   actions,
1927                                                   "action not supported");
1928                 }
1929                 if (current_action_flag & MLX5_TCF_CONFIG_ACTIONS) {
1930                         if (!actions->conf)
1931                                 return rte_flow_error_set
1932                                         (error, EINVAL,
1933                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1934                                          actions,
1935                                          "action configuration not set");
1936                 }
1937                 if ((current_action_flag & MLX5_TCF_PEDIT_ACTIONS) &&
1938                     pedit_validated)
1939                         return rte_flow_error_set(error, ENOTSUP,
1940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1941                                                   actions,
1942                                                   "set actions should be "
1943                                                   "listed successively");
1944                 if ((current_action_flag & ~MLX5_TCF_PEDIT_ACTIONS) &&
1945                     (action_flags & MLX5_TCF_PEDIT_ACTIONS))
1946                         pedit_validated = 1;
1947                 if ((current_action_flag & MLX5_TCF_FATE_ACTIONS) &&
1948                     (action_flags & MLX5_TCF_FATE_ACTIONS))
1949                         return rte_flow_error_set(error, EINVAL,
1950                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1951                                                   actions,
1952                                                   "can't have multiple fate"
1953                                                   " actions");
1954                 if ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&
1955                     (action_flags & MLX5_TCF_VXLAN_ACTIONS))
1956                         return rte_flow_error_set(error, EINVAL,
1957                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1958                                                   actions,
1959                                                   "can't have multiple vxlan"
1960                                                   " actions");
1961                 if ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&
1962                     (action_flags & MLX5_TCF_VLAN_ACTIONS))
1963                         return rte_flow_error_set(error, ENOTSUP,
1964                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1965                                                   actions,
1966                                                   "can't have vxlan and vlan"
1967                                                   " actions in the same rule");
1968                 action_flags |= current_action_flag;
1969         }
1970         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1971                 unsigned int i;
1972
1973                 switch (items->type) {
1974                 case RTE_FLOW_ITEM_TYPE_VOID:
1975                         break;
1976                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
1977                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1978                                 return rte_flow_error_set
1979                                         (error, ENOTSUP,
1980                                          RTE_FLOW_ERROR_TYPE_ITEM, items,
1981                                          "inner tunnel port id"
1982                                          " item is not supported");
1983                         mask.port_id = flow_tcf_item_mask
1984                                 (items, &rte_flow_item_port_id_mask,
1985                                  &flow_tcf_mask_supported.port_id,
1986                                  &flow_tcf_mask_empty.port_id,
1987                                  sizeof(flow_tcf_mask_supported.port_id),
1988                                  error);
1989                         if (!mask.port_id)
1990                                 return -rte_errno;
1991                         if (mask.port_id == &flow_tcf_mask_empty.port_id) {
1992                                 in_port_id_set = 1;
1993                                 break;
1994                         }
1995                         spec.port_id = items->spec;
1996                         if (mask.port_id->id && mask.port_id->id != 0xffffffff)
1997                                 return rte_flow_error_set
1998                                         (error, ENOTSUP,
1999                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2000                                          mask.port_id,
2001                                          "no support for partial mask on"
2002                                          " \"id\" field");
2003                         if (!mask.port_id->id)
2004                                 i = 0;
2005                         else
2006                                 for (i = 0; ptoi[i].ifindex; ++i)
2007                                         if (ptoi[i].port_id == spec.port_id->id)
2008                                                 break;
2009                         if (!ptoi[i].ifindex)
2010                                 return rte_flow_error_set
2011                                         (error, ENODEV,
2012                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2013                                          spec.port_id,
2014                                          "missing data to convert port ID to"
2015                                          " ifindex");
2016                         if (in_port_id_set && ptoi[i].ifindex != tcm_ifindex)
2017                                 return rte_flow_error_set
2018                                         (error, ENOTSUP,
2019                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2020                                          spec.port_id,
2021                                          "cannot match traffic for"
2022                                          " several port IDs through"
2023                                          " a single flow rule");
2024                         tcm_ifindex = ptoi[i].ifindex;
2025                         in_port_id_set = 1;
2026                         break;
2027                 case RTE_FLOW_ITEM_TYPE_ETH:
2028                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2029                                                           error);
2030                         if (ret < 0)
2031                                 return ret;
2032                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
2033                                       MLX5_FLOW_LAYER_INNER_L2 :
2034                                       MLX5_FLOW_LAYER_OUTER_L2;
2035                         /* TODO:
2036                          * Redundant check due to different supported mask.
2037                          * Same for the rest of items.
2038                          */
2039                         mask.eth = flow_tcf_item_mask
2040                                 (items, &rte_flow_item_eth_mask,
2041                                  &flow_tcf_mask_supported.eth,
2042                                  &flow_tcf_mask_empty.eth,
2043                                  sizeof(flow_tcf_mask_supported.eth),
2044                                  error);
2045                         if (!mask.eth)
2046                                 return -rte_errno;
2047                         if (mask.eth->type && mask.eth->type !=
2048                             RTE_BE16(0xffff))
2049                                 return rte_flow_error_set
2050                                         (error, ENOTSUP,
2051                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2052                                          mask.eth,
2053                                          "no support for partial mask on"
2054                                          " \"type\" field");
2055                         assert(items->spec);
2056                         spec.eth = items->spec;
2057                         if (mask.eth->type &&
2058                             (item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2059                             inner_etype != RTE_BE16(ETH_P_ALL) &&
2060                             inner_etype != spec.eth->type)
2061                                 return rte_flow_error_set
2062                                         (error, EINVAL,
2063                                          RTE_FLOW_ERROR_TYPE_ITEM,
2064                                          items,
2065                                          "inner eth_type conflict");
2066                         if (mask.eth->type &&
2067                             !(item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2068                             outer_etype != RTE_BE16(ETH_P_ALL) &&
2069                             outer_etype != spec.eth->type)
2070                                 return rte_flow_error_set
2071                                         (error, EINVAL,
2072                                          RTE_FLOW_ERROR_TYPE_ITEM,
2073                                          items,
2074                                          "outer eth_type conflict");
2075                         if (mask.eth->type) {
2076                                 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2077                                         inner_etype = spec.eth->type;
2078                                 else
2079                                         outer_etype = spec.eth->type;
2080                         }
2081                         break;
2082                 case RTE_FLOW_ITEM_TYPE_VLAN:
2083                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2084                                 return rte_flow_error_set
2085                                         (error, ENOTSUP,
2086                                          RTE_FLOW_ERROR_TYPE_ITEM, items,
2087                                          "inner tunnel VLAN"
2088                                          " is not supported");
2089                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2090                                                            error);
2091                         if (ret < 0)
2092                                 return ret;
2093                         item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
2094                         mask.vlan = flow_tcf_item_mask
2095                                 (items, &rte_flow_item_vlan_mask,
2096                                  &flow_tcf_mask_supported.vlan,
2097                                  &flow_tcf_mask_empty.vlan,
2098                                  sizeof(flow_tcf_mask_supported.vlan),
2099                                  error);
2100                         if (!mask.vlan)
2101                                 return -rte_errno;
2102                         if ((mask.vlan->tci & RTE_BE16(0xe000) &&
2103                              (mask.vlan->tci & RTE_BE16(0xe000)) !=
2104                               RTE_BE16(0xe000)) ||
2105                             (mask.vlan->tci & RTE_BE16(0x0fff) &&
2106                              (mask.vlan->tci & RTE_BE16(0x0fff)) !=
2107                               RTE_BE16(0x0fff)) ||
2108                             (mask.vlan->inner_type &&
2109                              mask.vlan->inner_type != RTE_BE16(0xffff)))
2110                                 return rte_flow_error_set
2111                                         (error, ENOTSUP,
2112                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2113                                          mask.vlan,
2114                                          "no support for partial masks on"
2115                                          " \"tci\" (PCP and VID parts) and"
2116                                          " \"inner_type\" fields");
2117                         if (outer_etype != RTE_BE16(ETH_P_ALL) &&
2118                             outer_etype != RTE_BE16(ETH_P_8021Q))
2119                                 return rte_flow_error_set
2120                                         (error, EINVAL,
2121                                          RTE_FLOW_ERROR_TYPE_ITEM,
2122                                          items,
2123                                          "outer eth_type conflict,"
2124                                          " must be 802.1Q");
2125                         outer_etype = RTE_BE16(ETH_P_8021Q);
2126                         assert(items->spec);
2127                         spec.vlan = items->spec;
2128                         if (mask.vlan->inner_type &&
2129                             vlan_etype != RTE_BE16(ETH_P_ALL) &&
2130                             vlan_etype != spec.vlan->inner_type)
2131                                 return rte_flow_error_set
2132                                         (error, EINVAL,
2133                                          RTE_FLOW_ERROR_TYPE_ITEM,
2134                                          items,
2135                                          "vlan eth_type conflict");
2136                         if (mask.vlan->inner_type)
2137                                 vlan_etype = spec.vlan->inner_type;
2138                         break;
2139                 case RTE_FLOW_ITEM_TYPE_IPV4:
2140                         ret = mlx5_flow_validate_item_ipv4
2141                                         (items, item_flags,
2142                                          &flow_tcf_mask_supported.ipv4, error);
2143                         if (ret < 0)
2144                                 return ret;
2145                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
2146                                       MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2147                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2148                         mask.ipv4 = flow_tcf_item_mask
2149                                 (items, &rte_flow_item_ipv4_mask,
2150                                  &flow_tcf_mask_supported.ipv4,
2151                                  &flow_tcf_mask_empty.ipv4,
2152                                  sizeof(flow_tcf_mask_supported.ipv4),
2153                                  error);
2154                         if (!mask.ipv4)
2155                                 return -rte_errno;
2156                         if (mask.ipv4->hdr.next_proto_id &&
2157                             mask.ipv4->hdr.next_proto_id != 0xff)
2158                                 return rte_flow_error_set
2159                                         (error, ENOTSUP,
2160                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2161                                          mask.ipv4,
2162                                          "no support for partial mask on"
2163                                          " \"hdr.next_proto_id\" field");
2164                         else if (mask.ipv4->hdr.next_proto_id)
2165                                 next_protocol =
2166                                         ((const struct rte_flow_item_ipv4 *)
2167                                          (items->spec))->hdr.next_proto_id;
2168                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2169                                 if (inner_etype != RTE_BE16(ETH_P_ALL) &&
2170                                     inner_etype != RTE_BE16(ETH_P_IP))
2171                                         return rte_flow_error_set
2172                                                 (error, EINVAL,
2173                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2174                                                  items,
2175                                                  "inner eth_type conflict,"
2176                                                  " IPv4 is required");
2177                                 inner_etype = RTE_BE16(ETH_P_IP);
2178                         } else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
2179                                 if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
2180                                     vlan_etype != RTE_BE16(ETH_P_IP))
2181                                         return rte_flow_error_set
2182                                                 (error, EINVAL,
2183                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2184                                                  items,
2185                                                  "vlan eth_type conflict,"
2186                                                  " IPv4 is required");
2187                                 vlan_etype = RTE_BE16(ETH_P_IP);
2188                         } else {
2189                                 if (outer_etype != RTE_BE16(ETH_P_ALL) &&
2190                                     outer_etype != RTE_BE16(ETH_P_IP))
2191                                         return rte_flow_error_set
2192                                                 (error, EINVAL,
2193                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2194                                                  items,
2195                                                  "eth_type conflict,"
2196                                                  " IPv4 is required");
2197                                 outer_etype = RTE_BE16(ETH_P_IP);
2198                         }
2199                         break;
2200                 case RTE_FLOW_ITEM_TYPE_IPV6:
2201                         ret = mlx5_flow_validate_item_ipv6
2202                                         (items, item_flags,
2203                                          &flow_tcf_mask_supported.ipv6, error);
2204                         if (ret < 0)
2205                                 return ret;
2206                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
2207                                       MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2208                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2209                         mask.ipv6 = flow_tcf_item_mask
2210                                 (items, &rte_flow_item_ipv6_mask,
2211                                  &flow_tcf_mask_supported.ipv6,
2212                                  &flow_tcf_mask_empty.ipv6,
2213                                  sizeof(flow_tcf_mask_supported.ipv6),
2214                                  error);
2215                         if (!mask.ipv6)
2216                                 return -rte_errno;
2217                         if (mask.ipv6->hdr.proto &&
2218                             mask.ipv6->hdr.proto != 0xff)
2219                                 return rte_flow_error_set
2220                                         (error, ENOTSUP,
2221                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2222                                          mask.ipv6,
2223                                          "no support for partial mask on"
2224                                          " \"hdr.proto\" field");
2225                         else if (mask.ipv6->hdr.proto)
2226                                 next_protocol =
2227                                         ((const struct rte_flow_item_ipv6 *)
2228                                          (items->spec))->hdr.proto;
2229                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
2230                                 if (inner_etype != RTE_BE16(ETH_P_ALL) &&
2231                                     inner_etype != RTE_BE16(ETH_P_IPV6))
2232                                         return rte_flow_error_set
2233                                                 (error, EINVAL,
2234                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2235                                                  items,
2236                                                  "inner eth_type conflict,"
2237                                                  " IPv6 is required");
2238                                 inner_etype = RTE_BE16(ETH_P_IPV6);
2239                         } else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
2240                                 if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
2241                                     vlan_etype != RTE_BE16(ETH_P_IPV6))
2242                                         return rte_flow_error_set
2243                                                 (error, EINVAL,
2244                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2245                                                  items,
2246                                                  "vlan eth_type conflict,"
2247                                                  " IPv6 is required");
2248                                 vlan_etype = RTE_BE16(ETH_P_IPV6);
2249                         } else {
2250                                 if (outer_etype != RTE_BE16(ETH_P_ALL) &&
2251                                     outer_etype != RTE_BE16(ETH_P_IPV6))
2252                                         return rte_flow_error_set
2253                                                 (error, EINVAL,
2254                                                  RTE_FLOW_ERROR_TYPE_ITEM,
2255                                                  items,
2256                                                  "eth_type conflict,"
2257                                                  " IPv6 is required");
2258                                 outer_etype = RTE_BE16(ETH_P_IPV6);
2259                         }
2260                         break;
2261                 case RTE_FLOW_ITEM_TYPE_UDP:
2262                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2263                                                           next_protocol, error);
2264                         if (ret < 0)
2265                                 return ret;
2266                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
2267                                       MLX5_FLOW_LAYER_INNER_L4_UDP :
2268                                       MLX5_FLOW_LAYER_OUTER_L4_UDP;
2269                         mask.udp = flow_tcf_item_mask
2270                                 (items, &rte_flow_item_udp_mask,
2271                                  &flow_tcf_mask_supported.udp,
2272                                  &flow_tcf_mask_empty.udp,
2273                                  sizeof(flow_tcf_mask_supported.udp),
2274                                  error);
2275                         if (!mask.udp)
2276                                 return -rte_errno;
2277                         /*
2278                          * Save the presumed outer UDP item for an extra check
2279                          * in case a tunnel item is found later in the list.
2280                          */
2281                         if (!(item_flags & MLX5_FLOW_LAYER_TUNNEL))
2282                                 outer_udp = items;
2283                         break;
2284                 case RTE_FLOW_ITEM_TYPE_TCP:
2285                         ret = mlx5_flow_validate_item_tcp
2286                                              (items, item_flags,
2287                                               next_protocol,
2288                                               &flow_tcf_mask_supported.tcp,
2289                                               error);
2290                         if (ret < 0)
2291                                 return ret;
2292                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
2293                                       MLX5_FLOW_LAYER_INNER_L4_TCP :
2294                                       MLX5_FLOW_LAYER_OUTER_L4_TCP;
2295                         mask.tcp = flow_tcf_item_mask
2296                                 (items, &rte_flow_item_tcp_mask,
2297                                  &flow_tcf_mask_supported.tcp,
2298                                  &flow_tcf_mask_empty.tcp,
2299                                  sizeof(flow_tcf_mask_supported.tcp),
2300                                  error);
2301                         if (!mask.tcp)
2302                                 return -rte_errno;
2303                         break;
2304                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2305                         if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)
2306                                 return rte_flow_error_set
2307                                         (error, ENOTSUP,
2308                                          RTE_FLOW_ERROR_TYPE_ITEM, items,
2309                                          "vxlan tunnel over vlan"
2310                                          " is not supported");
2311                         ret = mlx5_flow_validate_item_vxlan(items,
2312                                                             item_flags, error);
2313                         if (ret < 0)
2314                                 return ret;
2315                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
2316                         mask.vxlan = flow_tcf_item_mask
2317                                 (items, &rte_flow_item_vxlan_mask,
2318                                  &flow_tcf_mask_supported.vxlan,
2319                                  &flow_tcf_mask_empty.vxlan,
2320                                  sizeof(flow_tcf_mask_supported.vxlan), error);
2321                         if (!mask.vxlan)
2322                                 return -rte_errno;
2323                         if (mask.vxlan->vni[0] != 0xff ||
2324                             mask.vxlan->vni[1] != 0xff ||
2325                             mask.vxlan->vni[2] != 0xff)
2326                                 return rte_flow_error_set
2327                                         (error, ENOTSUP,
2328                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2329                                          mask.vxlan,
2330                                          "no support for partial or "
2331                                          "empty mask on \"vxlan.vni\" field");
2332                         /*
2333                          * The VNI item implies a VXLAN tunnel, which requires
2334                          * at least the outer destination UDP port to be
2335                          * specified without wildcards so the kernel can select
2336                          * the virtual VXLAN device by port. An outer IPv4 or
2337                          * IPv6 item must also be present (wildcards or even a
2338                          * zero mask are allowed) to let the driver know the
2339                          * tunnel IP version and process UDP traffic correctly.
2340                          */
2341                         if (!(item_flags &
2342                              (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
2343                               MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2344                                 return rte_flow_error_set
2345                                                  (error, EINVAL,
2346                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2347                                                   NULL,
2348                                                   "no outer IP pattern found"
2349                                                   " for vxlan tunnel");
2350                         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2351                                 return rte_flow_error_set
2352                                                  (error, EINVAL,
2353                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2354                                                   NULL,
2355                                                   "no outer UDP pattern found"
2356                                                   " for vxlan tunnel");
2357                         /*
2358                          * All items preceding the tunnel item become outer
2359                          * ones and we should do extra validation for them
2360                          * due to tc limitations for tunnel outer parameters.
2361                          * Currently only the outer UDP item requires an extra
2362                          * check; use the saved pointer instead of a rescan.
2363                          */
2364                         assert(outer_udp);
2365                         ret = flow_tcf_validate_vxlan_decap_udp
2366                                                 (outer_udp, error);
2367                         if (ret < 0)
2368                                 return ret;
2369                         /* Reset L4 protocol for inner parameters. */
2370                         next_protocol = 0xff;
2371                         break;
2372                 default:
2373                         return rte_flow_error_set(error, ENOTSUP,
2374                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2375                                                   items, "item not supported");
2376                 }
2377         }
2378         if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
2379             (action_flags & MLX5_FLOW_ACTION_DROP))
2380                 return rte_flow_error_set(error, ENOTSUP,
2381                                           RTE_FLOW_ERROR_TYPE_ACTION,
2382                                           actions,
2383                                           "set action is not compatible with "
2384                                           "drop action");
2385         if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
2386             !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
2387                 return rte_flow_error_set(error, ENOTSUP,
2388                                           RTE_FLOW_ERROR_TYPE_ACTION,
2389                                           actions,
2390                                           "set action must be followed by "
2391                                           "port_id action");
2392         if (action_flags &
2393            (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST)) {
2394                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4))
2395                         return rte_flow_error_set(error, EINVAL,
2396                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2397                                                   actions,
2398                                                   "no ipv4 item found in"
2399                                                   " pattern");
2400         }
2401         if (action_flags &
2402            (MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST)) {
2403                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))
2404                         return rte_flow_error_set(error, EINVAL,
2405                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2406                                                   actions,
2407                                                   "no ipv6 item found in"
2408                                                   " pattern");
2409         }
2410         if (action_flags &
2411            (MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST)) {
2412                 if (!(item_flags &
2413                      (MLX5_FLOW_LAYER_OUTER_L4_UDP |
2414                       MLX5_FLOW_LAYER_OUTER_L4_TCP)))
2415                         return rte_flow_error_set(error, EINVAL,
2416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2417                                                   actions,
2418                                                   "no TCP/UDP item found in"
2419                                                   " pattern");
2420         }
2421         /*
2422          * FW syndrome (0xA9C090):
2423          *     set_flow_table_entry: push vlan action fte in fdb can ONLY be
2424          *     forward to the uplink.
2425          */
2426         if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2427             (action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
2428             ((struct priv *)port_id_dev->data->dev_private)->representor)
2429                 return rte_flow_error_set(error, ENOTSUP,
2430                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2431                                           "vlan push can only be applied"
2432                                           " when forwarding to uplink port");
2433         /*
2434          * FW syndrome (0x294609):
2435          *     set_flow_table_entry: modify/pop/push actions in fdb flow table
2436          *     are supported only while forwarding to vport.
2437          */
2438         if ((action_flags & MLX5_TCF_VLAN_ACTIONS) &&
2439             !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
2440                 return rte_flow_error_set(error, ENOTSUP,
2441                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2442                                           "vlan actions are supported"
2443                                           " only with port_id action");
2444         if ((action_flags & MLX5_TCF_VXLAN_ACTIONS) &&
2445             !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
2446                 return rte_flow_error_set(error, ENOTSUP,
2447                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2448                                           "vxlan actions are supported"
2449                                           " only with port_id action");
2450         if (!(action_flags & MLX5_TCF_FATE_ACTIONS))
2451                 return rte_flow_error_set(error, EINVAL,
2452                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2453                                           "no fate action is found");
2454         if (action_flags &
2455            (MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL)) {
2456                 if (!(item_flags &
2457                      (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
2458                       MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2459                         return rte_flow_error_set(error, EINVAL,
2460                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2461                                                   actions,
2462                                                   "no IP found in pattern");
2463         }
2464         if (action_flags &
2465             (MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)) {
2466                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
2467                         return rte_flow_error_set(error, ENOTSUP,
2468                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2469                                                   actions,
2470                                                   "no ethernet found in"
2471                                                   " pattern");
2472         }
2473         if ((action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) &&
2474             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
2475                 return rte_flow_error_set(error, EINVAL,
2476                                           RTE_FLOW_ERROR_TYPE_ACTION,
2477                                           NULL,
2478                                           "no VNI pattern found"
2479                                           " for vxlan decap action");
2480         if ((action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) &&
2481             (item_flags & MLX5_FLOW_LAYER_TUNNEL))
2482                 return rte_flow_error_set(error, EINVAL,
2483                                           RTE_FLOW_ERROR_TYPE_ACTION,
2484                                           NULL,
2485                                           "vxlan encap not supported"
2486                                           " for tunneled traffic");
2487         return 0;
2488 }
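
/*
 * Illustrative sketch only, kept out of the build: the minimal shape of
 * a VXLAN decap rule accepted by the checks above -- outer Ethernet,
 * IPv4 and UDP items plus a VNI item in the pattern, and the mandatory
 * port_id fate action following the decap action. Real rules must also
 * supply item spec/mask data (e.g. the outer UDP destination port and
 * the VNI), omitted here for brevity; all values are hypothetical.
 */
#if 0
static const struct rte_flow_action_port_id example_port = { .id = 1 };
static const struct rte_flow_item example_decap_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action example_decap_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &example_port },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif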
2489
2490 /**
2491  * Calculate maximum size of memory for flow items of Linux TC flower.
2492  *
2493  * @param[in] attr
2494  *   Pointer to the flow attributes.
2495  * @param[in] items
2496  *   Pointer to the list of items.
2497  * @param[out] action_flags
2498  *   Pointer to the detected actions.
2499  *
2500  * @return
2501  *   Maximum size of memory for items.
2502  */
2503 static int
2504 flow_tcf_get_items_size(const struct rte_flow_attr *attr,
2505                         const struct rte_flow_item items[],
2506                         uint64_t *action_flags)
2507 {
2508         int size = 0;
2509
2510         size += SZ_NLATTR_STRZ_OF("flower") +
2511                 SZ_NLATTR_TYPE_OF(uint16_t) + /* Outer ether type. */
2512                 SZ_NLATTR_NEST + /* TCA_OPTIONS. */
2513                 SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CLS_FLAGS_SKIP_SW. */
2514         if (attr->group > 0)
2515                 size += SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CHAIN. */
2516         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2517                 switch (items->type) {
2518                 case RTE_FLOW_ITEM_TYPE_VOID:
2519                         break;
2520                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2521                         break;
2522                 case RTE_FLOW_ITEM_TYPE_ETH:
2523                         size += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
2524                                 /* dst/src MAC addr and mask. */
2525                         break;
2526                 case RTE_FLOW_ITEM_TYPE_VLAN:
2527                         size += SZ_NLATTR_TYPE_OF(uint16_t) +
2528                                 /* VLAN Ether type. */
2529                                 SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
2530                                 SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
2531                         break;
2532                 case RTE_FLOW_ITEM_TYPE_IPV4: {
2533                         const struct rte_flow_item_ipv4 *ipv4 = items->mask;
2534
2535                         size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
2536                                 SZ_NLATTR_TYPE_OF(uint32_t) * 4;
2537                                 /* dst/src IP addr and mask. */
2538                         if (ipv4 && ipv4->hdr.time_to_live)
2539                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2540                         if (ipv4 && ipv4->hdr.type_of_service)
2541                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2542                         break;
2543                 }
2544                 case RTE_FLOW_ITEM_TYPE_IPV6: {
2545                         const struct rte_flow_item_ipv6 *ipv6 = items->mask;
2546
2547                         size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
2548                                 SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;
2549                                 /* dst/src IP addr and mask. */
2550                         if (ipv6 && ipv6->hdr.hop_limits)
2551                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2552                         if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
2553                                      (0xfful << IPV6_HDR_TC_SHIFT)))
2554                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2555                         break;
2556                 }
2557                 case RTE_FLOW_ITEM_TYPE_UDP:
2558                         size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
2559                                 SZ_NLATTR_TYPE_OF(uint16_t) * 4;
2560                                 /* dst/src port and mask. */
2561                         break;
2562                 case RTE_FLOW_ITEM_TYPE_TCP:
2563                         size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
2564                                 SZ_NLATTR_TYPE_OF(uint16_t) * 4;
2565                                 /* dst/src port and mask. */
2566                         break;
2567                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2568                         size += SZ_NLATTR_TYPE_OF(uint32_t);
2569                         /*
2570                          * There might be no VXLAN decap action in the action
2571                          * list, nonetheless the VXLAN tunnel flow requires the
2572                          * decap structure to be correctly applied to the VXLAN
2573                          * device, so set the flag to create the structure. The
2574                          * translation routine will not put the decap action in
2575                          * the Netlink message if there is no actual action in
2576                          * the list.
2577                          */
2578                         *action_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
2579                         break;
2580                 default:
2581                         DRV_LOG(WARNING,
2582                                 "unsupported item %p type %d,"
2583                                 " items must be validated before flow creation",
2584                                 (const void *)items, items->type);
2585                         break;
2586                 }
2587         }
2588         return size;
2589 }
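
/*
 * Worked example (illustrative): for a pattern of ETH / IPV4 with the
 * TTL field present in the mask, the loop above adds to the base size:
 *   ETH:  SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4  dst/src MACs and masks
 *   IPV4: SZ_NLATTR_TYPE_OF(uint8_t)             IP proto
 *         + SZ_NLATTR_TYPE_OF(uint32_t) * 4      dst/src addrs and masks
 *         + SZ_NLATTR_TYPE_OF(uint8_t) * 2       TTL value and mask
 * Each SZ_NLATTR_* term is assumed to cover an aligned Netlink attribute
 * header plus payload, so the sum is an upper bound, not an exact fit.
 */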
2590
2591 /**
2592  * Calculate size of memory to store the VXLAN encapsulation
2593  * related items in the Netlink message buffer. The item list
2594  * is specified by the RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action.
2595  * The item list should be validated beforehand.
2596  *
2597  * @param[in] action
2598  *   RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
2599  *   List of pattern items to scan data from.
2600  *
2601  * @return
2602  *   The size of the part of the Netlink message buffer to store the
2603  *   VXLAN encapsulation item attributes.
2604  */
2605 static int
2606 flow_tcf_vxlan_encap_size(const struct rte_flow_action *action)
2607 {
2608         const struct rte_flow_item *items;
2609         int size = 0;
2610
2611         assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
2612         assert(action->conf);
2613
2614         items = ((const struct rte_flow_action_vxlan_encap *)
2615                                         action->conf)->definition;
2616         assert(items);
2617         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2618                 switch (items->type) {
2619                 case RTE_FLOW_ITEM_TYPE_VOID:
2620                         break;
2621                 case RTE_FLOW_ITEM_TYPE_ETH:
2622                         /* This item does not require message buffer. */
2623                         break;
2624                 case RTE_FLOW_ITEM_TYPE_IPV4: {
2625                         const struct rte_flow_item_ipv4 *ipv4 = items->mask;
2626
2627                         size += SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2;
2628                         if (ipv4 && ipv4->hdr.time_to_live)
2629                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2630                         if (ipv4 && ipv4->hdr.type_of_service)
2631                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2632                         break;
2633                 }
2634                 case RTE_FLOW_ITEM_TYPE_IPV6: {
2635                         const struct rte_flow_item_ipv6 *ipv6 = items->mask;
2636
2637                         size += SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 2;
2638                         if (ipv6 && ipv6->hdr.hop_limits)
2639                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2640                         if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
2641                                      (0xfful << IPV6_HDR_TC_SHIFT)))
2642                                 size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
2643                         break;
2644                 }
2645                 case RTE_FLOW_ITEM_TYPE_UDP: {
2646                         const struct rte_flow_item_udp *udp = items->mask;
2647
2648                         size += SZ_NLATTR_TYPE_OF(uint16_t);
2649                         if (!udp || udp->hdr.src_port != RTE_BE16(0x0000))
2650                                 size += SZ_NLATTR_TYPE_OF(uint16_t);
2651                         break;
2652                 }
2653                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2654                         size += SZ_NLATTR_TYPE_OF(uint32_t);
2655                         break;
2656                 default:
2657                         assert(false);
2658                         DRV_LOG(WARNING,
2659                                 "unsupported item %p type %d,"
2660                                 " items must be validated"
2661                                 " before flow creation",
2662                                 (const void *)items, items->type);
2663                         return 0;
2664                 }
2665         }
2666         return size;
2667 }
2668
2669 /**
2670  * Calculate maximum size of memory for flow actions of Linux TC flower and
2671  * extract specified actions.
2672  *
2673  * @param[in] actions
2674  *   Pointer to the list of actions.
2675  * @param[out] action_flags
2676  *   Pointer to the detected actions.
2677  *
2678  * @return
2679  *   Maximum size of memory for actions.
2680  */
2681 static int
2682 flow_tcf_get_actions_and_size(const struct rte_flow_action actions[],
2683                               uint64_t *action_flags)
2684 {
2685         int size = 0;
2686         uint64_t flags = 0;
2687
2688         size += SZ_NLATTR_NEST; /* TCA_FLOWER_ACT. */
2689         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2690                 switch (actions->type) {
2691                 case RTE_FLOW_ACTION_TYPE_VOID:
2692                         break;
2693                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2694                         size += SZ_NLATTR_NEST + /* na_act_index. */
2695                                 SZ_NLATTR_STRZ_OF("mirred") +
2696                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2697                                 SZ_NLATTR_TYPE_OF(struct tc_mirred);
2698                         flags |= MLX5_FLOW_ACTION_PORT_ID;
2699                         break;
2700                 case RTE_FLOW_ACTION_TYPE_JUMP:
2701                         size += SZ_NLATTR_NEST + /* na_act_index. */
2702                                 SZ_NLATTR_STRZ_OF("gact") +
2703                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2704                                 SZ_NLATTR_TYPE_OF(struct tc_gact);
2705                         flags |= MLX5_FLOW_ACTION_JUMP;
2706                         break;
2707                 case RTE_FLOW_ACTION_TYPE_DROP:
2708                         size += SZ_NLATTR_NEST + /* na_act_index. */
2709                                 SZ_NLATTR_STRZ_OF("gact") +
2710                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2711                                 SZ_NLATTR_TYPE_OF(struct tc_gact);
2712                         flags |= MLX5_FLOW_ACTION_DROP;
2713                         break;
2714                 case RTE_FLOW_ACTION_TYPE_COUNT:
2715                         break;
2716                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2717                         flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
2718                         goto action_of_vlan;
2719                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2720                         flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
2721                         goto action_of_vlan;
2722                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
2723                         flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
2724                         goto action_of_vlan;
2725                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
2726                         flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
2727                         goto action_of_vlan;
2728 action_of_vlan:
2729                         size += SZ_NLATTR_NEST + /* na_act_index. */
2730                                 SZ_NLATTR_STRZ_OF("vlan") +
2731                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2732                                 SZ_NLATTR_TYPE_OF(struct tc_vlan) +
2733                                 SZ_NLATTR_TYPE_OF(uint16_t) +
2734                                 /* VLAN protocol. */
2735                                 SZ_NLATTR_TYPE_OF(uint16_t) + /* VLAN ID. */
2736                                 SZ_NLATTR_TYPE_OF(uint8_t); /* VLAN prio. */
2737                         break;
2738                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2739                         size += SZ_NLATTR_NEST + /* na_act_index. */
2740                                 SZ_NLATTR_STRZ_OF("tunnel_key") +
2741                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2742                                 SZ_NLATTR_TYPE_OF(uint8_t);
2743                         size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
2744                         size += flow_tcf_vxlan_encap_size(actions) +
2745                                 RTE_ALIGN_CEIL /* preceding encap params. */
2746                                 (sizeof(struct flow_tcf_vxlan_encap),
2747                                 MNL_ALIGNTO);
2748                         flags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
2749                         break;
2750                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2751                         size += SZ_NLATTR_NEST + /* na_act_index. */
2752                                 SZ_NLATTR_STRZ_OF("tunnel_key") +
2753                                 SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
2754                                 SZ_NLATTR_TYPE_OF(uint8_t);
2755                         size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
2756                         size += RTE_ALIGN_CEIL /* preceding decap params. */
2757                                 (sizeof(struct flow_tcf_vxlan_decap),
2758                                 MNL_ALIGNTO);
2759                         flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
2760                         break;
2761                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2762                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2763                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2764                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2765                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2766                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2767                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2768                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2769                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2770                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2771                         size += flow_tcf_get_pedit_actions_size(&actions,
2772                                                                 &flags);
2773                         break;
2774                 default:
2775                         DRV_LOG(WARNING,
2776                                 "unsupported action %p type %d,"
2777                                 " actions must be validated before flow creation",
2778                                 (const void *)actions, actions->type);
2779                         break;
2780                 }
2781         }
2782         *action_flags = flags;
2783         return size;
2784 }
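
/*
 * Worked example (illustrative): a plain DROP rule accumulates the
 * TCA_FLOWER_ACT nest plus one na_act_index nest, SZ_NLATTR_STRZ_OF("gact"),
 * a TCA_ACT_OPTIONS nest and SZ_NLATTR_TYPE_OF(struct tc_gact), and
 * *action_flags is returned with MLX5_FLOW_ACTION_DROP set.
 */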
2785
2786 /**
2787  * Prepare a flow object for Linux TC flower. It calculates the maximum size of
2788  * memory required, allocates the memory, initializes Netlink message headers
2789  * and sets a unique TC message handle.
2790  *
2791  * @param[in] attr
2792  *   Pointer to the flow attributes.
2793  * @param[in] items
2794  *   Pointer to the list of items.
2795  * @param[in] actions
2796  *   Pointer to the list of actions.
2797  * @param[out] error
2798  *   Pointer to the error structure.
2799  *
2800  * @return
2801  *   Pointer to mlx5_flow object on success,
2802  *   otherwise NULL and rte_errno is set.
2803  */
2804 static struct mlx5_flow *
2805 flow_tcf_prepare(const struct rte_flow_attr *attr,
2806                  const struct rte_flow_item items[],
2807                  const struct rte_flow_action actions[],
2808                  struct rte_flow_error *error)
2809 {
2810         size_t size = RTE_ALIGN_CEIL
2811                         (sizeof(struct mlx5_flow),
2812                          alignof(struct flow_tcf_tunnel_hdr)) +
2813                       MNL_ALIGN(sizeof(struct nlmsghdr)) +
2814                       MNL_ALIGN(sizeof(struct tcmsg));
2815         struct mlx5_flow *dev_flow;
2816         uint64_t action_flags = 0;
2817         struct nlmsghdr *nlh;
2818         struct tcmsg *tcm;
2819         uint8_t *sp, *tun = NULL;
2820
2821         size += flow_tcf_get_items_size(attr, items, &action_flags);
2822         size += flow_tcf_get_actions_and_size(actions, &action_flags);
2823         dev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);
2824         if (!dev_flow) {
2825                 rte_flow_error_set(error, ENOMEM,
2826                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2827                                    "not enough memory to create E-Switch flow");
2828                 return NULL;
2829         }
2830         sp = (uint8_t *)(dev_flow + 1);
2831         if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {
2832                 sp = RTE_PTR_ALIGN
2833                         (sp, alignof(struct flow_tcf_tunnel_hdr));
2834                 tun = sp;
2835                 sp += RTE_ALIGN_CEIL
2836                         (sizeof(struct flow_tcf_vxlan_encap),
2837                         MNL_ALIGNTO);
2838 #ifndef NDEBUG
2839                 size -= RTE_ALIGN_CEIL
2840                         (sizeof(struct flow_tcf_vxlan_encap),
2841                         MNL_ALIGNTO);
2842 #endif
2843         } else if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
2844                 sp = RTE_PTR_ALIGN
2845                         (sp, alignof(struct flow_tcf_tunnel_hdr));
2846                 tun = sp;
2847                 sp += RTE_ALIGN_CEIL
2848                         (sizeof(struct flow_tcf_vxlan_decap),
2849                         MNL_ALIGNTO);
2850 #ifndef NDEBUG
2851                 size -= RTE_ALIGN_CEIL
2852                         (sizeof(struct flow_tcf_vxlan_decap),
2853                         MNL_ALIGNTO);
2854 #endif
2855         } else {
2856                 sp = RTE_PTR_ALIGN(sp, MNL_ALIGNTO);
2857         }
2858         nlh = mnl_nlmsg_put_header(sp);
2859         tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
2860         *dev_flow = (struct mlx5_flow){
2861                 .tcf = (struct mlx5_flow_tcf){
2862 #ifndef NDEBUG
2863                         .nlsize = size - RTE_ALIGN_CEIL
2864                                 (sizeof(struct mlx5_flow),
2865                                  alignof(struct flow_tcf_tunnel_hdr)),
2866 #endif
2867                         .tunnel = (struct flow_tcf_tunnel_hdr *)tun,
2868                         .nlh = nlh,
2869                         .tcm = tcm,
2870                 },
2871         };
2872         if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
2873                 dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_DECAP;
2874         else if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
2875                 dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_ENCAP;
2876         return dev_flow;
2877 }
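
/*
 * Resulting buffer layout for the VXLAN encap case (illustrative):
 *
 *   dev_flow -> +------------------------------------+
 *               | struct mlx5_flow                   |
 *   tun ------> +-- flow_tcf_tunnel_hdr alignment ---+
 *               | struct flow_tcf_vxlan_encap        |
 *   nlh ------> +-- MNL_ALIGNTO alignment -----------+
 *               | struct nlmsghdr                    |
 *   tcm ------> +------------------------------------+
 *               | struct tcmsg                       |
 *               +------------------------------------+
 *               | Netlink attributes appended by the |
 *               | translation routine                |
 *               +------------------------------------+
 *
 * The decap case is identical with flow_tcf_vxlan_decap in the middle;
 * without tunnel actions the Netlink headers follow mlx5_flow directly.
 */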
2878
2879 /**
2880  * Make adjustments for supporting count actions.
2881  *
2882  * @param[in] dev
2883  *   Pointer to the Ethernet device structure.
2884  * @param[in] dev_flow
2885  *   Pointer to mlx5_flow.
2886  * @param[out] error
2887  *   Pointer to error structure.
2888  *
2889  * @return
2890  *   0 on success, otherwise a negative errno value is returned and rte_errno is set.
2891  */
2892 static int
2893 flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,
2894                                   struct mlx5_flow *dev_flow,
2895                                   struct rte_flow_error *error)
2896 {
2897         struct rte_flow *flow = dev_flow->flow;
2898
2899         if (!flow->counter) {
2900                 flow->counter = flow_tcf_counter_new();
2901                 if (!flow->counter)
2902                         return rte_flow_error_set(error, rte_errno,
2903                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2904                                                   NULL,
2905                                                   "cannot get counter"
2906                                                   " context.");
2907         }
2908         return 0;
2909 }
2910
2911 /**
2912  * Convert VXLAN VNI to 32-bit integer.
2913  *
2914  * @param[in] vni
2915  *   VXLAN VNI in 24-bit wire format.
2916  *
2917  * @return
2918  *   VXLAN VNI as a 32-bit integer value in network endian.
2919  */
2920 static inline rte_be32_t
2921 vxlan_vni_as_be32(const uint8_t vni[3])
2922 {
2923         union {
2924                 uint8_t vni[4];
2925                 rte_be32_t dword;
2926         } ret = {
2927                 .vni = { 0, vni[0], vni[1], vni[2] },
2928         };
2929         return ret.dword;
2930 }
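
/*
 * Worked example (hypothetical value): vni[] = {0x12, 0x34, 0x56} is
 * laid out as the bytes 00:12:34:56, i.e. RTE_BE32(0x00123456), so the
 * result can be emitted with mnl_attr_put_u32() without any further
 * byte-order conversion.
 */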
2931
2932 /**
2933  * Helper function to process RTE_FLOW_ITEM_TYPE_ETH entry in configuration
2934  * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the MAC address fields
2935  * in the encapsulation parameters structure. The item must be prevalidated,
2936  * no validation checks are performed by this function.
2937  *
2938  * @param[in] spec
2939  *   RTE_FLOW_ITEM_TYPE_ETH entry specification.
2940  * @param[in] mask
2941  *   RTE_FLOW_ITEM_TYPE_ETH entry mask.
2942  * @param[out] encap
2943  *   Structure to fill the gathered MAC address data.
2944  */
2945 static void
2946 flow_tcf_parse_vxlan_encap_eth(const struct rte_flow_item_eth *spec,
2947                                const struct rte_flow_item_eth *mask,
2948                                struct flow_tcf_vxlan_encap *encap)
2949 {
2950         /* Item must be validated before. No redundant checks. */
2951         assert(spec);
2952         if (!mask || !memcmp(&mask->dst,
2953                              &rte_flow_item_eth_mask.dst,
2954                              sizeof(rte_flow_item_eth_mask.dst))) {
2955                 /*
2956                  * Ethernet addresses are not supported by
2957                  * tc as tunnel_key parameters. Destination
2958                  * address is needed to form the encap packet
2959                  * header and is retrieved by the kernel from
2960                  * implicit sources (ARP table, etc.); address
2961                  * masks are not supported at all.
2962                  */
2963                 encap->eth.dst = spec->dst;
2964                 encap->mask |= FLOW_TCF_ENCAP_ETH_DST;
2965         }
2966         if (!mask || !memcmp(&mask->src,
2967                              &rte_flow_item_eth_mask.src,
2968                              sizeof(rte_flow_item_eth_mask.src))) {
2969                 /*
2970                  * Ethernet addresses are not supported by
2971                  * tc as tunnel_key parameters. Source ethernet
2972                  * address is ignored anyway.
2973                  */
2974                 encap->eth.src = spec->src;
2975                 encap->mask |= FLOW_TCF_ENCAP_ETH_SRC;
2976         }
2977 }
2978
2979 /**
2980  * Helper function to process RTE_FLOW_ITEM_TYPE_IPV4 entry in configuration
2981  * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV4 address fields
2982  * in the encapsulation parameters structure. The item must be prevalidated,
2983  * no validation checks are performed by this function.
2984  *
2985  * @param[in] spec
2986  *   RTE_FLOW_ITEM_TYPE_IPV4 entry specification.
2987  * @param[in] mask
2988  *   RTE_FLOW_ITEM_TYPE_IPV4 entry mask.
2989  * @param[out] encap
2990  *   Structure to fill the gathered IPV4 address data.
2991  */
2992 static void
2993 flow_tcf_parse_vxlan_encap_ipv4(const struct rte_flow_item_ipv4 *spec,
2994                                 const struct rte_flow_item_ipv4 *mask,
2995                                 struct flow_tcf_vxlan_encap *encap)
2996 {
2997         /* Item must be validated before. No redundant checks. */
2998         assert(spec);
2999         encap->ipv4.dst = spec->hdr.dst_addr;
3000         encap->ipv4.src = spec->hdr.src_addr;
3001         encap->mask |= FLOW_TCF_ENCAP_IPV4_SRC |
3002                        FLOW_TCF_ENCAP_IPV4_DST;
3003         if (mask && mask->hdr.type_of_service) {
3004                 encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
3005                 encap->ip_tos = spec->hdr.type_of_service;
3006         }
3007         if (mask && mask->hdr.time_to_live) {
3008                 encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
3009                 encap->ip_ttl_hop = spec->hdr.time_to_live;
3010         }
3011 }
3012
3013 /**
3014  * Helper function to process RTE_FLOW_ITEM_TYPE_IPV6 entry in configuration
3015  * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV6 address fields
3016  * in the encapsulation parameters structure. The item must be prevalidated,
3017  * no validation checks are performed by this function.
3018  *
3019  * @param[in] spec
3020  *   RTE_FLOW_ITEM_TYPE_IPV6 entry specification.
3021  * @param[in] mask
3022  *   RTE_FLOW_ITEM_TYPE_IPV6 entry mask.
3023  * @param[out] encap
3024  *   Structure to fill the gathered IPV6 address data.
3025  */
3026 static void
3027 flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,
3028                                 const struct rte_flow_item_ipv6 *mask,
3029                                 struct flow_tcf_vxlan_encap *encap)
3030 {
3031         /* Item must be validated before. No redundant checks. */
3032         assert(spec);
3033         memcpy(encap->ipv6.dst, spec->hdr.dst_addr, IPV6_ADDR_LEN);
3034         memcpy(encap->ipv6.src, spec->hdr.src_addr, IPV6_ADDR_LEN);
3035         encap->mask |= FLOW_TCF_ENCAP_IPV6_SRC |
3036                        FLOW_TCF_ENCAP_IPV6_DST;
3037         if (mask) {
3038                 if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
3039                     IPV6_HDR_TC_SHIFT) & 0xff) {
3040                         encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
3041                         encap->ip_tos = (rte_be_to_cpu_32
3042                                                 (spec->hdr.vtc_flow) >>
3043                                                  IPV6_HDR_TC_SHIFT) & 0xff;
3044                 }
3045                 if (mask->hdr.hop_limits) {
3046                         encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
3047                         encap->ip_ttl_hop = spec->hdr.hop_limits;
3048                 }
3049         }
3050 }
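
/*
 * Worked example (hypothetical value): IPv6 vtc_flow packs
 * version(4) : traffic class(8) : flow label(20). For the host-order
 * word 0x60d20000 (version 6), (0x60d20000 >> IPV6_HDR_TC_SHIFT) & 0xff
 * equals 0x0d, the Traffic Class copied to encap->ip_tos above.
 */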
3051
3052 /**
3053  * Helper function to process RTE_FLOW_ITEM_TYPE_UDP entry in configuration
3054  * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the UDP port fields
3055  * in the encapsulation parameters structure. The item must be prevalidated,
3056  * no validation checks are performed by this function.
3057  *
3058  * @param[in] spec
3059  *   RTE_FLOW_ITEM_TYPE_UDP entry specification.
3060  * @param[in] mask
3061  *   RTE_FLOW_ITEM_TYPE_UDP entry mask.
3062  * @param[out] encap
3063  *   Structure to fill the gathered UDP port data.
3064  */
3065 static void
3066 flow_tcf_parse_vxlan_encap_udp(const struct rte_flow_item_udp *spec,
3067                                const struct rte_flow_item_udp *mask,
3068                                struct flow_tcf_vxlan_encap *encap)
3069 {
3070         assert(spec);
3071         encap->udp.dst = spec->hdr.dst_port;
3072         encap->mask |= FLOW_TCF_ENCAP_UDP_DST;
3073         if (!mask || mask->hdr.src_port != RTE_BE16(0x0000)) {
3074                 encap->udp.src = spec->hdr.src_port;
3075                 encap->mask |= FLOW_TCF_ENCAP_UDP_SRC;
3076         }
3077 }
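
/*
 * Note: the destination port is unconditionally taken from the item; a
 * typical encap definition uses the IANA-assigned VXLAN port 4789
 * (RFC 7348). The source port is copied only when its mask is non-zero.
 */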
3078
3079 /**
3080  * Helper function to process RTE_FLOW_ITEM_TYPE_VXLAN entry in configuration
3081  * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the VNI fields
3082  * in the encapsulation parameters structure. The item must be prevalidated,
3083  * no validation checks are performed by this function.
3084  *
3085  * @param[in] spec
3086  *   RTE_FLOW_ITEM_TYPE_VXLAN entry specification.
3087  * @param[out] encap
3088  *   Structure to fill the gathered VNI address data.
3089  */
3090 static void
3091 flow_tcf_parse_vxlan_encap_vni(const struct rte_flow_item_vxlan *spec,
3092                                struct flow_tcf_vxlan_encap *encap)
3093 {
3094         /* Item must be validated before. No redundant checks. */
3095         assert(spec);
3096         memcpy(encap->vxlan.vni, spec->vni, sizeof(encap->vxlan.vni));
3097         encap->mask |= FLOW_TCF_ENCAP_VXLAN_VNI;
3098 }
3099
3100 /**
3101  * Populate consolidated encapsulation object from list of pattern items.
3102  *
3103  * Helper function to process configuration of action such as
3104  * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. The item list should be
3105  * validated beforehand, there is no way to return a meaningful error.
3106  *
3107  * @param[in] action
3108  *   RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
3109  *   List of pattern items to gather data from.
3110  * @param[out] encap
3111  *   Structure to fill gathered data.
3112  */
3113 static void
3114 flow_tcf_vxlan_encap_parse(const struct rte_flow_action *action,
3115                            struct flow_tcf_vxlan_encap *encap)
3116 {
3117         union {
3118                 const struct rte_flow_item_eth *eth;
3119                 const struct rte_flow_item_ipv4 *ipv4;
3120                 const struct rte_flow_item_ipv6 *ipv6;
3121                 const struct rte_flow_item_udp *udp;
3122                 const struct rte_flow_item_vxlan *vxlan;
3123         } spec, mask;
3124         const struct rte_flow_item *items;
3125
3126         assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
3127         assert(action->conf);
3128
3129         items = ((const struct rte_flow_action_vxlan_encap *)
3130                                         action->conf)->definition;
3131         assert(items);
3132         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3133                 switch (items->type) {
3134                 case RTE_FLOW_ITEM_TYPE_VOID:
3135                         break;
3136                 case RTE_FLOW_ITEM_TYPE_ETH:
3137                         mask.eth = items->mask;
3138                         spec.eth = items->spec;
3139                         flow_tcf_parse_vxlan_encap_eth(spec.eth, mask.eth,
3140                                                        encap);
3141                         break;
3142                 case RTE_FLOW_ITEM_TYPE_IPV4:
3143                         spec.ipv4 = items->spec;
3144                         mask.ipv4 = items->mask;
3145                         flow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, mask.ipv4,
3146                                                         encap);
3147                         break;
3148                 case RTE_FLOW_ITEM_TYPE_IPV6:
3149                         spec.ipv6 = items->spec;
3150                         mask.ipv6 = items->mask;
3151                         flow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, mask.ipv6,
3152                                                         encap);
3153                         break;
3154                 case RTE_FLOW_ITEM_TYPE_UDP:
3155                         mask.udp = items->mask;
3156                         spec.udp = items->spec;
3157                         flow_tcf_parse_vxlan_encap_udp(spec.udp, mask.udp,
3158                                                        encap);
3159                         break;
3160                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3161                         spec.vxlan = items->spec;
3162                         flow_tcf_parse_vxlan_encap_vni(spec.vxlan, encap);
3163                         break;
3164                 default:
3165                         assert(false);
3166                         DRV_LOG(WARNING,
3167                                 "unsupported item %p type %d,"
3168                                 " items must be validated"
3169                                 " before flow creation",
3170                                 (const void *)items, items->type);
3171                         encap->mask = 0;
3172                         return;
3173                 }
3174         }
3175 }
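
/*
 * Illustrative sketch only, kept out of the build: the kind of item
 * list this parser consumes, as referenced by the definition field of
 * struct rte_flow_action_vxlan_encap. All addresses, ports and the VNI
 * below are hypothetical.
 */
#if 0
static struct rte_flow_item_eth enc_eth = {
        .dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static struct rte_flow_item_ipv4 enc_ipv4 = {
        .hdr = {
                .src_addr = RTE_BE32(0xC0A80001), /* 192.168.0.1 */
                .dst_addr = RTE_BE32(0xC0A80002), /* 192.168.0.2 */
        },
};
static struct rte_flow_item_udp enc_udp = {
        .hdr = { .dst_port = RTE_BE16(4789) }, /* IANA VXLAN port. */
};
static struct rte_flow_item_vxlan enc_vxlan = {
        .vni = { 0x00, 0x00, 0x2a }, /* VNI 42. */
};
static struct rte_flow_item enc_definition[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &enc_eth },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &enc_ipv4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &enc_udp },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif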
3176
3177 /**
3178  * Translate flow for Linux TC flower and construct Netlink message.
3179  *
3180  * @param[in] dev
3181  *   Pointer to the Ethernet device structure.
3182  * @param[in, out] dev_flow
3183  *   Pointer to the sub flow.
3184  * @param[in] attr
3185  *   Pointer to the flow attributes.
3186  * @param[in] items
3187  *   Pointer to the list of items.
3188  * @param[in] actions
3189  *   Pointer to the list of actions.
3190  * @param[out] error
3191  *   Pointer to the error structure.
3192  *
3193  * @return
3194  *   0 on success, a negative errno value otherwise and rte_errno is set.
3195  */
3196 static int
3197 flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3198                    const struct rte_flow_attr *attr,
3199                    const struct rte_flow_item items[],
3200                    const struct rte_flow_action actions[],
3201                    struct rte_flow_error *error)
3202 {
3203         union {
3204                 const struct rte_flow_item_port_id *port_id;
3205                 const struct rte_flow_item_eth *eth;
3206                 const struct rte_flow_item_vlan *vlan;
3207                 const struct rte_flow_item_ipv4 *ipv4;
3208                 const struct rte_flow_item_ipv6 *ipv6;
3209                 const struct rte_flow_item_tcp *tcp;
3210                 const struct rte_flow_item_udp *udp;
3211                 const struct rte_flow_item_vxlan *vxlan;
3212         } spec, mask;
3213         union {
3214                 const struct rte_flow_action_port_id *port_id;
3215                 const struct rte_flow_action_jump *jump;
3216                 const struct rte_flow_action_of_push_vlan *of_push_vlan;
3217                 const struct rte_flow_action_of_set_vlan_vid *
3218                         of_set_vlan_vid;
3219                 const struct rte_flow_action_of_set_vlan_pcp *
3220                         of_set_vlan_pcp;
3221         } conf;
3222         union {
3223                 struct flow_tcf_tunnel_hdr *hdr;
3224                 struct flow_tcf_vxlan_decap *vxlan;
3225         } decap = {
3226                 .hdr = NULL,
3227         };
3228         union {
3229                 struct flow_tcf_tunnel_hdr *hdr;
3230                 struct flow_tcf_vxlan_encap *vxlan;
3231         } encap = {
3232                 .hdr = NULL,
3233         };
3234         struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
3235         struct nlmsghdr *nlh = dev_flow->tcf.nlh;
3236         struct tcmsg *tcm = dev_flow->tcf.tcm;
3237         uint32_t na_act_index_cur;
3238         rte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);
3239         rte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);
3240         rte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);
3241         bool ip_proto_set = false;
3242         bool tunnel_outer = false;
3243         struct nlattr *na_flower;
3244         struct nlattr *na_flower_act;
3245         struct nlattr *na_vlan_id = NULL;
3246         struct nlattr *na_vlan_priority = NULL;
3247         uint64_t item_flags = 0;
3248         int ret;
3249
3250         claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
3251                                                 PTOI_TABLE_SZ_MAX(dev)));
3252         if (dev_flow->tcf.tunnel) {
3253                 switch (dev_flow->tcf.tunnel->type) {
3254                 case FLOW_TCF_TUNACT_VXLAN_DECAP:
3255                         decap.vxlan = dev_flow->tcf.vxlan_decap;
3256                         tunnel_outer = true;
3257                         break;
3258                 case FLOW_TCF_TUNACT_VXLAN_ENCAP:
3259                         encap.vxlan = dev_flow->tcf.vxlan_encap;
3260                         break;
3261                 /* New tunnel actions can be added here. */
3262                 default:
3263                         assert(false);
3264                         break;
3265                 }
3266         }
3267         nlh = dev_flow->tcf.nlh;
3268         tcm = dev_flow->tcf.tcm;
3269         /* Prepare API must have been called beforehand. */
3270         assert(nlh != NULL && tcm != NULL);
3271         tcm->tcm_family = AF_UNSPEC;
3272         tcm->tcm_ifindex = ptoi[0].ifindex;
3273         tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
3274         /*
3275          * Priority cannot be zero to prevent the kernel from picking one
3276          * automatically.
3277          */
3278         tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16, outer_etype);
3279         if (attr->group > 0)
3280                 mnl_attr_put_u32(nlh, TCA_CHAIN, attr->group);
3281         mnl_attr_put_strz(nlh, TCA_KIND, "flower");
3282         na_flower = mnl_attr_nest_start(nlh, TCA_OPTIONS);
3283         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3284                 unsigned int i;
3285
3286                 switch (items->type) {
3287                 case RTE_FLOW_ITEM_TYPE_VOID:
3288                         break;
3289                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3290                         mask.port_id = flow_tcf_item_mask
3291                                 (items, &rte_flow_item_port_id_mask,
3292                                  &flow_tcf_mask_supported.port_id,
3293                                  &flow_tcf_mask_empty.port_id,
3294                                  sizeof(flow_tcf_mask_supported.port_id),
3295                                  error);
3296                         assert(mask.port_id);
3297                         if (mask.port_id == &flow_tcf_mask_empty.port_id)
3298                                 break;
3299                         spec.port_id = items->spec;
3300                         if (!mask.port_id->id)
3301                                 i = 0;
3302                         else
3303                                 for (i = 0; ptoi[i].ifindex; ++i)
3304                                         if (ptoi[i].port_id == spec.port_id->id)
3305                                                 break;
3306                         assert(ptoi[i].ifindex);
3307                         tcm->tcm_ifindex = ptoi[i].ifindex;
3308                         break;
3309                 case RTE_FLOW_ITEM_TYPE_ETH:
3310                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
3311                                       MLX5_FLOW_LAYER_INNER_L2 :
3312                                       MLX5_FLOW_LAYER_OUTER_L2;
3313                         mask.eth = flow_tcf_item_mask
3314                                 (items, &rte_flow_item_eth_mask,
3315                                  &flow_tcf_mask_supported.eth,
3316                                  &flow_tcf_mask_empty.eth,
3317                                  sizeof(flow_tcf_mask_supported.eth),
3318                                  error);
3319                         assert(mask.eth);
3320                         if (mask.eth == &flow_tcf_mask_empty.eth)
3321                                 break;
3322                         spec.eth = items->spec;
3323                         if (mask.eth->type) {
3324                                 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
3325                                         inner_etype = spec.eth->type;
3326                                 else
3327                                         outer_etype = spec.eth->type;
3328                         }
3329                         if (tunnel_outer) {
3330                                 DRV_LOG(WARNING,
3331                                         "outer L2 addresses cannot be"
3332                                         " forced as outer ones for tunnel,"
3333                                         " parameter is ignored");
3334                                 break;
3335                         }
3336                         if (!is_zero_ether_addr(&mask.eth->dst)) {
3337                                 mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
3338                                              ETHER_ADDR_LEN,
3339                                              spec.eth->dst.addr_bytes);
3340                                 mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
3341                                              ETHER_ADDR_LEN,
3342                                              mask.eth->dst.addr_bytes);
3343                         }
3344                         if (!is_zero_ether_addr(&mask.eth->src)) {
3345                                 mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
3346                                              ETHER_ADDR_LEN,
3347                                              spec.eth->src.addr_bytes);
3348                                 mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
3349                                              ETHER_ADDR_LEN,
3350                                              mask.eth->src.addr_bytes);
3351                         }
3352                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3353                         break;
3354                 case RTE_FLOW_ITEM_TYPE_VLAN:
3355                         assert(!encap.hdr);
3356                         assert(!decap.hdr);
3357                         assert(!tunnel_outer);
3358                         item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
3359                         mask.vlan = flow_tcf_item_mask
3360                                 (items, &rte_flow_item_vlan_mask,
3361                                  &flow_tcf_mask_supported.vlan,
3362                                  &flow_tcf_mask_empty.vlan,
3363                                  sizeof(flow_tcf_mask_supported.vlan),
3364                                  error);
3365                         assert(mask.vlan);
3366                         if (mask.vlan == &flow_tcf_mask_empty.vlan)
3367                                 break;
3368                         spec.vlan = items->spec;
3369                         assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
3370                                outer_etype == RTE_BE16(ETH_P_8021Q));
3371                         outer_etype = RTE_BE16(ETH_P_8021Q);
3372                         if (mask.vlan->inner_type)
3373                                 vlan_etype = spec.vlan->inner_type;
3374                         if (mask.vlan->tci & RTE_BE16(0xe000))
3375                                 mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_VLAN_PRIO,
3376                                                 (rte_be_to_cpu_16
3377                                                  (spec.vlan->tci) >> 13) & 0x7);
3378                         if (mask.vlan->tci & RTE_BE16(0x0fff))
3379                                 mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_VLAN_ID,
3380                                                  rte_be_to_cpu_16
3381                                                  (spec.vlan->tci &
3382                                                   RTE_BE16(0x0fff)));
3383                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3384                         break;
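                        /*
                         * Worked example (hypothetical value): a host-order
                         * TCI of 0xa00c splits into PCP (0xa00c >> 13) & 0x7
                         * == 5 and VID 0xa00c & 0x0fff == 12, filling the
                         * two attributes above.
                         */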
3385                 case RTE_FLOW_ITEM_TYPE_IPV4:
3386                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
3387                                       MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3388                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3389                         mask.ipv4 = flow_tcf_item_mask
3390                                 (items, &rte_flow_item_ipv4_mask,
3391                                  &flow_tcf_mask_supported.ipv4,
3392                                  &flow_tcf_mask_empty.ipv4,
3393                                  sizeof(flow_tcf_mask_supported.ipv4),
3394                                  error);
3395                         assert(mask.ipv4);
3396                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
3397                                 assert(inner_etype == RTE_BE16(ETH_P_ALL) ||
3398                                        inner_etype == RTE_BE16(ETH_P_IP));
3399                                 inner_etype = RTE_BE16(ETH_P_IP);
3400                         } else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {
3401                                 assert(vlan_etype == RTE_BE16(ETH_P_ALL) ||
3402                                        vlan_etype == RTE_BE16(ETH_P_IP));
3403                                 vlan_etype = RTE_BE16(ETH_P_IP);
3404                         } else {
3405                                 assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
3406                                        outer_etype == RTE_BE16(ETH_P_IP));
3407                                 outer_etype = RTE_BE16(ETH_P_IP);
3408                         }
3409                         spec.ipv4 = items->spec;
3410                         if (!tunnel_outer && mask.ipv4->hdr.next_proto_id) {
3411                                 /*
3412                                  * No way to set IP protocol for outer tunnel
3413                                  * layers. Usually it is fixed, for example,
3414                                  * to UDP for VXLAN/GPE.
3415                                  */
3416                                 assert(spec.ipv4); /* Mask is not empty. */
3417                                 mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
3418                                                 spec.ipv4->hdr.next_proto_id);
3419                                 ip_proto_set = true;
3420                         }
3421                         if (mask.ipv4 == &flow_tcf_mask_empty.ipv4 ||
3422                              (!mask.ipv4->hdr.src_addr &&
3423                               !mask.ipv4->hdr.dst_addr)) {
3424                                 if (!tunnel_outer)
3425                                         break;
3426                                 /*
3427                                  * For tunnel outer we must set outer IP key
3428                                  * anyway, even if the specification/mask is
3429                                  * empty. There is no other way to tell the
3430                                  * kernel about the outer layer protocol.
3431                                  */
3432                                 mnl_attr_put_u32
3433                                         (nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC,
3434                                          mask.ipv4->hdr.src_addr);
3435                                 mnl_attr_put_u32
3436                                         (nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3437                                          mask.ipv4->hdr.src_addr);
3438                                 assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3439                                 break;
3440                         }
3441                         if (mask.ipv4->hdr.src_addr) {
3442                                 mnl_attr_put_u32
3443                                         (nlh, tunnel_outer ?
3444                                          TCA_FLOWER_KEY_ENC_IPV4_SRC :
3445                                          TCA_FLOWER_KEY_IPV4_SRC,
3446                                          spec.ipv4->hdr.src_addr);
3447                                 mnl_attr_put_u32
3448                                         (nlh, tunnel_outer ?
3449                                          TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK :
3450                                          TCA_FLOWER_KEY_IPV4_SRC_MASK,
3451                                          mask.ipv4->hdr.src_addr);
3452                         }
3453                         if (mask.ipv4->hdr.dst_addr) {
3454                                 mnl_attr_put_u32
3455                                         (nlh, tunnel_outer ?
3456                                          TCA_FLOWER_KEY_ENC_IPV4_DST :
3457                                          TCA_FLOWER_KEY_IPV4_DST,
3458                                          spec.ipv4->hdr.dst_addr);
3459                                 mnl_attr_put_u32
3460                                         (nlh, tunnel_outer ?
3461                                          TCA_FLOWER_KEY_ENC_IPV4_DST_MASK :
3462                                          TCA_FLOWER_KEY_IPV4_DST_MASK,
3463                                          mask.ipv4->hdr.dst_addr);
3464                         }
3465                         if (mask.ipv4->hdr.time_to_live) {
3466                                 mnl_attr_put_u8
3467                                         (nlh, tunnel_outer ?
3468                                          TCA_FLOWER_KEY_ENC_IP_TTL :
3469                                          TCA_FLOWER_KEY_IP_TTL,
3470                                          spec.ipv4->hdr.time_to_live);
3471                                 mnl_attr_put_u8
3472                                         (nlh, tunnel_outer ?
3473                                          TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
3474                                          TCA_FLOWER_KEY_IP_TTL_MASK,
3475                                          mask.ipv4->hdr.time_to_live);
3476                         }
3477                         if (mask.ipv4->hdr.type_of_service) {
3478                                 mnl_attr_put_u8
3479                                         (nlh, tunnel_outer ?
3480                                          TCA_FLOWER_KEY_ENC_IP_TOS :
3481                                          TCA_FLOWER_KEY_IP_TOS,
3482                                          spec.ipv4->hdr.type_of_service);
3483                                 mnl_attr_put_u8
3484                                         (nlh, tunnel_outer ?
3485                                          TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
3486                                          TCA_FLOWER_KEY_IP_TOS_MASK,
3487                                          mask.ipv4->hdr.type_of_service);
3488                         }
3489                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3490                         break;
3491                 case RTE_FLOW_ITEM_TYPE_IPV6: {
3492                         bool ipv6_src, ipv6_dst;
3493                         uint8_t msk6, tos6;
3494
3495                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
3496                                       MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3497                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3498                         mask.ipv6 = flow_tcf_item_mask
3499                                 (items, &rte_flow_item_ipv6_mask,
3500                                  &flow_tcf_mask_supported.ipv6,
3501                                  &flow_tcf_mask_empty.ipv6,
3502                                  sizeof(flow_tcf_mask_supported.ipv6),
3503                                  error);
3504                         assert(mask.ipv6);
3505                         if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
3506                                 assert(inner_etype == RTE_BE16(ETH_P_ALL) ||
3507                                        inner_etype == RTE_BE16(ETH_P_IPV6));
3508                                 inner_etype = RTE_BE16(ETH_P_IPV6);
3509                         } else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {
3510                                 assert(vlan_etype == RTE_BE16(ETH_P_ALL) ||
3511                                        vlan_etype == RTE_BE16(ETH_P_IPV6));
3512                                 vlan_etype = RTE_BE16(ETH_P_IPV6);
3513                         } else {
3514                                 assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
3515                                        outer_etype == RTE_BE16(ETH_P_IPV6));
3516                                 outer_etype = RTE_BE16(ETH_P_IPV6);
3517                         }
3518                         spec.ipv6 = items->spec;
3519                         if (!tunnel_outer && mask.ipv6->hdr.proto) {
3520                                 /*
3521                                  * No way to set IP protocol for outer tunnel
3522                                  * layers. Usually it is fixed, for example,
3523                                  * to UDP for VXLAN/GPE.
3524                                  */
3525                                 assert(spec.ipv6); /* Mask is not empty. */
3526                                 mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
3527                                                 spec.ipv6->hdr.proto);
3528                                 ip_proto_set = 1;
3529                         }
3530                         ipv6_dst = !IN6_IS_ADDR_UNSPECIFIED
3531                                                 (mask.ipv6->hdr.dst_addr);
3532                         ipv6_src = !IN6_IS_ADDR_UNSPECIFIED
3533                                                 (mask.ipv6->hdr.src_addr);
3534                         if (mask.ipv6 == &flow_tcf_mask_empty.ipv6 ||
3535                              (!ipv6_dst && !ipv6_src)) {
3536                                 if (!tunnel_outer)
3537                                         break;
3538                                 /*
3539                                  * For tunnel outer we must set outer IP key
3540                                  * anyway, even if the specification/mask is
3541                                  * empty. There is no other way to tell the
3542                                  * kernel about the outer layer protocol.
3543                                  */
3544                                 mnl_attr_put(nlh,
3545                                              TCA_FLOWER_KEY_ENC_IPV6_SRC,
3546                                              IPV6_ADDR_LEN,
3547                                              mask.ipv6->hdr.src_addr);
3548                                 mnl_attr_put(nlh,
3549                                              TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3550                                              IPV6_ADDR_LEN,
3551                                              mask.ipv6->hdr.src_addr);
3552                                 assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3553                                 break;
3554                         }
3555                         if (ipv6_src) {
3556                                 mnl_attr_put(nlh, tunnel_outer ?
3557                                              TCA_FLOWER_KEY_ENC_IPV6_SRC :
3558                                              TCA_FLOWER_KEY_IPV6_SRC,
3559                                              IPV6_ADDR_LEN,
3560                                              spec.ipv6->hdr.src_addr);
3561                                 mnl_attr_put(nlh, tunnel_outer ?
3562                                              TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK :
3563                                              TCA_FLOWER_KEY_IPV6_SRC_MASK,
3564                                              IPV6_ADDR_LEN,
3565                                              mask.ipv6->hdr.src_addr);
3566                         }
3567                         if (ipv6_dst) {
3568                                 mnl_attr_put(nlh, tunnel_outer ?
3569                                              TCA_FLOWER_KEY_ENC_IPV6_DST :
3570                                              TCA_FLOWER_KEY_IPV6_DST,
3571                                              IPV6_ADDR_LEN,
3572                                              spec.ipv6->hdr.dst_addr);
3573                                 mnl_attr_put(nlh, tunnel_outer ?
3574                                              TCA_FLOWER_KEY_ENC_IPV6_DST_MASK :
3575                                              TCA_FLOWER_KEY_IPV6_DST_MASK,
3576                                              IPV6_ADDR_LEN,
3577                                              mask.ipv6->hdr.dst_addr);
3578                         }
3579                         if (mask.ipv6->hdr.hop_limits) {
3580                                 mnl_attr_put_u8
3581                                         (nlh, tunnel_outer ?
3582                                          TCA_FLOWER_KEY_ENC_IP_TTL :
3583                                          TCA_FLOWER_KEY_IP_TTL,
3584                                          spec.ipv6->hdr.hop_limits);
3585                                 mnl_attr_put_u8
3586                                         (nlh, tunnel_outer ?
3587                                          TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
3588                                          TCA_FLOWER_KEY_IP_TTL_MASK,
3589                                          mask.ipv6->hdr.hop_limits);
3590                         }
3591                         msk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>
3592                                 IPV6_HDR_TC_SHIFT) & 0xff;
3593                         if (msk6) {
3594                                 tos6 = (rte_be_to_cpu_32
3595                                         (spec.ipv6->hdr.vtc_flow) >>
3596                                                 IPV6_HDR_TC_SHIFT) & 0xff;
3597                                 mnl_attr_put_u8
3598                                         (nlh, tunnel_outer ?
3599                                          TCA_FLOWER_KEY_ENC_IP_TOS :
3600                                          TCA_FLOWER_KEY_IP_TOS, tos6);
3601                                 mnl_attr_put_u8
3602                                         (nlh, tunnel_outer ?
3603                                          TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
3604                                          TCA_FLOWER_KEY_IP_TOS_MASK, msk6);
3605                         }
3606                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3607                         break;
3608                 }
3609                 case RTE_FLOW_ITEM_TYPE_UDP:
3610                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
3611                                       MLX5_FLOW_LAYER_INNER_L4_UDP :
3612                                       MLX5_FLOW_LAYER_OUTER_L4_UDP;
3613                         mask.udp = flow_tcf_item_mask
3614                                 (items, &rte_flow_item_udp_mask,
3615                                  &flow_tcf_mask_supported.udp,
3616                                  &flow_tcf_mask_empty.udp,
3617                                  sizeof(flow_tcf_mask_supported.udp),
3618                                  error);
3619                         assert(mask.udp);
3620                         spec.udp = items->spec;
3621                         if (!tunnel_outer) {
3622                                 if (!ip_proto_set)
3623                                         mnl_attr_put_u8
3624                                                 (nlh, TCA_FLOWER_KEY_IP_PROTO,
3625                                                 IPPROTO_UDP);
3626                                 if (mask.udp == &flow_tcf_mask_empty.udp)
3627                                         break;
3628                         } else {
3629                                 assert(mask.udp != &flow_tcf_mask_empty.udp);
3630                                 decap.vxlan->udp_port =
3631                                         rte_be_to_cpu_16
3632                                                 (spec.udp->hdr.dst_port);
3633                         }
3634                         if (mask.udp->hdr.src_port) {
3635                                 mnl_attr_put_u16
3636                                         (nlh, tunnel_outer ?
3637                                          TCA_FLOWER_KEY_ENC_UDP_SRC_PORT :
3638                                          TCA_FLOWER_KEY_UDP_SRC,
3639                                          spec.udp->hdr.src_port);
3640                                 mnl_attr_put_u16
3641                                         (nlh, tunnel_outer ?
3642                                          TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK :
3643                                          TCA_FLOWER_KEY_UDP_SRC_MASK,
3644                                          mask.udp->hdr.src_port);
3645                         }
3646                         if (mask.udp->hdr.dst_port) {
3647                                 mnl_attr_put_u16
3648                                         (nlh, tunnel_outer ?
3649                                          TCA_FLOWER_KEY_ENC_UDP_DST_PORT :
3650                                          TCA_FLOWER_KEY_UDP_DST,
3651                                          spec.udp->hdr.dst_port);
3652                                 mnl_attr_put_u16
3653                                         (nlh, tunnel_outer ?
3654                                          TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK :
3655                                          TCA_FLOWER_KEY_UDP_DST_MASK,
3656                                          mask.udp->hdr.dst_port);
3657                         }
3658                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3659                         break;
3660                 case RTE_FLOW_ITEM_TYPE_TCP:
3661                         item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
3662                                       MLX5_FLOW_LAYER_INNER_L4_TCP :
3663                                       MLX5_FLOW_LAYER_OUTER_L4_TCP;
3664                         mask.tcp = flow_tcf_item_mask
3665                                 (items, &rte_flow_item_tcp_mask,
3666                                  &flow_tcf_mask_supported.tcp,
3667                                  &flow_tcf_mask_empty.tcp,
3668                                  sizeof(flow_tcf_mask_supported.tcp),
3669                                  error);
3670                         assert(mask.tcp);
3671                         if (!ip_proto_set)
3672                                 mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
3673                                                 IPPROTO_TCP);
3674                         if (mask.tcp == &flow_tcf_mask_empty.tcp)
3675                                 break;
3676                         spec.tcp = items->spec;
3677                         if (mask.tcp->hdr.src_port) {
3678                                 mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_SRC,
3679                                                  spec.tcp->hdr.src_port);
3680                                 mnl_attr_put_u16(nlh,
3681                                                  TCA_FLOWER_KEY_TCP_SRC_MASK,
3682                                                  mask.tcp->hdr.src_port);
3683                         }
3684                         if (mask.tcp->hdr.dst_port) {
3685                                 mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_DST,
3686                                                  spec.tcp->hdr.dst_port);
3687                                 mnl_attr_put_u16(nlh,
3688                                                  TCA_FLOWER_KEY_TCP_DST_MASK,
3689                                                  mask.tcp->hdr.dst_port);
3690                         }
3691                         if (mask.tcp->hdr.tcp_flags) {
3692                                 mnl_attr_put_u16
3693                                         (nlh,
3694                                          TCA_FLOWER_KEY_TCP_FLAGS,
3695                                          rte_cpu_to_be_16
3696                                                 (spec.tcp->hdr.tcp_flags));
3697                                 mnl_attr_put_u16
3698                                         (nlh,
3699                                          TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3700                                          rte_cpu_to_be_16
3701                                                 (mask.tcp->hdr.tcp_flags));
3702                         }
3703                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3704                         break;
3705                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3706                         assert(decap.vxlan);
3707                         tunnel_outer = 0;
3708                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
3709                         spec.vxlan = items->spec;
3710                         mnl_attr_put_u32(nlh,
3711                                          TCA_FLOWER_KEY_ENC_KEY_ID,
3712                                          vxlan_vni_as_be32(spec.vxlan->vni));
3713                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3714                         break;
3715                 default:
3716                         return rte_flow_error_set(error, ENOTSUP,
3717                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3718                                                   NULL, "item not supported");
3719                 }
3720         }
3721         /*
3722          * Set the ether_type flower key and tc rule protocol:
3723          * - if there is neither VLAN nor VXLAN, the key is taken from the
3724          *   eth item directly or deduced from the L3 items.
3725          * - if there is a vlan item, then the key is fixed to 802.1Q.
3726          * - if there is a vxlan item, then the key is set to the inner
3727          *   tunnel type.
3727          * - simultaneous vlan and vxlan items are prohibited.
3728          */
3729         if (outer_etype != RTE_BE16(ETH_P_ALL)) {
3730                 tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
3731                                            outer_etype);
3732                 if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
3733                         if (inner_etype != RTE_BE16(ETH_P_ALL))
3734                                 mnl_attr_put_u16(nlh,
3735                                                  TCA_FLOWER_KEY_ETH_TYPE,
3736                                                  inner_etype);
3737                 } else {
3738                         mnl_attr_put_u16(nlh,
3739                                          TCA_FLOWER_KEY_ETH_TYPE,
3740                                          outer_etype);
3741                         if (outer_etype == RTE_BE16(ETH_P_8021Q) &&
3742                             vlan_etype != RTE_BE16(ETH_P_ALL))
3743                                 mnl_attr_put_u16(nlh,
3744                                                  TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3745                                                  vlan_etype);
3746                 }
3747                 assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3748         }
3749         na_flower_act = mnl_attr_nest_start(nlh, TCA_FLOWER_ACT);
3750         na_act_index_cur = 1;
3751         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3752                 struct nlattr *na_act_index;
3753                 struct nlattr *na_act;
3754                 unsigned int vlan_act;
3755                 unsigned int i;
3756
3757                 switch (actions->type) {
3758                 case RTE_FLOW_ACTION_TYPE_VOID:
3759                         break;
3760                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3761                         conf.port_id = actions->conf;
3762                         if (conf.port_id->original)
3763                                 i = 0;
3764                         else
3765                                 for (i = 0; ptoi[i].ifindex; ++i)
3766                                         if (ptoi[i].port_id == conf.port_id->id)
3767                                                 break;
3768                         assert(ptoi[i].ifindex);
3769                         na_act_index =
3770                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3771                         assert(na_act_index);
3772                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "mirred");
3773                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3774                         assert(na_act);
3775                         if (encap.hdr) {
3776                                 assert(dev_flow->tcf.tunnel);
3777                                 dev_flow->tcf.tunnel->ifindex_ptr =
3778                                         &((struct tc_mirred *)
3779                                         mnl_attr_get_payload
3780                                         (mnl_nlmsg_get_payload_tail
3781                                                 (nlh)))->ifindex;
3782                         }
3783                         mnl_attr_put(nlh, TCA_MIRRED_PARMS,
3784                                      sizeof(struct tc_mirred),
3785                                      &(struct tc_mirred){
3786                                         .action = TC_ACT_STOLEN,
3787                                         .eaction = TCA_EGRESS_REDIR,
3788                                         .ifindex = ptoi[i].ifindex,
3789                                      });
3790                         mnl_attr_nest_end(nlh, na_act);
3791                         mnl_attr_nest_end(nlh, na_act_index);
3792                         break;
3793                 case RTE_FLOW_ACTION_TYPE_JUMP:
3794                         conf.jump = actions->conf;
3795                         na_act_index =
3796                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3797                         assert(na_act_index);
3798                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
3799                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3800                         assert(na_act);
3801                         mnl_attr_put(nlh, TCA_GACT_PARMS,
3802                                      sizeof(struct tc_gact),
3803                                      &(struct tc_gact){
3804                                         .action = TC_ACT_GOTO_CHAIN |
3805                                                   conf.jump->group,
3806                                      });
3807                         mnl_attr_nest_end(nlh, na_act);
3808                         mnl_attr_nest_end(nlh, na_act_index);
3809                         break;
3810                 case RTE_FLOW_ACTION_TYPE_DROP:
3811                         na_act_index =
3812                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3813                         assert(na_act_index);
3814                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
3815                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3816                         assert(na_act);
3817                         mnl_attr_put(nlh, TCA_GACT_PARMS,
3818                                      sizeof(struct tc_gact),
3819                                      &(struct tc_gact){
3820                                         .action = TC_ACT_SHOT,
3821                                      });
3822                         mnl_attr_nest_end(nlh, na_act);
3823                         mnl_attr_nest_end(nlh, na_act_index);
3824                         break;
3825                 case RTE_FLOW_ACTION_TYPE_COUNT:
3826                         /*
3827                          * Driver adds the count action implicitly for
3828                          * each rule it creates.
3829                          */
3830                         ret = flow_tcf_translate_action_count(dev,
3831                                                               dev_flow, error);
3832                         if (ret < 0)
3833                                 return ret;
3834                         break;
3835                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3836                         conf.of_push_vlan = NULL;
3837                         vlan_act = TCA_VLAN_ACT_POP;
3838                         goto action_of_vlan;
3839                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3840                         conf.of_push_vlan = actions->conf;
3841                         vlan_act = TCA_VLAN_ACT_PUSH;
3842                         goto action_of_vlan;
3843                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3844                         conf.of_set_vlan_vid = actions->conf;
3845                         if (na_vlan_id)
3846                                 goto override_na_vlan_id;
3847                         vlan_act = TCA_VLAN_ACT_MODIFY;
3848                         goto action_of_vlan;
3849                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3850                         conf.of_set_vlan_pcp = actions->conf;
3851                         if (na_vlan_priority)
3852                                 goto override_na_vlan_priority;
3853                         vlan_act = TCA_VLAN_ACT_MODIFY;
3854                         goto action_of_vlan;
3855 action_of_vlan:
3856                         na_act_index =
3857                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3858                         assert(na_act_index);
3859                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "vlan");
3860                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3861                         assert(na_act);
3862                         mnl_attr_put(nlh, TCA_VLAN_PARMS,
3863                                      sizeof(struct tc_vlan),
3864                                      &(struct tc_vlan){
3865                                         .action = TC_ACT_PIPE,
3866                                         .v_action = vlan_act,
3867                                      });
3868                         if (vlan_act == TCA_VLAN_ACT_POP) {
3869                                 mnl_attr_nest_end(nlh, na_act);
3870                                 mnl_attr_nest_end(nlh, na_act_index);
3871                                 break;
3872                         }
3873                         if (vlan_act == TCA_VLAN_ACT_PUSH)
3874                                 mnl_attr_put_u16(nlh,
3875                                                  TCA_VLAN_PUSH_VLAN_PROTOCOL,
3876                                                  conf.of_push_vlan->ethertype);
3877                         na_vlan_id = mnl_nlmsg_get_payload_tail(nlh);
3878                         mnl_attr_put_u16(nlh, TCA_VLAN_PAD, 0);
3879                         na_vlan_priority = mnl_nlmsg_get_payload_tail(nlh);
3880                         mnl_attr_put_u8(nlh, TCA_VLAN_PAD, 0);
3881                         mnl_attr_nest_end(nlh, na_act);
3882                         mnl_attr_nest_end(nlh, na_act_index);
3883                         if (actions->type ==
3884                             RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
3885 override_na_vlan_id:
3886                                 na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
3887                                 *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
3888                                         rte_be_to_cpu_16
3889                                         (conf.of_set_vlan_vid->vlan_vid);
3890                         } else if (actions->type ==
3891                                    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
3892 override_na_vlan_priority:
3893                                 na_vlan_priority->nla_type =
3894                                         TCA_VLAN_PUSH_VLAN_PRIORITY;
3895                                 *(uint8_t *)mnl_attr_get_payload
3896                                         (na_vlan_priority) =
3897                                         conf.of_set_vlan_pcp->vlan_pcp;
3898                         }
3899                         break;
3900                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3901                         assert(decap.vxlan);
3902                         assert(dev_flow->tcf.tunnel);
3903                         dev_flow->tcf.tunnel->ifindex_ptr =
3904                                 (unsigned int *)&tcm->tcm_ifindex;
3905                         na_act_index =
3906                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3907                         assert(na_act_index);
3908                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "tunnel_key");
3909                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3910                         assert(na_act);
3911                         mnl_attr_put(nlh, TCA_TUNNEL_KEY_PARMS,
3912                                 sizeof(struct tc_tunnel_key),
3913                                 &(struct tc_tunnel_key){
3914                                         .action = TC_ACT_PIPE,
3915                                         .t_action = TCA_TUNNEL_KEY_ACT_RELEASE,
3916                                         });
3917                         mnl_attr_nest_end(nlh, na_act);
3918                         mnl_attr_nest_end(nlh, na_act_index);
3919                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3920                         break;
3921                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3922                         assert(encap.vxlan);
3923                         flow_tcf_vxlan_encap_parse(actions, encap.vxlan);
3924                         na_act_index =
3925                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3926                         assert(na_act_index);
3927                         mnl_attr_put_strz(nlh, TCA_ACT_KIND, "tunnel_key");
3928                         na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
3929                         assert(na_act);
3930                         mnl_attr_put(nlh, TCA_TUNNEL_KEY_PARMS,
3931                                 sizeof(struct tc_tunnel_key),
3932                                 &(struct tc_tunnel_key){
3933                                         .action = TC_ACT_PIPE,
3934                                         .t_action = TCA_TUNNEL_KEY_ACT_SET,
3935                                         });
3936                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_UDP_DST)
3937                                 mnl_attr_put_u16(nlh,
3938                                          TCA_TUNNEL_KEY_ENC_DST_PORT,
3939                                          encap.vxlan->udp.dst);
3940                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV4_SRC)
3941                                 mnl_attr_put_u32(nlh,
3942                                          TCA_TUNNEL_KEY_ENC_IPV4_SRC,
3943                                          encap.vxlan->ipv4.src);
3944                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV4_DST)
3945                                 mnl_attr_put_u32(nlh,
3946                                          TCA_TUNNEL_KEY_ENC_IPV4_DST,
3947                                          encap.vxlan->ipv4.dst);
3948                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV6_SRC)
3949                                 mnl_attr_put(nlh,
3950                                          TCA_TUNNEL_KEY_ENC_IPV6_SRC,
3951                                          sizeof(encap.vxlan->ipv6.src),
3952                                          &encap.vxlan->ipv6.src);
3953                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV6_DST)
3954                                 mnl_attr_put(nlh,
3955                                          TCA_TUNNEL_KEY_ENC_IPV6_DST,
3956                                          sizeof(encap.vxlan->ipv6.dst),
3957                                          &encap.vxlan->ipv6.dst);
3958                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TTL)
3959                                 mnl_attr_put_u8(nlh,
3960                                          TCA_TUNNEL_KEY_ENC_TTL,
3961                                          encap.vxlan->ip_ttl_hop);
3962                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TOS)
3963                                 mnl_attr_put_u8(nlh,
3964                                          TCA_TUNNEL_KEY_ENC_TOS,
3965                                          encap.vxlan->ip_tos);
3966                         if (encap.vxlan->mask & FLOW_TCF_ENCAP_VXLAN_VNI)
3967                                 mnl_attr_put_u32(nlh,
3968                                          TCA_TUNNEL_KEY_ENC_KEY_ID,
3969                                          vxlan_vni_as_be32
3970                                                 (encap.vxlan->vxlan.vni));
3971                         mnl_attr_put_u8(nlh, TCA_TUNNEL_KEY_NO_CSUM, 0);
3972                         mnl_attr_nest_end(nlh, na_act);
3973                         mnl_attr_nest_end(nlh, na_act_index);
3974                         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
3975                         break;
3976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3977                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3978                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3979                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3980                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3981                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3982                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3983                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3984                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3985                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3986                         na_act_index =
3987                                 mnl_attr_nest_start(nlh, na_act_index_cur++);
3988                         flow_tcf_create_pedit_mnl_msg(nlh,
3989                                                       &actions, item_flags);
3990                         mnl_attr_nest_end(nlh, na_act_index);
3991                         break;
3992                 default:
3993                         return rte_flow_error_set(error, ENOTSUP,
3994                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3995                                                   actions,
3996                                                   "action not supported");
3997                 }
3998         }
3999         assert(na_flower);
4000         assert(na_flower_act);
4001         mnl_attr_nest_end(nlh, na_flower_act);
4002         dev_flow->tcf.ptc_flags = mnl_attr_get_payload
4003                                         (mnl_nlmsg_get_payload_tail(nlh));
4004         mnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS, decap.vxlan ?
4005                                                 0 : TCA_CLS_FLAGS_SKIP_SW);
4006         mnl_attr_nest_end(nlh, na_flower);
4007         if (dev_flow->tcf.tunnel && dev_flow->tcf.tunnel->ifindex_ptr)
4008                 dev_flow->tcf.tunnel->ifindex_org =
4009                         *dev_flow->tcf.tunnel->ifindex_ptr;
4010         assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
4011         return 0;
4012 }
4013
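/*
 * Resulting classifier message layout produced by the translation above
 * (informal sketch; attribute order follows the code):
 *
 *   TCA_OPTIONS {
 *       TCA_FLOWER_KEY_* match attributes ...
 *       TCA_FLOWER_ACT {
 *           1 { TCA_ACT_KIND, TCA_ACT_OPTIONS { ... } }
 *           2 { ... }
 *       }
 *       TCA_FLOWER_FLAGS
 *   }
 */
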
4014 /**
4015  * Send Netlink message with acknowledgment.
4016  *
4017  * @param tcf
4018  *   Flow context to use.
4019  * @param nlh
4020  *   Message to send. This function always raises the NLM_F_ACK flag before
4021  *   sending.
4022  * @param[in] cb
4023  *   Callback handler for received message.
4024  * @param[in] arg
4025  *   Context pointer for callback handler.
4026  *
4027  * @return
4028  *   0 on success, a negative errno value otherwise and rte_errno is set.
4029  */
4030 static int
4031 flow_tcf_nl_ack(struct mlx5_flow_tcf_context *tcf,
4032                 struct nlmsghdr *nlh,
4033                 mnl_cb_t cb, void *arg)
4034 {
4035         unsigned int portid = mnl_socket_get_portid(tcf->nl);
4036         uint32_t seq = tcf->seq++;
4037         int ret, err = 0;
4038
4039         assert(tcf->nl);
4040         assert(tcf->buf);
4041         if (!seq) {
4042                 /* seq 0 is reserved for kernel event-driven notifications. */
4043                 seq = tcf->seq++;
4044         }
4045         nlh->nlmsg_seq = seq;
4046         nlh->nlmsg_flags |= NLM_F_ACK;
4047         ret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);
4048         if (ret <= 0) {
4049                 /* A message send error occurred. */
4050                 rte_errno = errno;
4051                 return -rte_errno;
4052         }
4053         nlh = (struct nlmsghdr *)(tcf->buf);
4054         /*
4055          * The following loop postpones non-fatal errors until multipart
4056          * messages are complete.
4057          */
4058         while (true) {
4059                 ret = mnl_socket_recvfrom(tcf->nl, tcf->buf, tcf->buf_size);
4060                 if (ret < 0) {
4061                         err = errno;
4062                         /*
4063                          * In case of buffer overflow keep receiving till the
4064                          * end of the multipart message. We may lose part of
4065                          * the reply messages but still mark and return an error.
4066                          */
4067                         if (err != ENOSPC ||
4068                             !(nlh->nlmsg_flags & NLM_F_MULTI) ||
4069                             nlh->nlmsg_type == NLMSG_DONE)
4070                                 break;
4071                 } else {
4072                         ret = mnl_cb_run(nlh, ret, seq, portid, cb, arg);
4073                         if (!ret) {
4074                                 /*
4075                                  * libmnl returns 0 if DONE or
4076                                  * success ACK message found.
4077                                  */
4078                                 break;
4079                         }
4080                         if (ret < 0) {
4081                                 /*
4082                                  * ACK message with error found
4083                                  * or some error occurred.
4084                                  */
4085                                 err = errno;
4086                                 break;
4087                         }
4088                         /* We should continue receiving. */
4089                 }
4090         }
4091         if (!err)
4092                 return 0;
4093         rte_errno = err;
4094         return -err;
4095 }
4096
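/*
 * Usage sketch for flow_tcf_nl_ack() (illustrative only, not used by the
 * driver; function and callback names are hypothetical): dump all links
 * and count the replies, mirroring the collect/cleanup pattern below.
 */
static int
flow_tcf_example_count_cb(const struct nlmsghdr *nlh __rte_unused, void *arg)
{
	++*(unsigned int *)arg;
	return 1; /* Continue processing the dump. */
}

static int __rte_unused
flow_tcf_example_link_dump(struct mlx5_flow_tcf_context *tcf,
			   unsigned int *count)
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;

	nlh = mnl_nlmsg_put_header(tcf->buf);
	nlh->nlmsg_type = RTM_GETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;
	*count = 0;
	/* NLM_F_ACK and the sequence number are set by flow_tcf_nl_ack(). */
	return flow_tcf_nl_ack(tcf, nlh, flow_tcf_example_count_cb, count);
}
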
4097 #define MNL_BUF_EXTRA_SPACE 16
4098 #define MNL_REQUEST_SIZE_MIN 256
4099 #define MNL_REQUEST_SIZE_MAX 2048
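/*
 * Request buffer size: the system page size clamped into the
 * [MNL_REQUEST_SIZE_MIN, MNL_REQUEST_SIZE_MAX] byte range.
 */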
4100 #define MNL_REQUEST_SIZE RTE_MIN(RTE_MAX(sysconf(_SC_PAGESIZE), \
4101                                  MNL_REQUEST_SIZE_MIN), MNL_REQUEST_SIZE_MAX)
4102
4103 /* Data structures used by flow_tcf_xxx_cb() routines. */
4104 struct tcf_nlcb_buf {
4105         LIST_ENTRY(tcf_nlcb_buf) next;
4106         uint32_t size;
4107         alignas(struct nlmsghdr)
4108         uint8_t msg[]; /**< Netlink message data. */
4109 };
4110
4111 struct tcf_nlcb_context {
4112         unsigned int ifindex; /**< Base interface index. */
4113         uint32_t bufsize;
4114         LIST_HEAD(, tcf_nlcb_buf) nlbuf;
4115 };
4116
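/*
 * The flow_tcf_collect_*_cb() callbacks below translate each matching
 * kernel reply into a ready-to-send deletion command appended to
 * ctx->nlbuf; flow_tcf_send_nlcmd() later flushes and frees the list.
 */
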
4117 /**
4118  * Allocate space for a Netlink command in the buffer list.
4119  *
4120  * @param[in, out] ctx
4121  *   Pointer to callback context with command buffers list.
4122  * @param[in] size
4123  *   Required size of data buffer to be allocated.
4124  *
4125  * @return
4126  *   Pointer to allocated memory, aligned as a message header.
4127  *   NULL if an error occurred.
4128  */
4129 static struct nlmsghdr *
4130 flow_tcf_alloc_nlcmd(struct tcf_nlcb_context *ctx, uint32_t size)
4131 {
4132         struct tcf_nlcb_buf *buf;
4133         struct nlmsghdr *nlh;
4134
4135         size = NLMSG_ALIGN(size);
4136         buf = LIST_FIRST(&ctx->nlbuf);
4137         if (buf && (buf->size + size) <= ctx->bufsize) {
4138                 nlh = (struct nlmsghdr *)&buf->msg[buf->size];
4139                 buf->size += size;
4140                 return nlh;
4141         }
4142         if (size > ctx->bufsize) {
4143                 DRV_LOG(WARNING, "netlink: too long command buffer requested");
4144                 return NULL;
4145         }
4146         buf = rte_malloc(__func__,
4147                         ctx->bufsize + sizeof(struct tcf_nlcb_buf),
4148                         alignof(struct tcf_nlcb_buf));
4149         if (!buf) {
4150                 DRV_LOG(WARNING, "netlink: no memory for command buffer");
4151                 return NULL;
4152         }
4153         LIST_INSERT_HEAD(&ctx->nlbuf, buf, next);
4154         buf->size = size;
4155         nlh = (struct nlmsghdr *)&buf->msg[0];
4156         return nlh;
4157 }
4158
4159 /**
4160  * Send the buffers with prepared Netlink commands. Scans the list and
4161  * sends all found buffers. Buffers are sent and freed in any case in
4162  * order to prevent memory leakage if an error occurs for some message.
4163  *
4164  * @param[in] tcf
4165  *   Context object initialized by mlx5_flow_tcf_context_create().
4166  * @param[in, out] ctx
4167  *   Pointer to callback context with command buffers list.
4168  *
4169  * @return
4170  *   Zero value on success, negative errno value otherwise
4171  *   and rte_errno is set.
4172  */
4173 static int
4174 flow_tcf_send_nlcmd(struct mlx5_flow_tcf_context *tcf,
4175                     struct tcf_nlcb_context *ctx)
4176 {
4177         struct tcf_nlcb_buf *bc = LIST_FIRST(&ctx->nlbuf);
4178         int ret = 0;
4179
4180         while (bc) {
4181                 struct tcf_nlcb_buf *bn = LIST_NEXT(bc, next);
4182                 struct nlmsghdr *nlh;
4183                 uint32_t msg = 0;
4184                 int rc;
4185
4186                 while (msg < bc->size) {
4187                         /*
4188                          * Send the Netlink commands from the buffer one by
4189                          * one. If we sent multiple rule deletion commands in
4190                          * one Netlink message and some error occurred, it
4191                          * could cause multiple ACK error messages and break
4192                          * the sequence numbers of the Netlink communication,
4193                          * because we expect only one ACK reply.
4194                          */
4195                         assert((bc->size - msg) >= sizeof(struct nlmsghdr));
4196                         nlh = (struct nlmsghdr *)&bc->msg[msg];
4197                         assert((bc->size - msg) >= nlh->nlmsg_len);
4198                         msg += nlh->nlmsg_len;
4199                         rc = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
4200                         if (rc) {
4201                                 DRV_LOG(WARNING,
4202                                         "netlink: cleanup error %d", rc);
4203                                 if (!ret)
4204                                         ret = rc;
4205                         }
4206                 }
4207                 rte_free(bc);
4208                 bc = bn;
4209         }
4210         LIST_INIT(&ctx->nlbuf);
4211         return ret;
4212 }
4213
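/*
 * Typical collect-and-flush sequence (sketch following the cleanup
 * routines below; flow_tcf_collect_xxx_cb stands for any collector):
 *
 *	struct tcf_nlcb_context ctx = {
 *		.ifindex = ifindex,
 *		.bufsize = MNL_REQUEST_SIZE,
 *		.nlbuf = LIST_HEAD_INITIALIZER(),
 *	};
 *
 *	flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_xxx_cb, &ctx);
 *	flow_tcf_send_nlcmd(tcf, &ctx);
 */
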
4214 /**
4215  * Collect local IP address rules with the scope link attribute on the
4216  * specified network device. This is a callback routine called by libmnl
4217  * mnl_cb_run() in a loop for every message in the received packet.
4218  *
4219  * @param[in] nlh
4220  *   Pointer to reply header.
4221  * @param[in, out] arg
4222  *   Opaque data pointer for this callback.
4223  *
4224  * @return
4225  *   A positive, nonzero value on success, a negative errno value otherwise
4226  *   and rte_errno is set.
4227  */
4228 static int
4229 flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
4230 {
4231         struct tcf_nlcb_context *ctx = arg;
4232         struct nlmsghdr *cmd;
4233         struct ifaddrmsg *ifa;
4234         struct nlattr *na;
4235         struct nlattr *na_local = NULL;
4236         struct nlattr *na_peer = NULL;
4237         unsigned char family;
4238         uint32_t size;
4239
4240         if (nlh->nlmsg_type != RTM_NEWADDR) {
4241                 rte_errno = EINVAL;
4242                 return -rte_errno;
4243         }
4244         ifa = mnl_nlmsg_get_payload(nlh);
4245         family = ifa->ifa_family;
4246         if (ifa->ifa_index != ctx->ifindex ||
4247             ifa->ifa_scope != RT_SCOPE_LINK ||
4248             !(ifa->ifa_flags & IFA_F_PERMANENT) ||
4249             (family != AF_INET && family != AF_INET6))
4250                 return 1;
4251         mnl_attr_for_each(na, nlh, sizeof(*ifa)) {
4252                 switch (mnl_attr_get_type(na)) {
4253                 case IFA_LOCAL:
4254                         na_local = na;
4255                         break;
4256                 case IFA_ADDRESS:
4257                         na_peer = na;
4258                         break;
4259                 }
4260                 if (na_local && na_peer)
4261                         break;
4262         }
4263         if (!na_local || !na_peer)
4264                 return 1;
4265         /* Local rule found with scope link, permanent and assigned peer. */
4266         size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
4267                MNL_ALIGN(sizeof(struct ifaddrmsg)) +
4268                (family == AF_INET6 ? 2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
4269                                    : 2 * SZ_NLATTR_TYPE_OF(uint32_t));
4270         cmd = flow_tcf_alloc_nlcmd(ctx, size);
4271         if (!cmd) {
4272                 rte_errno = ENOMEM;
4273                 return -rte_errno;
4274         }
4275         cmd = mnl_nlmsg_put_header(cmd);
4276         cmd->nlmsg_type = RTM_DELADDR;
4277         cmd->nlmsg_flags = NLM_F_REQUEST;
4278         ifa = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifa));
4279         ifa->ifa_flags = IFA_F_PERMANENT;
4280         ifa->ifa_scope = RT_SCOPE_LINK;
4281         ifa->ifa_index = ctx->ifindex;
4282         if (family == AF_INET) {
4283                 ifa->ifa_family = AF_INET;
4284                 ifa->ifa_prefixlen = 32;
4285                 mnl_attr_put_u32(cmd, IFA_LOCAL, mnl_attr_get_u32(na_local));
4286                 mnl_attr_put_u32(cmd, IFA_ADDRESS, mnl_attr_get_u32(na_peer));
4287         } else {
4288                 ifa->ifa_family = AF_INET6;
4289                 ifa->ifa_prefixlen = 128;
4290                 mnl_attr_put(cmd, IFA_LOCAL, IPV6_ADDR_LEN,
4291                         mnl_attr_get_payload(na_local));
4292                 mnl_attr_put(cmd, IFA_ADDRESS, IPV6_ADDR_LEN,
4293                         mnl_attr_get_payload(na_peer));
4294         }
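	/*
	 * The stored command is the Netlink equivalent of (sketch):
	 *   ip addr del <local> peer <peer> dev <outer device> scope link
	 */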
4295         assert(size == cmd->nlmsg_len);
4296         return 1;
4297 }
4298
4299 /**
4300  * Cleanup the local IP addresses on the outer interface.
4301  *
4302  * @param[in] tcf
4303  *   Context object initialized by mlx5_flow_tcf_context_create().
4304  * @param[in] ifindex
4305  *   Network interface index to perform the cleanup on.
4306  */
4307 static void
4308 flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,
4309                             unsigned int ifindex)
4310 {
4311         struct nlmsghdr *nlh;
4312         struct ifaddrmsg *ifa;
4313         struct tcf_nlcb_context ctx = {
4314                 .ifindex = ifindex,
4315                 .bufsize = MNL_REQUEST_SIZE,
4316                 .nlbuf = LIST_HEAD_INITIALIZER(),
4317         };
4318         int ret;
4319
4320         assert(ifindex);
4321         /*
4322          * Seek and destroy leftover local IP addresses with the
4323          * matching property "scope link".
4324          */
4325         nlh = mnl_nlmsg_put_header(tcf->buf);
4326         nlh->nlmsg_type = RTM_GETADDR;
4327         nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
4328         ifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa));
4329         ifa->ifa_family = AF_UNSPEC;
4330         ifa->ifa_index = ifindex;
4331         ifa->ifa_scope = RT_SCOPE_LINK;
4332         ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_local_cb, &ctx);
4333         if (ret)
4334                 DRV_LOG(WARNING, "netlink: query device list error %d", ret);
4335         ret = flow_tcf_send_nlcmd(tcf, &ctx);
4336         if (ret)
4337                 DRV_LOG(WARNING, "netlink: device delete error %d", ret);
4338 }
4339
4340 /**
4341  * Collect permanent neigh rules on the specified network device.
4342  * This is a callback routine called by libmnl mnl_cb_run() in a loop for
4343  * every message in the received packet.
4344  *
4345  * @param[in] nlh
4346  *   Pointer to reply header.
4347  * @param[in, out] arg
4348  *   Opaque data pointer for this callback.
4349  *
4350  * @return
4351  *   A positive, nonzero value on success, a negative errno value otherwise
4352  *   and rte_errno is set.
4353  */
4354 static int
4355 flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
4356 {
4357         struct tcf_nlcb_context *ctx = arg;
4358         struct nlmsghdr *cmd;
4359         struct ndmsg *ndm;
4360         struct nlattr *na;
4361         struct nlattr *na_ip = NULL;
4362         struct nlattr *na_mac = NULL;
4363         unsigned char family;
4364         uint32_t size;
4365
4366         if (nlh->nlmsg_type != RTM_NEWNEIGH) {
4367                 rte_errno = EINVAL;
4368                 return -rte_errno;
4369         }
4370         ndm = mnl_nlmsg_get_payload(nlh);
4371         family = ndm->ndm_family;
4372         if (ndm->ndm_ifindex != (int)ctx->ifindex ||
4373            !(ndm->ndm_state & NUD_PERMANENT) ||
4374            (family != AF_INET && family != AF_INET6))
4375                 return 1;
4376         mnl_attr_for_each(na, nlh, sizeof(*ndm)) {
4377                 switch (mnl_attr_get_type(na)) {
4378                 case NDA_DST:
4379                         na_ip = na;
4380                         break;
4381                 case NDA_LLADDR:
4382                         na_mac = na;
4383                         break;
4384                 }
4385                 if (na_mac && na_ip)
4386                         break;
4387         }
4388         if (!na_mac || !na_ip)
4389                 return 1;
4390         /* Neigh rule with the permanent attribute found. */
4391         size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
4392                MNL_ALIGN(sizeof(struct ndmsg)) +
4393                SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
4394                (family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
4395                                    : SZ_NLATTR_TYPE_OF(uint32_t));
4396         cmd = flow_tcf_alloc_nlcmd(ctx, size);
4397         if (!cmd) {
4398                 rte_errno = ENOMEM;
4399                 return -rte_errno;
4400         }
4401         cmd = mnl_nlmsg_put_header(cmd);
4402         cmd->nlmsg_type = RTM_DELNEIGH;
4403         cmd->nlmsg_flags = NLM_F_REQUEST;
4404         ndm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ndm));
4405         ndm->ndm_ifindex = ctx->ifindex;
4406         ndm->ndm_state = NUD_PERMANENT;
4407         ndm->ndm_flags = 0;
4408         ndm->ndm_type = 0;
4409         if (family == AF_INET) {
4410                 ndm->ndm_family = AF_INET;
4411                 mnl_attr_put_u32(cmd, NDA_DST, mnl_attr_get_u32(na_ip));
4412         } else {
4413                 ndm->ndm_family = AF_INET6;
4414                 mnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,
4415                              mnl_attr_get_payload(na_ip));
4416         }
4417         mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
4418                      mnl_attr_get_payload(na_mac));
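	/*
	 * The stored command is the Netlink equivalent of (sketch):
	 *   ip neigh del <ip> lladdr <mac> dev <outer device> nud permanent
	 */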
4419         assert(size == cmd->nlmsg_len);
4420         return 1;
4421 }
4422
4423 /**
4424  * Cleanup the neigh rules on the outer interface.
4425  *
4426  * @param[in] tcf
4427  *   Context object initialized by mlx5_flow_tcf_context_create().
4428  * @param[in] ifindex
4429  *   Network interface index to perform the cleanup on.
4430  */
4431 static void
4432 flow_tcf_encap_neigh_cleanup(struct mlx5_flow_tcf_context *tcf,
4433                             unsigned int ifindex)
4434 {
4435         struct nlmsghdr *nlh;
4436         struct ndmsg *ndm;
4437         struct tcf_nlcb_context ctx = {
4438                 .ifindex = ifindex,
4439                 .bufsize = MNL_REQUEST_SIZE,
4440                 .nlbuf = LIST_HEAD_INITIALIZER(),
4441         };
4442         int ret;
4443
4444         assert(ifindex);
4445         /* Seek and destroy leftover neigh rules. */
4446         nlh = mnl_nlmsg_put_header(tcf->buf);
4447         nlh->nlmsg_type = RTM_GETNEIGH;
4448         nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
4449         ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
4450         ndm->ndm_family = AF_UNSPEC;
4451         ndm->ndm_ifindex = ifindex;
4452         ndm->ndm_state = NUD_PERMANENT;
4453         ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_neigh_cb, &ctx);
4454         if (ret)
4455                 DRV_LOG(WARNING, "netlink: query device list error %d", ret);
4456         ret = flow_tcf_send_nlcmd(tcf, &ctx);
4457         if (ret)
4458                 DRV_LOG(WARNING, "netlink: device delete error %d", ret);
4459 }
4460
4461 /**
4462  * Collect indices of VXLAN encap/decap interfaces associated with the
4463  * device. This is a callback routine called by libmnl mnl_cb_run() in a
4464  * loop for every message in the received packet.
4465  *
4466  * @param[in] nlh
4467  *   Pointer to reply header.
4468  * @param[in, out] arg
4469  *   Opaque data pointer for this callback.
4470  *
4471  * @return
4472  *   A positive, nonzero value on success, a negative errno value otherwise
4473  *   and rte_errno is set.
4474  */
4475 static int
4476 flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
4477 {
4478         struct tcf_nlcb_context *ctx = arg;
4479         struct nlmsghdr *cmd;
4480         struct ifinfomsg *ifm;
4481         struct nlattr *na;
4482         struct nlattr *na_info = NULL;
4483         struct nlattr *na_vxlan = NULL;
4484         bool found = false;
4485         unsigned int vxindex;
4486         uint32_t size;
4487
4488         if (nlh->nlmsg_type != RTM_NEWLINK) {
4489                 rte_errno = EINVAL;
4490                 return -rte_errno;
4491         }
4492         ifm = mnl_nlmsg_get_payload(nlh);
4493         if (!ifm->ifi_index) {
4494                 rte_errno = EINVAL;
4495                 return -rte_errno;
4496         }
4497         mnl_attr_for_each(na, nlh, sizeof(*ifm))
4498                 if (mnl_attr_get_type(na) == IFLA_LINKINFO) {
4499                         na_info = na;
4500                         break;
4501                 }
4502         if (!na_info)
4503                 return 1;
4504         mnl_attr_for_each_nested(na, na_info) {
4505                 switch (mnl_attr_get_type(na)) {
4506                 case IFLA_INFO_KIND:
4507                         if (!strncmp("vxlan", mnl_attr_get_str(na),
4508                                      mnl_attr_get_len(na)))
4509                                 found = true;
4510                         break;
4511                 case IFLA_INFO_DATA:
4512                         na_vxlan = na;
4513                         break;
4514                 }
4515                 if (found && na_vxlan)
4516                         break;
4517         }
4518         if (!found || !na_vxlan)
4519                 return 1;
4520         found = false;
4521         mnl_attr_for_each_nested(na, na_vxlan) {
4522                 if (mnl_attr_get_type(na) == IFLA_VXLAN_LINK &&
4523                     mnl_attr_get_u32(na) == ctx->ifindex) {
4524                         found = true;
4525                         break;
4526                 }
4527         }
4528         if (!found)
4529                 return 1;
4530         /* Attached VXLAN device found, store the command to delete. */
4531         vxindex = ifm->ifi_index;
4532         size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
4533                MNL_ALIGN(sizeof(struct ifinfomsg));
4534         cmd = flow_tcf_alloc_nlcmd(ctx, size);
4535         if (!cmd) {
4536                 rte_errno = ENOMEM;
4537                 return -rte_errno;
4538         }
4539         cmd = mnl_nlmsg_put_header(cmd);
4540         cmd->nlmsg_type = RTM_DELLINK;
4541         cmd->nlmsg_flags = NLM_F_REQUEST;
4542         ifm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifm));
4543         ifm->ifi_family = AF_UNSPEC;
4544         ifm->ifi_index = vxindex;
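	/*
	 * The stored command is the Netlink equivalent of (sketch):
	 *   ip link del <vxlan device>
	 */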
4545         assert(size == cmd->nlmsg_len);
4546         return 1;
4547 }
4548
4549 /**
4550  * Cleanup the outer interface. Removes all found vxlan devices
4551  * attached to the specified index, and flushes the neigh and local IP
4552  * databases.
4553  *
4554  * @param[in] tcf
4555  *   Context object initialized by mlx5_flow_tcf_context_create().
4556  * @param[in] ifindex
4557  *   Network interface index to perform the cleanup on.
4558  */
4559 static void
4560 flow_tcf_encap_iface_cleanup(struct mlx5_flow_tcf_context *tcf,
4561                             unsigned int ifindex)
4562 {
4563         struct nlmsghdr *nlh;
4564         struct ifinfomsg *ifm;
4565         struct tcf_nlcb_context ctx = {
4566                 .ifindex = ifindex,
4567                 .bufsize = MNL_REQUEST_SIZE,
4568                 .nlbuf = LIST_HEAD_INITIALIZER(),
4569         };
4570         int ret;
4571
4572         assert(ifindex);
4573         /*
4574          * Seek and destroy leftover VXLAN encap/decap interfaces with
4575          * matching properties.
4576          */
4577         nlh = mnl_nlmsg_put_header(tcf->buf);
4578         nlh->nlmsg_type = RTM_GETLINK;
4579         nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
4580         ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
4581         ifm->ifi_family = AF_UNSPEC;
4582         ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_vxlan_cb, &ctx);
4583         if (ret)
4584                 DRV_LOG(WARNING, "netlink: query device list error %d", ret);
4585         ret = flow_tcf_send_nlcmd(tcf, &ctx);
4586         if (ret)
4587                 DRV_LOG(WARNING, "netlink: device delete error %d", ret);
4588 }
4589
4590 /**
4591  * Emit a Netlink message to add/remove a local address on the outer device.
4592  * The address being added is visible within the link only (scope link).
4593  *
4594  * Note that an implicit route is maintained by the kernel due to the
4595  * presence of a peer address (IFA_ADDRESS).
4596  *
4597  * These rules are used for encapsulation only and allow assigning
4598  * the outer tunnel source IP address.
4599  *
4600  * @param[in] tcf
4601  *   Libmnl socket context object.
4602  * @param[in] encap
4603  *   Encapsulation properties (source address and its peer).
4604  * @param[in] ifindex
4605  *   Network interface to apply rule.
4606  * @param[in] enable
4607  *   Toggle between add and remove.
4608  * @param[out] error
4609  *   Perform verbose error reporting if not NULL.
4610  *
4611  * @return
4612  *   0 on success, a negative errno value otherwise and rte_errno is set.
4613  */
4614 static int
4615 flow_tcf_rule_local(struct mlx5_flow_tcf_context *tcf,
4616                     const struct flow_tcf_vxlan_encap *encap,
4617                     unsigned int ifindex,
4618                     bool enable,
4619                     struct rte_flow_error *error)
4620 {
4621         struct nlmsghdr *nlh;
4622         struct ifaddrmsg *ifa;
4623         alignas(struct nlmsghdr)
4624         uint8_t buf[mnl_nlmsg_size(sizeof(*ifa) + 128)];
4625
4626         nlh = mnl_nlmsg_put_header(buf);
4627         nlh->nlmsg_type = enable ? RTM_NEWADDR : RTM_DELADDR;
4628         nlh->nlmsg_flags =
4629                 NLM_F_REQUEST | (enable ? NLM_F_CREATE | NLM_F_REPLACE : 0);
4630         nlh->nlmsg_seq = 0;
4631         ifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa));
4632         ifa->ifa_flags = IFA_F_PERMANENT;
4633         ifa->ifa_scope = RT_SCOPE_LINK;
4634         ifa->ifa_index = ifindex;
4635         if (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {
4636                 ifa->ifa_family = AF_INET;
4637                 ifa->ifa_prefixlen = 32;
4638                 mnl_attr_put_u32(nlh, IFA_LOCAL, encap->ipv4.src);
4639                 if (encap->mask & FLOW_TCF_ENCAP_IPV4_DST)
4640                         mnl_attr_put_u32(nlh, IFA_ADDRESS,
4641                                               encap->ipv4.dst);
4642         } else {
4643                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);
4644                 ifa->ifa_family = AF_INET6;
4645                 ifa->ifa_prefixlen = 128;
4646                 mnl_attr_put(nlh, IFA_LOCAL,
4647                                   sizeof(encap->ipv6.src),
4648                                   &encap->ipv6.src);
4649                 if (encap->mask & FLOW_TCF_ENCAP_IPV6_DST)
4650                         mnl_attr_put(nlh, IFA_ADDRESS,
4651                                           sizeof(encap->ipv6.dst),
4652                                           &encap->ipv6.dst);
4653         }
4654         if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
4655                 return 0;
4656         return rte_flow_error_set(error, rte_errno,
4657                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4658                                   "netlink: cannot complete IFA request"
4659                                   " (ip addr add)");
4660 }
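
/*
 * Example (addresses and device are placeholders): for an IPv4 rule the
 * request built above is equivalent to
 *
 *   ip addr add <src_ip>/32 peer <dst_ip> scope link dev <ifouter>
 *
 * while RTM_DELADDR corresponds to the matching "ip addr del".
 */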
4661
4662 /**
4663  * Emit a Netlink message to add/remove a neighbor.
4664  *
4665  * @param[in] tcf
4666  *   Libmnl socket context object.
4667  * @param[in] encap
4668  *   Encapsulation properties (destination address).
4669  * @param[in] ifindex
4670  *   Network interface.
4671  * @param[in] enable
4672  *   Toggle between add and remove.
4673  * @param[out] error
4674  *   Perform verbose error reporting if not NULL.
4675  *
4676  * @return
4677  *   0 on success, a negative errno value otherwise and rte_errno is set.
4678  */
4679 static int
4680 flow_tcf_rule_neigh(struct mlx5_flow_tcf_context *tcf,
4681                      const struct flow_tcf_vxlan_encap *encap,
4682                      unsigned int ifindex,
4683                      bool enable,
4684                      struct rte_flow_error *error)
4685 {
4686         struct nlmsghdr *nlh;
4687         struct ndmsg *ndm;
4688         alignas(struct nlmsghdr)
4689         uint8_t buf[mnl_nlmsg_size(sizeof(*ndm) + 128)];
4690
4691         nlh = mnl_nlmsg_put_header(buf);
4692         nlh->nlmsg_type = enable ? RTM_NEWNEIGH : RTM_DELNEIGH;
4693         nlh->nlmsg_flags =
4694                 NLM_F_REQUEST | (enable ? NLM_F_CREATE | NLM_F_REPLACE : 0);
4695         nlh->nlmsg_seq = 0;
4696         ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
4697         ndm->ndm_ifindex = ifindex;
4698         ndm->ndm_state = NUD_PERMANENT;
4699         ndm->ndm_flags = 0;
4700         ndm->ndm_type = 0;
4701         if (encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {
4702                 ndm->ndm_family = AF_INET;
4703                 mnl_attr_put_u32(nlh, NDA_DST, encap->ipv4.dst);
4704         } else {
4705                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);
4706                 ndm->ndm_family = AF_INET6;
4707                 mnl_attr_put(nlh, NDA_DST, sizeof(encap->ipv6.dst),
4708                                                  &encap->ipv6.dst);
4709         }
4710         if (encap->mask & FLOW_TCF_ENCAP_ETH_SRC && enable)
4711                 DRV_LOG(WARNING,
4712                         "outer ethernet source address cannot be "
4713                         "forced for VXLAN encapsulation");
4714         if (encap->mask & FLOW_TCF_ENCAP_ETH_DST)
4715                 mnl_attr_put(nlh, NDA_LLADDR, sizeof(encap->eth.dst),
4716                                                     &encap->eth.dst);
4717         if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
4718                 return 0;
4719         return rte_flow_error_set(error, rte_errno,
4720                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4721                                   "netlink: cannot complete ND request"
4722                                   " (ip neigh)");
4723 }
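
/*
 * Example (addresses and device are placeholders): the request built
 * above is equivalent to
 *
 *   ip neigh add <dst_ip> lladdr <dst_mac> dev <ifouter> nud permanent
 *
 * while RTM_DELNEIGH corresponds to the matching "ip neigh del".
 */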
4724
4725 /**
4726  * Manage the local IP addresses and their peer IP addresses on the
4727  * outer interface for encapsulation purposes. The kernel searches
4728  * for the appropriate egress device for tunnel traffic using the
4729  * outer source IP; this IP should be assigned to the outer network
4730  * device, otherwise the kernel rejects the rule.
4731  *
4732  * Adds or removes the addresses using the Netlink command like this:
4733  *   ip addr add <src_ip> peer <dst_ip> scope link dev <ifouter>
4734  *
4735  * The addresses are local to the netdev ("scope link"), which reduces
4736  * the risk of conflicts. Note that an implicit route is maintained by
4737  * the kernel due to the presence of a peer address (IFA_ADDRESS).
4738  *
4739  * @param[in] tcf
4740  *   Libmnl socket context object.
4741  * @param[in] iface
4742  *   Object, contains rule database and ifouter index.
4743  * @param[in] dev_flow
4744  *   Flow object, contains the tunnel parameters (for encap only).
4745  * @param[in] enable
4746  *   Toggle between add and remove.
4747  * @param[out] error
4748  *   Perform verbose error reporting if not NULL.
4749  *
4750  * @return
4751  *   0 on success, a negative errno value otherwise and rte_errno is set.
4752  */
4753 static int
4754 flow_tcf_encap_local(struct mlx5_flow_tcf_context *tcf,
4755                      struct tcf_irule *iface,
4756                      struct mlx5_flow *dev_flow,
4757                      bool enable,
4758                      struct rte_flow_error *error)
4759 {
4760         const struct flow_tcf_vxlan_encap *encap = dev_flow->tcf.vxlan_encap;
4761         struct tcf_local_rule *rule = NULL;
4762         int ret;
4763
4764         assert(encap);
4765         assert(encap->hdr.type == FLOW_TCF_TUNACT_VXLAN_ENCAP);
4766         if (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {
4767                 assert(encap->mask & FLOW_TCF_ENCAP_IPV4_DST);
4768                 LIST_FOREACH(rule, &iface->local, next) {
4769                         if (rule->mask & FLOW_TCF_ENCAP_IPV4_SRC &&
4770                             encap->ipv4.src == rule->ipv4.src &&
4771                             encap->ipv4.dst == rule->ipv4.dst) {
4772                                 break;
4773                         }
4774                 }
4775         } else {
4776                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);
4777                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);
4778                 LIST_FOREACH(rule, &iface->local, next) {
4779                         if (rule->mask & FLOW_TCF_ENCAP_IPV6_SRC &&
4780                             !memcmp(&encap->ipv6.src, &rule->ipv6.src,
4781                                             sizeof(encap->ipv6.src)) &&
4782                             !memcmp(&encap->ipv6.dst, &rule->ipv6.dst,
4783                                             sizeof(encap->ipv6.dst))) {
4784                                 break;
4785                         }
4786                 }
4787         }
4788         if (rule) {
4789                 if (enable) {
4790                         rule->refcnt++;
4791                         return 0;
4792                 }
4793                 if (!rule->refcnt || !--rule->refcnt) {
4794                         LIST_REMOVE(rule, next);
4795                         return flow_tcf_rule_local(tcf, encap,
4796                                         iface->ifouter, false, error);
4797                 }
4798                 return 0;
4799         }
4800         if (!enable) {
4801                 DRV_LOG(WARNING, "disabling not existing local rule");
4802                 rte_flow_error_set(error, ENOENT,
4803                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4804                                    "disabling not existing local rule");
4805                 return -ENOENT;
4806         }
4807         rule = rte_zmalloc(__func__, sizeof(struct tcf_local_rule),
4808                                 alignof(struct tcf_local_rule));
4809         if (!rule) {
4810                 rte_flow_error_set(error, ENOMEM,
4811                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4812                                    "unable to allocate memory for local rule");
4813                 return -rte_errno;
4814         }
4815         *rule = (struct tcf_local_rule){.refcnt = 0,
4816                                         .mask = 0,
4817                                         };
4818         if (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {
4819                 rule->mask = FLOW_TCF_ENCAP_IPV4_SRC
4820                            | FLOW_TCF_ENCAP_IPV4_DST;
4821                 rule->ipv4.src = encap->ipv4.src;
4822                 rule->ipv4.dst = encap->ipv4.dst;
4823         } else {
4824                 rule->mask = FLOW_TCF_ENCAP_IPV6_SRC
4825                            | FLOW_TCF_ENCAP_IPV6_DST;
4826                 memcpy(&rule->ipv6.src, &encap->ipv6.src, IPV6_ADDR_LEN);
4827                 memcpy(&rule->ipv6.dst, &encap->ipv6.dst, IPV6_ADDR_LEN);
4828         }
4829         ret = flow_tcf_rule_local(tcf, encap, iface->ifouter, true, error);
4830         if (ret) {
4831                 rte_free(rule);
4832                 return ret;
4833         }
4834         rule->refcnt++;
4835         LIST_INSERT_HEAD(&iface->local, rule, next);
4836         return 0;
4837 }
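
/*
 * Reference counting sketch (flow_a, flow_b and e are hypothetical):
 * two flows sharing the same source/peer pair reuse a single rule, and
 * the kernel address is removed only when the last user disables it:
 *
 *   flow_tcf_encap_local(tcf, iface, flow_a, true, e);  refcnt: 0 -> 1
 *   flow_tcf_encap_local(tcf, iface, flow_b, true, e);  refcnt: 1 -> 2
 *   flow_tcf_encap_local(tcf, iface, flow_b, false, e); refcnt: 2 -> 1
 *   flow_tcf_encap_local(tcf, iface, flow_a, false, e); rule removed
 */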
4838
4839 /**
4840  * Manage the neigh database of destination MAC/IP addresses; the kernel
4841  * uses it to determine the destination MAC address for the encapsulation
4842  * header. Adds or removes the entries using a Netlink command like this:
4843  *   ip neigh add dev <ifouter> lladdr <dst_mac> to <dst_ip> nud permanent
4844  *
4845  * @param[in] tcf
4846  *   Libmnl socket context object.
4847  * @param[in] iface
4848  *   Object, contains rule database and ifouter index.
4849  * @param[in] dev_flow
4850  *   Flow object, contains the tunnel parameters (for encap only).
4851  * @param[in] enable
4852  *   Toggle between add and remove.
4853  * @param[out] error
4854  *   Perform verbose error reporting if not NULL.
4855  *
4856  * @return
4857  *   0 on success, a negative errno value otherwise and rte_errno is set.
4858  */
4859 static int
4860 flow_tcf_encap_neigh(struct mlx5_flow_tcf_context *tcf,
4861                      struct tcf_irule *iface,
4862                      struct mlx5_flow *dev_flow,
4863                      bool enable,
4864                      struct rte_flow_error *error)
4865 {
4866         const struct flow_tcf_vxlan_encap *encap = dev_flow->tcf.vxlan_encap;
4867         struct tcf_neigh_rule *rule = NULL;
4868         int ret;
4869
4870         assert(encap);
4871         assert(encap->hdr.type == FLOW_TCF_TUNACT_VXLAN_ENCAP);
4872         if (encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {
4873                 assert(encap->mask & FLOW_TCF_ENCAP_IPV4_SRC);
4874                 LIST_FOREACH(rule, &iface->neigh, next) {
4875                         if (rule->mask & FLOW_TCF_ENCAP_IPV4_DST &&
4876                             encap->ipv4.dst == rule->ipv4.dst) {
4877                                 break;
4878                         }
4879                 }
4880         } else {
4881                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);
4882                 assert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);
4883                 LIST_FOREACH(rule, &iface->neigh, next) {
4884                         if (rule->mask & FLOW_TCF_ENCAP_IPV6_DST &&
4885                             !memcmp(&encap->ipv6.dst, &rule->ipv6.dst,
4886                                                 sizeof(encap->ipv6.dst))) {
4887                                 break;
4888                         }
4889                 }
4890         }
4891         if (rule) {
4892                 if (memcmp(&encap->eth.dst, &rule->eth,
4893                            sizeof(encap->eth.dst))) {
4894                         DRV_LOG(WARNING, "Destination MAC differs"
4895                                          " in neigh rule");
4896                         rte_flow_error_set(error, EEXIST,
4897                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4898                                            NULL, "Different MAC address"
4899                                            " neigh rule for the same"
4900                                            " destination IP");
4901                         return -EEXIST;
4902                 }
4903                 if (enable) {
4904                         rule->refcnt++;
4905                         return 0;
4906                 }
4907                 if (!rule->refcnt || !--rule->refcnt) {
4908                         LIST_REMOVE(rule, next);
4909                         return flow_tcf_rule_neigh(tcf, encap,
4910                                                    iface->ifouter,
4911                                                    false, error);
4912                 }
4913                 return 0;
4914         }
4915         if (!enable) {
4916                 DRV_LOG(WARNING, "Disabling not existing neigh rule");
4917                 rte_flow_error_set(error, ENOENT,
4918                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4919                                    "unable to allocate memory for neigh rule");
4920                 return -ENOENT;
4921         }
4922         rule = rte_zmalloc(__func__, sizeof(struct tcf_neigh_rule),
4923                                 alignof(struct tcf_neigh_rule));
4924         if (!rule) {
4925                 rte_flow_error_set(error, ENOMEM,
4926                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4927                                    "unable to allocate memory for neigh rule");
4928                 return -rte_errno;
4929         }
4930         *rule = (struct tcf_neigh_rule){.refcnt = 0,
4931                                         .mask = 0,
4932                                         };
4933         if (encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {
4934                 rule->mask = FLOW_TCF_ENCAP_IPV4_DST;
4935                 rule->ipv4.dst = encap->ipv4.dst;
4936         } else {
4937                 rule->mask = FLOW_TCF_ENCAP_IPV6_DST;
4938                 memcpy(&rule->ipv6.dst, &encap->ipv6.dst, IPV6_ADDR_LEN);
4939         }
4940         memcpy(&rule->eth, &encap->eth.dst, sizeof(rule->eth));
4941         ret = flow_tcf_rule_neigh(tcf, encap, iface->ifouter, true, error);
4942         if (ret) {
4943                 rte_free(rule);
4944                 return ret;
4945         }
4946         rule->refcnt++;
4947         LIST_INSERT_HEAD(&iface->neigh, rule, next);
4948         return 0;
4949 }
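
/*
 * Illustration (hypothetical values): with an existing rule for
 * destination 10.1.1.1 and MAC aa:bb:cc:dd:ee:01, a second encap flow
 * to 10.1.1.1 with MAC aa:bb:cc:dd:ee:02 fails with -EEXIST, while one
 * with the same MAC only increments the reference counter.
 */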
4950
4951 /* VXLAN encap rule database for outer interfaces. */
4952 static  LIST_HEAD(, tcf_irule) iface_list_vxlan = LIST_HEAD_INITIALIZER();
4953
4954 /* VTEP device list is shared between PMD port instances. */
4955 static LIST_HEAD(, tcf_vtep) vtep_list_vxlan = LIST_HEAD_INITIALIZER();
4956 static pthread_mutex_t vtep_list_mutex = PTHREAD_MUTEX_INITIALIZER;
4957
4958 /**
4959  * Acquire the VXLAN encap rules container for specified interface.
4960  * First looks for the container in the list of existing ones; creates
4961  * and initializes a new container if an existing one is not found.
4962  *
4963  * @param[in] tcf
4964  *   Context object initialized by mlx5_flow_tcf_context_create().
4965  * @param[in] ifouter
4966  *   Network interface index to create VXLAN encap rules on.
4967  * @param[out] error
4968  *   Perform verbose error reporting if not NULL.
4969  * @return
4970  *   Rule container pointer on success,
4971  *   NULL otherwise and rte_errno is set.
4972  */
4973 static struct tcf_irule*
4974 flow_tcf_encap_irule_acquire(struct mlx5_flow_tcf_context *tcf,
4975                              unsigned int ifouter,
4976                              struct rte_flow_error *error)
4977 {
4978         struct tcf_irule *iface;
4979
4980         /* Check whether the container for encap rules already exists. */
4981         assert(ifouter);
4982         LIST_FOREACH(iface, &iface_list_vxlan, next) {
4983                 if (iface->ifouter == ifouter)
4984                         break;
4985         }
4986         if (iface) {
4987                 /* Container already exists, just increment the reference. */
4988                 iface->refcnt++;
4989                 return iface;
4990         }
4991         /* Not found, we should create a new container. */
4992         iface = rte_zmalloc(__func__, sizeof(*iface),
4993                             alignof(struct tcf_irule));
4994         if (!iface) {
4995                 rte_flow_error_set(error, ENOMEM,
4996                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4997                                    "unable to allocate memory for container");
4998                 return NULL;
4999         }
5000         *iface = (struct tcf_irule){
5001                         .local = LIST_HEAD_INITIALIZER(),
5002                         .neigh = LIST_HEAD_INITIALIZER(),
5003                         .ifouter = ifouter,
5004                         .refcnt = 1,
5005         };
5006         /* Perform interface cleanup for the newly created container. */
5007         flow_tcf_encap_iface_cleanup(tcf, ifouter);
5008         flow_tcf_encap_local_cleanup(tcf, ifouter);
5009         flow_tcf_encap_neigh_cleanup(tcf, ifouter);
5010         LIST_INSERT_HEAD(&iface_list_vxlan, iface, next);
5011         return iface;
5012 }
5013
5014 /**
5015  * Releases the VXLAN encap rules container by pointer. Decrements the
5016  * reference counter and deletes the container if the counter is zero.
5017  *
5018  * @param[in] irule
5019  *   VXLAN rule container pointer to release.
5020  */
5021 static void
5022 flow_tcf_encap_irule_release(struct tcf_irule *iface)
5023 {
5024         assert(iface->refcnt);
5025         if (--iface->refcnt == 0) {
5026                 /* Reference counter is zero, delete the container. */
5027                 assert(LIST_EMPTY(&iface->local));
5028                 assert(LIST_EMPTY(&iface->neigh));
5029                 LIST_REMOVE(iface, next);
5030                 rte_free(iface);
5031         }
5032 }
5033
5034 /**
5035  * Deletes VTEP network device.
5036  *
5037  * @param[in] tcf
5038  *   Context object initialized by mlx5_flow_tcf_context_create().
5039  * @param[in] vtep
5040  *   Object representing the network device to delete. Memory
5041  *   allocated for this object is freed by this routine.
5042  */
5043 static void
5044 flow_tcf_vtep_delete(struct mlx5_flow_tcf_context *tcf,
5045                      struct tcf_vtep *vtep)
5046 {
5047         struct nlmsghdr *nlh;
5048         struct ifinfomsg *ifm;
5049         alignas(struct nlmsghdr)
5050         uint8_t buf[mnl_nlmsg_size(MNL_ALIGN(sizeof(*ifm))) +
5051                     MNL_BUF_EXTRA_SPACE];
5052         int ret;
5053
5054         assert(!vtep->refcnt);
5055         /* Delete only interfaces that we actually created. */
5056         if (vtep->created && vtep->ifindex) {
5057                 DRV_LOG(INFO, "VTEP delete (%d)", vtep->ifindex);
5058                 nlh = mnl_nlmsg_put_header(buf);
5059                 nlh->nlmsg_type = RTM_DELLINK;
5060                 nlh->nlmsg_flags = NLM_F_REQUEST;
5061                 ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
5062                 ifm->ifi_family = AF_UNSPEC;
5063                 ifm->ifi_index = vtep->ifindex;
5064                 assert(sizeof(buf) >= nlh->nlmsg_len);
5065                 ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
5066                 if (ret)
5067                         DRV_LOG(WARNING, "netlink: error deleting vxlan"
5068                                          " encap/decap ifindex %u",
5069                                          ifm->ifi_index);
5070         }
5071         rte_free(vtep);
5072 }
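
/*
 * Equivalent command (the name is a placeholder built from
 * MLX5_VXLAN_DEVICE_PFX and the UDP port at creation time):
 *
 *   ip link del <vtep_name>
 */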
5073
5074 /**
5075  * Creates VTEP network device.
5076  *
5077  * @param[in] tcf
5078  *   Context object initialized by mlx5_flow_tcf_context_create().
5079  * @param[in] port
5080  *   UDP port of created VTEP device.
5081  * @param[out] error
5082  *   Perform verbose error reporting if not NULL.
5083  *
5084  * @return
5085  *   Pointer to the created device structure on success,
5086  *   NULL otherwise and rte_errno is set.
5087  */
5088 static struct tcf_vtep*
5089 flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf,
5090                      uint16_t port, struct rte_flow_error *error)
5091 {
5092         struct tcf_vtep *vtep;
5093         struct nlmsghdr *nlh;
5094         struct ifinfomsg *ifm;
5095         char name[sizeof(MLX5_VXLAN_DEVICE_PFX) + 24];
5096         alignas(struct nlmsghdr)
5097         uint8_t buf[mnl_nlmsg_size(sizeof(*ifm)) +
5098                     SZ_NLATTR_DATA_OF(sizeof(name)) +
5099                     SZ_NLATTR_NEST * 2 +
5100                     SZ_NLATTR_STRZ_OF("vxlan") +
5101                     SZ_NLATTR_DATA_OF(sizeof(uint32_t)) +
5102                     SZ_NLATTR_DATA_OF(sizeof(uint16_t)) +
5103                     SZ_NLATTR_DATA_OF(sizeof(uint8_t)) * 3 +
5104                     MNL_BUF_EXTRA_SPACE];
5105         struct nlattr *na_info;
5106         struct nlattr *na_vxlan;
5107         rte_be16_t vxlan_port = rte_cpu_to_be_16(port);
5108         int ret;
5109
5110         vtep = rte_zmalloc(__func__, sizeof(*vtep), alignof(struct tcf_vtep));
5111         if (!vtep) {
5112                 rte_flow_error_set(error, ENOMEM,
5113                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5114                                    "unable to allocate memory for VTEP");
5115                 return NULL;
5116         }
5117         *vtep = (struct tcf_vtep){
5118                         .port = port,
5119         };
5120         memset(buf, 0, sizeof(buf));
5121         nlh = mnl_nlmsg_put_header(buf);
5122         nlh->nlmsg_type = RTM_NEWLINK;
5123         nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
5124         ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
5125         ifm->ifi_family = AF_UNSPEC;
5126         ifm->ifi_type = 0;
5127         ifm->ifi_index = 0;
5128         ifm->ifi_flags = IFF_UP;
5129         ifm->ifi_change = 0xffffffff;
5130         snprintf(name, sizeof(name), "%s%u", MLX5_VXLAN_DEVICE_PFX, port);
5131         mnl_attr_put_strz(nlh, IFLA_IFNAME, name);
5132         na_info = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
5133         assert(na_info);
5134         mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "vxlan");
5135         na_vxlan = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
5136         assert(na_vxlan);
5137 #ifdef HAVE_IFLA_VXLAN_COLLECT_METADATA
5138         /*
5139          * RH 7.2 does not support metadata for the tunnel device.
5140          * It does not matter because we are going to use the
5141          * hardware offload by the mlx5 driver.
5142          */
5143         mnl_attr_put_u8(nlh, IFLA_VXLAN_COLLECT_METADATA, 1);
5144 #endif
5145         mnl_attr_put_u8(nlh, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 1);
5146         mnl_attr_put_u8(nlh, IFLA_VXLAN_LEARNING, 0);
5147         mnl_attr_put_u16(nlh, IFLA_VXLAN_PORT, vxlan_port);
5148 #ifndef HAVE_IFLA_VXLAN_COLLECT_METADATA
5149         /*
5150          * We must specify the VNI explicitly if metadata is not
5151          * supported. Note that the VNI is transferred in native endianness.
5152          */
5153         mnl_attr_put_u16(nlh, IFLA_VXLAN_ID, MLX5_VXLAN_DEFAULT_VNI);
5154 #endif
5155         mnl_attr_nest_end(nlh, na_vxlan);
5156         mnl_attr_nest_end(nlh, na_info);
5157         assert(sizeof(buf) >= nlh->nlmsg_len);
5158         ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
5159         if (ret) {
5160                 DRV_LOG(WARNING,
5161                         "netlink: VTEP %s create failure (%d)",
5162                         name, rte_errno);
5163                 if (rte_errno != EEXIST)
5164                         /*
5165                          * Some unhandled error occurred or device is
5166                          * for encapsulation and cannot be shared.
5167                          */
5168                         goto error;
5169         } else {
5170                 /*
5171                  * Mark the device as created by us. We should
5172                  * explicitly delete it when we do not need it
5173                  * anymore.
5174                  */
5175                 vtep->created = 1;
5176                 vtep->waitreg = 1;
5177         }
5178         /* Try to get the ifindex of the created or pre-existing device. */
5179         ret = if_nametoindex(name);
5180         if (!ret) {
5181                 DRV_LOG(WARNING,
5182                         "VTEP %s failed to get index (%d)", name, errno);
5183                 rte_flow_error_set
5184                         (error, errno,
5185                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5186                          "netlink: failed to retrieve VTEP ifindex");
5187                 goto error;
5188         }
5189         vtep->ifindex = ret;
5190         memset(buf, 0, sizeof(buf));
5191         nlh = mnl_nlmsg_put_header(buf);
5192         nlh->nlmsg_type = RTM_NEWLINK;
5193         nlh->nlmsg_flags = NLM_F_REQUEST;
5194         ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
5195         ifm->ifi_family = AF_UNSPEC;
5196         ifm->ifi_type = 0;
5197         ifm->ifi_index = vtep->ifindex;
5198         ifm->ifi_flags = IFF_UP;
5199         ifm->ifi_change = IFF_UP;
5200         ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
5201         if (ret) {
5202                 rte_flow_error_set(error, rte_errno,
5203                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5204                                    "netlink: failed to set VTEP link up");
5205                 DRV_LOG(WARNING, "netlink: VTEP %s set link up failure (%d)",
5206                         name, rte_errno);
5207                 goto clean;
5208         }
5209         ret = mlx5_flow_tcf_init(tcf, vtep->ifindex, error);
5210         if (ret) {
5211                 DRV_LOG(WARNING, "VTEP %s init failure (%d)", name, rte_errno);
5212                 goto clean;
5213         }
5214         DRV_LOG(INFO, "VTEP create (%d, %d)", vtep->port, vtep->ifindex);
5215         vtep->refcnt = 1;
5216         return vtep;
5217 clean:
5218         flow_tcf_vtep_delete(tcf, vtep);
5219         return NULL;
5220 error:
5221         rte_free(vtep);
5222         return NULL;
5223 }
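
/*
 * Rough iproute2 equivalent of the creation sequence above, assuming
 * collect-metadata support (name and port are placeholders):
 *
 *   ip link add <pfx><port> type vxlan external nolearning \
 *           udp6zerocsumrx dstport <port>
 *   ip link set <pfx><port> up
 */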
5224
5225 /**
5226  * Acquire target interface index for VXLAN tunneling decapsulation.
5227  * In order to share the UDP port with other interfaces, the VXLAN
5228  * device is created detached from any interface (if created at all).
5229  *
5230  * @param[in] tcf
5231  *   Context object initialized by mlx5_flow_tcf_context_create().
5232  * @param[in] dev_flow
5233  *   Flow tcf object with tunnel structure pointer set.
5234  * @param[out] error
5235  *   Perform verbose error reporting if not NULL.
5236  * @return
5237  *   Interface descriptor pointer on success,
5238  *   NULL otherwise and rte_errno is set.
5239  */
5240 static struct tcf_vtep*
5241 flow_tcf_decap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,
5242                             struct mlx5_flow *dev_flow,
5243                             struct rte_flow_error *error)
5244 {
5245         struct tcf_vtep *vtep;
5246         uint16_t port = dev_flow->tcf.vxlan_decap->udp_port;
5247
5248         LIST_FOREACH(vtep, &vtep_list_vxlan, next) {
5249                 if (vtep->port == port)
5250                         break;
5251         }
5252         if (vtep) {
5253                 /* Device exists, just increment the reference counter. */
5254                 vtep->refcnt++;
5255                 assert(vtep->ifindex);
5256                 return vtep;
5257         }
5258         /* No decapsulation device exists, try to create a new one. */
5259         vtep = flow_tcf_vtep_create(tcf, port, error);
5260         if (vtep)
5261                 LIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);
5262         return vtep;
5263 }
5264
5265 /**
5266  * Acquire the target interface index for VXLAN tunneling encapsulation.
5267  *
5268  * @param[in] tcf
5269  *   Context object initialized by mlx5_flow_tcf_context_create().
5270  * @param[in] ifouter
5271  *   Network interface index to attach VXLAN encap device to.
5272  * @param[in] dev_flow
5273  *   Flow tcf object with tunnel structure pointer set.
5274  * @param[out] error
5275  *   Perform verbose error reporting if not NULL.
5276  * @return
5277  *   Interface descriptor pointer on success,
5278  *   NULL otherwise and rte_errno is set.
5279  */
5280 static struct tcf_vtep*
5281 flow_tcf_encap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,
5282                             unsigned int ifouter,
5283                             struct mlx5_flow *dev_flow,
5284                             struct rte_flow_error *error)
5285 {
5286         uint16_t port;
5287         struct tcf_vtep *vtep;
5288         struct tcf_irule *iface;
5289         int ret;
5290
5291         assert(ifouter);
5292         /* Check whether the VTEP for the specified port already exists. */
5293         port = rte_be_to_cpu_16(dev_flow->tcf.vxlan_encap->udp.dst);
5294         LIST_FOREACH(vtep, &vtep_list_vxlan, next) {
5295                 if (vtep->port == port)
5296                         break;
5297         }
5298         if (vtep) {
5299                 /* VTEP already exists, just increment the reference. */
5300                 vtep->refcnt++;
5301         } else {
5302                 /* Not found, we should create a new VTEP. */
5303                 vtep = flow_tcf_vtep_create(tcf, port, error);
5304                 if (!vtep)
5305                         return NULL;
5306                 LIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);
5307         }
5308         assert(vtep->ifindex);
5309         iface = flow_tcf_encap_irule_acquire(tcf, ifouter, error);
5310         if (!iface) {
5311                 if (--vtep->refcnt == 0)
5312                         flow_tcf_vtep_delete(tcf, vtep);
5313                 return NULL;
5314         }
5315         dev_flow->tcf.vxlan_encap->iface = iface;
5316         /* Create local ipaddr with peer to specify the outer IPs. */
5317         ret = flow_tcf_encap_local(tcf, iface, dev_flow, true, error);
5318         if (!ret) {
5319                 /* Create neigh rule to specify outer destination MAC. */
5320                 ret = flow_tcf_encap_neigh(tcf, iface, dev_flow, true, error);
5321                 if (ret)
5322                         flow_tcf_encap_local(tcf, iface,
5323                                              dev_flow, false, error);
5324         }
5325         if (ret) {
5326                 dev_flow->tcf.vxlan_encap->iface = NULL;
5327                 flow_tcf_encap_irule_release(iface);
5328                 if (--vtep->refcnt == 0)
5329                         flow_tcf_vtep_delete(tcf, vtep);
5330                 return NULL;
5331         }
5332         return vtep;
5333 }
5334
5335 /**
5336  * Acquires target interface index for tunneling of any type.
5337  * Creates the new VTEP if needed.
5338  *
5339  * @param[in] tcf
5340  *   Context object initialized by mlx5_flow_tcf_context_create().
5341  * @param[in] ifouter
5342  *   Network interface index to create VXLAN encap rules on.
5343  * @param[in] dev_flow
5344  *   Flow tcf object with tunnel structure pointer set.
5345  * @param[out] error
5346  *   Perform verbose error reporting if not NULL.
5347  * @return
5348  *   Interface descriptor pointer on success,
5349  *   NULL otherwise and rte_errno is set.
5350  */
5351 static struct tcf_vtep*
5352 flow_tcf_vtep_acquire(struct mlx5_flow_tcf_context *tcf,
5353                       unsigned int ifouter,
5354                       struct mlx5_flow *dev_flow,
5355                       struct rte_flow_error *error)
5356 {
5357         struct tcf_vtep *vtep = NULL;
5358
5359         assert(dev_flow->tcf.tunnel);
5360         pthread_mutex_lock(&vtep_list_mutex);
5361         switch (dev_flow->tcf.tunnel->type) {
5362         case FLOW_TCF_TUNACT_VXLAN_ENCAP:
5363                 vtep = flow_tcf_encap_vtep_acquire(tcf, ifouter,
5364                                                   dev_flow, error);
5365                 break;
5366         case FLOW_TCF_TUNACT_VXLAN_DECAP:
5367                 vtep = flow_tcf_decap_vtep_acquire(tcf, dev_flow, error);
5368                 break;
5369         default:
5370                 rte_flow_error_set(error, ENOTSUP,
5371                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5372                                    "unsupported tunnel type");
5373                 break;
5374         }
5375         pthread_mutex_unlock(&vtep_list_mutex);
5376         return vtep;
5377 }
5378
5379 /**
5380  * Release the tunneling interface. Decrements the reference
5381  * counter and actually removes the device if the counter is zero.
5382  *
5383  * @param[in] tcf
5384  *   Context object initialized by mlx5_flow_tcf_context_create().
5385  * @param[in] vtep
5386  *   VTEP device descriptor structure.
5387  * @param[in] dev_flow
5388  *   Flow tcf object with tunnel structure pointer set.
5389  */
5390 static void
5391 flow_tcf_vtep_release(struct mlx5_flow_tcf_context *tcf,
5392                       struct tcf_vtep *vtep,
5393                       struct mlx5_flow *dev_flow)
5394 {
5395         assert(dev_flow->tcf.tunnel);
5396         pthread_mutex_lock(&vtep_list_mutex);
5397         switch (dev_flow->tcf.tunnel->type) {
5398         case FLOW_TCF_TUNACT_VXLAN_DECAP:
5399                 break;
5400         case FLOW_TCF_TUNACT_VXLAN_ENCAP: {
5401                 struct tcf_irule *iface;
5402
5403                 /* Remove the encap ancillary rules first. */
5404                 iface = dev_flow->tcf.vxlan_encap->iface;
5405                 assert(iface);
5406                 flow_tcf_encap_neigh(tcf, iface, dev_flow, false, NULL);
5407                 flow_tcf_encap_local(tcf, iface, dev_flow, false, NULL);
5408                 flow_tcf_encap_irule_release(iface);
5409                 dev_flow->tcf.vxlan_encap->iface = NULL;
5410                 break;
5411         }
5412         default:
5413                 assert(false);
5414                 DRV_LOG(WARNING, "Unsupported tunnel type");
5415                 break;
5416         }
5417         assert(vtep->refcnt);
5418         if (--vtep->refcnt == 0) {
5419                 LIST_REMOVE(vtep, next);
5420                 flow_tcf_vtep_delete(tcf, vtep);
5421         }
5422         pthread_mutex_unlock(&vtep_list_mutex);
5423 }
5424
5425 struct tcf_nlcb_query {
5426         uint32_t handle;
5427         uint32_t tc_flags;
5428         uint32_t flags_valid:1;
5429 };
5430
5431 /**
5432  * Collect queried rule attributes. This is a callback routine called
5433  * by libmnl mnl_cb_run() in a loop for every message in a received
5434  * packet. The current implementation collects the flower flags only.
5435  *
5436  * @param[in] nlh
5437  *   Pointer to reply header.
5438  * @param[in, out] arg
5439  *   Context pointer for this callback.
5440  *
5441  * @return
5442  *   A positive, nonzero value on success (required by libmnl
5443  *   to continue message processing).
5444  */
5445 static int
5446 flow_tcf_collect_query_cb(const struct nlmsghdr *nlh, void *arg)
5447 {
5448         struct tcf_nlcb_query *query = arg;
5449         struct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);
5450         struct nlattr *na, *na_opt;
5451         bool flower = false;
5452
5453         if (nlh->nlmsg_type != RTM_NEWTFILTER ||
5454             tcm->tcm_handle != query->handle)
5455                 return 1;
5456         mnl_attr_for_each(na, nlh, sizeof(*tcm)) {
5457                 switch (mnl_attr_get_type(na)) {
5458                 case TCA_KIND:
5459                         if (strcmp(mnl_attr_get_payload(na), "flower")) {
5460                                 /* Not flower filter, drop entire message. */
5461                                 return 1;
5462                         }
5463                         flower = true;
5464                         break;
5465                 case TCA_OPTIONS:
5466                         if (!flower) {
5467                                 /* Not flower options, drop entire message. */
5468                                 return 1;
5469                         }
5470                         /* Check nested flower options. */
5471                         mnl_attr_for_each_nested(na_opt, na) {
5472                                 switch (mnl_attr_get_type(na_opt)) {
5473                                 case TCA_FLOWER_FLAGS:
5474                                         query->flags_valid = 1;
5475                                         query->tc_flags =
5476                                                 mnl_attr_get_u32(na_opt);
5477                                         break;
5478                                 }
5479                         }
5480                         break;
5481                 }
5482         }
5483         return 1;
5484 }
5485
5486 /**
5487  * Query a TC flower rule flags via netlink.
5488  *
5489  * @param[in] tcf
5490  *   Context object initialized by mlx5_flow_tcf_context_create().
5491  * @param[in] dev_flow
5492  *   Pointer to the flow.
5493  * @param[out] pflags
5494  *   Pointer to the data retrieved by the query.
5495  *
5496  * @return
5497  *   0 on success, a negative errno value otherwise.
5498  */
5499 static int
5500 flow_tcf_query_flags(struct mlx5_flow_tcf_context *tcf,
5501                      struct mlx5_flow *dev_flow,
5502                      uint32_t *pflags)
5503 {
5504         struct nlmsghdr *nlh;
5505         struct tcmsg *tcm;
5506         struct tcf_nlcb_query query = {
5507                 .handle = dev_flow->tcf.tcm->tcm_handle,
5508         };
5509
5510         nlh = mnl_nlmsg_put_header(tcf->buf);
5511         nlh->nlmsg_type = RTM_GETTFILTER;
5512         nlh->nlmsg_flags = NLM_F_REQUEST;
5513         tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
5514         memcpy(tcm, dev_flow->tcf.tcm, sizeof(*tcm));
5515         /*
5516          * Ignore Netlink errors for filter query operations.
5517          * The reply length is sent by the kernel as errno.
5518          * Just check that we got the flags option.
5519          */
5520         flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_query_cb, &query);
5521         if (!query.flags_valid) {
5522                 *pflags = 0;
5523                 return -ENOENT;
5524         }
5525         *pflags = query.tc_flags;
5526         return 0;
5527 }
5528
5529 /**
5530  * Query and check the in_hw set for specified rule.
5531  *
5532  * @param[in] tcf
5533  *   Context object initialized by mlx5_flow_tcf_context_create().
5534  * @param[in] dev_flow
5535  *   Pointer to the flow to check.
5536  *
5537  * @return
5538  *   0 on success, a negative errno value otherwise.
5539  */
5540 static int
5541 flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
5542                     struct mlx5_flow *dev_flow)
5543 {
5544         uint32_t flags;
5545         int ret;
5546
5547         ret = flow_tcf_query_flags(tcf, dev_flow, &flags);
5548         if (ret)
5549                 return ret;
5550         return (flags & TCA_CLS_FLAGS_IN_HW) ? 0 : -ENOENT;
5551 }
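
/*
 * Illustration (interface name is a placeholder): the same information
 * can be inspected manually with
 *
 *   tc filter show dev <ifname> ingress
 *
 * where offloaded flower rules are reported with the "in_hw" flag and
 * non-offloaded ones with "not_in_hw".
 */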
5552
5553 /**
5554  * Remove flow from E-Switch by sending Netlink message.
5555  *
5556  * @param[in] dev
5557  *   Pointer to Ethernet device.
5558  * @param[in, out] flow
5559  *   Pointer to the sub flow.
5560  */
5561 static void
5562 flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5563 {
5564         struct priv *priv = dev->data->dev_private;
5565         struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
5566         struct mlx5_flow *dev_flow;
5567         struct nlmsghdr *nlh;
5568         struct tcmsg *tcm;
5569
5570         if (!flow)
5571                 return;
5572         dev_flow = LIST_FIRST(&flow->dev_flows);
5573         if (!dev_flow)
5574                 return;
5575         /* E-Switch flow can't be expanded. */
5576         assert(!LIST_NEXT(dev_flow, next));
5577         if (dev_flow->tcf.applied) {
5578                 nlh = dev_flow->tcf.nlh;
5579                 nlh->nlmsg_type = RTM_DELTFILTER;
5580                 nlh->nlmsg_flags = NLM_F_REQUEST;
5581                 flow_tcf_nl_ack(ctx, nlh, NULL, NULL);
5582                 if (dev_flow->tcf.tunnel) {
5583                         assert(dev_flow->tcf.tunnel->vtep);
5584                         flow_tcf_vtep_release(ctx,
5585                                 dev_flow->tcf.tunnel->vtep,
5586                                 dev_flow);
5587                         dev_flow->tcf.tunnel->vtep = NULL;
5588                 }
5589                 /* Cleanup the rule handle value. */
5590                 tcm = mnl_nlmsg_get_payload(nlh);
5591                 tcm->tcm_handle = 0;
5592                 dev_flow->tcf.applied = 0;
5593         }
5594 }
5595
5596 /**
5597  * Fetch the applied rule handle. This is a callback routine called
5598  * by libmnl mnl_cb_run() in a loop for every message in a received
5599  * packet. When the NLM_F_ECHO flag is specified the kernel sends the
5600  * created rule descriptor back to the application and we can retrieve
5601  * the actual rule handle from the updated descriptor.
5602  *
5603  * @param[in] nlh
5604  *   Pointer to reply header.
5605  * @param[in, out] arg
5606  *   Context pointer for this callback.
5607  *
5608  * @return
5609  *   A positive, nonzero value on success (required by libmnl
5610  *   to continue message processing).
5611  */
5612 static int
5613 flow_tcf_collect_apply_cb(const struct nlmsghdr *nlh, void *arg)
5614 {
5615         struct nlmsghdr *nlhrq = arg;
5616         struct tcmsg *tcmrq = mnl_nlmsg_get_payload(nlhrq);
5617         struct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);
5618         struct nlattr *na;
5619
5620         if (nlh->nlmsg_type != RTM_NEWTFILTER ||
5621             nlh->nlmsg_seq != nlhrq->nlmsg_seq)
5622                 return 1;
5623         mnl_attr_for_each(na, nlh, sizeof(*tcm)) {
5624                 switch (mnl_attr_get_type(na)) {
5625                 case TCA_KIND:
5626                         if (strcmp(mnl_attr_get_payload(na), "flower")) {
5627                                 /* Not flower filter, drop entire message. */
5628                                 return 1;
5629                         }
5630                         tcmrq->tcm_handle = tcm->tcm_handle;
5631                         return 1;
5632                 }
5633         }
5634         return 1;
5635 }

5636 /**
5637  * Apply flow to E-Switch by sending Netlink message.
5638  *
5639  * @param[in] dev
5640  *   Pointer to Ethernet device.
5641  * @param[in, out] flow
5642  *   Pointer to the sub flow.
5643  * @param[out] error
5644  *   Pointer to the error structure.
5645  *
5646  * @return
5647  *   0 on success, a negative errno value otherwise and rte_errno is set.
5648  */
5649 static int
5650 flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5651                struct rte_flow_error *error)
5652 {
5653         struct priv *priv = dev->data->dev_private;
5654         struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
5655         struct mlx5_flow *dev_flow;
5656         struct nlmsghdr *nlh;
5657         struct tcmsg *tcm;
5658         uint64_t start = 0;
5659         uint64_t twait = 0;
5660         int ret;
5661
5662         dev_flow = LIST_FIRST(&flow->dev_flows);
5663         /* E-Switch flow can't be expanded. */
5664         assert(!LIST_NEXT(dev_flow, next));
5665         if (dev_flow->tcf.applied)
5666                 return 0;
5667         nlh = dev_flow->tcf.nlh;
5668         nlh->nlmsg_type = RTM_NEWTFILTER;
5669         nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
5670                            NLM_F_EXCL | NLM_F_ECHO;
5671         tcm = mnl_nlmsg_get_payload(nlh);
5672         /* Allow kernel to assign handle on its own. */
5673         tcm->tcm_handle = 0;
5674         if (dev_flow->tcf.tunnel) {
5675                 /*
5676                  * Replace the interface index, target for
5677                  * encapsulation, source for decapsulation.
5678                  */
5679                 assert(!dev_flow->tcf.tunnel->vtep);
5680                 assert(dev_flow->tcf.tunnel->ifindex_ptr);
5681                 /* Acquire actual VTEP device when rule is being applied. */
5682                 dev_flow->tcf.tunnel->vtep =
5683                         flow_tcf_vtep_acquire(ctx,
5684                                         dev_flow->tcf.tunnel->ifindex_org,
5685                                         dev_flow, error);
5686                 if (!dev_flow->tcf.tunnel->vtep)
5687                         return -rte_errno;
5688                 DRV_LOG(INFO, "Replace ifindex: %d->%d",
5689                                 dev_flow->tcf.tunnel->vtep->ifindex,
5690                                 dev_flow->tcf.tunnel->ifindex_org);
5691                 *dev_flow->tcf.tunnel->ifindex_ptr =
5692                         dev_flow->tcf.tunnel->vtep->ifindex;
5693                 if (dev_flow->tcf.tunnel->vtep->waitreg) {
5694                         /* Clear wait flag for VXLAN port registration. */
5695                         dev_flow->tcf.tunnel->vtep->waitreg = 0;
5696                         twait = rte_get_timer_hz();
5697                         assert(twait > MS_PER_S);
5698                         twait = twait * MLX5_VXLAN_WAIT_PORT_REG_MS;
5699                         twait = twait / MS_PER_S;
5700                         start = rte_get_timer_cycles();
5701                 }
5702         }
5703         /*
5704          * Kernel creates the VXLAN devices and registers UDP ports to
5705          * be hardware offloaded within the NIC kernel drivers. The
5706          * registration process is being performed into context of
5707          * working kernel thread and the race conditions might happen.
5708          * The VXLAN device is created and success is returned to
5709          * calling application, but the UDP port registration process
5710          * is not completed yet. The next applied rule may be rejected
5711          * by the driver with ENOSUP code. We are going to wait a bit,
5712          * allowing registration process to be completed. The waiting
5713          * is performed once after device been created.
5714          */
5715         do {
5716                 struct timespec onems;
5717
5718                 ret = flow_tcf_nl_ack(ctx, nlh,
5719                                       flow_tcf_collect_apply_cb, nlh);
5720                 if (!ret || ret != -ENOTSUP || !twait)
5721                         break;
5722                 /* Wait one millisecond and try again till timeout. */
5723                 onems.tv_sec = 0;
5724                 onems.tv_nsec = NS_PER_S / MS_PER_S;
5725                 nanosleep(&onems, 0);
5726                 if ((rte_get_timer_cycles() - start) > twait) {
5727                         /* Timeout elapsed, try once more and exit. */
5728                         twait = 0;
5729                 }
5730         } while (true);
5731         if (!ret) {
5732                 if (!tcm->tcm_handle) {
5733                         flow_tcf_remove(dev, flow);
5734                         return rte_flow_error_set
5735                                 (error, ENOENT,
5736                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5737                                  "netlink: rule zero handle returned");
5738                 }
5739                 dev_flow->tcf.applied = 1;
5740                 if (*dev_flow->tcf.ptc_flags & TCA_CLS_FLAGS_SKIP_SW)
5741                         return 0;
5742                 /*
5743                  * Rule was applied without skip_sw flag set.
5744                  * We should check whether the rule was actually
5745                  * accepted by hardware (have a look at the in_hw flag).
5746                  */
5747                 if (flow_tcf_check_inhw(ctx, dev_flow)) {
5748                         flow_tcf_remove(dev, flow);
5749                         return rte_flow_error_set
5750                                 (error, ENOENT,
5751                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5752                                  "netlink: rule has no in_hw flag set");
5753                 }
5754                 return 0;
5755         }
5756         if (dev_flow->tcf.tunnel) {
5757                 /* Rollback the VTEP configuration if rule apply failed. */
5758                 assert(dev_flow->tcf.tunnel->vtep);
5759                 flow_tcf_vtep_release(ctx, dev_flow->tcf.tunnel->vtep,
5760                                       dev_flow);
5761                 dev_flow->tcf.tunnel->vtep = NULL;
5762         }
5763         return rte_flow_error_set(error, rte_errno,
5764                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5765                                   "netlink: failed to create TC flow rule");
5766 }
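
/*
 * Worked example of the wait-window arithmetic above (timer frequency
 * and MLX5_VXLAN_WAIT_PORT_REG_MS value are hypothetical): with
 * rte_get_timer_hz() == 2.5e9 and a 100 ms window, the loop polls in
 * 1 ms steps until 2.5e9 * 100 / 1000 == 2.5e8 timer cycles elapse.
 */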
5767
5768 /**
5769  * Remove flow from E-Switch and release resources of the device flow.
5770  *
5771  * @param[in] dev
5772  *   Pointer to Ethernet device.
5773  * @param[in, out] flow
5774  *   Pointer to the sub flow.
5775  */
5776 static void
5777 flow_tcf_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5778 {
5779         struct mlx5_flow *dev_flow;
5780
5781         if (!flow)
5782                 return;
5783         flow_tcf_remove(dev, flow);
5784         if (flow->counter) {
5785                 if (--flow->counter->ref_cnt == 0) {
5786                         rte_free(flow->counter);
5787                         flow->counter = NULL;
5788                 }
5789         }
5790         dev_flow = LIST_FIRST(&flow->dev_flows);
5791         if (!dev_flow)
5792                 return;
5793         /* E-Switch flow can't be expanded. */
5794         assert(!LIST_NEXT(dev_flow, next));
5795         LIST_REMOVE(dev_flow, next);
5796         rte_free(dev_flow);
5797 }
5798
5799 /**
5800  * Helper routine for figuring the space size required for a parse buffer.
5801  *
5802  * @param array
5803  *   array of values to use.
5804  * @param idx
5805  *   Current location in array.
5806  * @param value
5807  *   Value to compare with.
5808  *
5809  * @return
5810  *   The maximum between the given value and the array value on index.
5811  */
5812 static uint16_t
5813 flow_tcf_arr_val_max(uint16_t array[], int idx, uint16_t value)
5814 {
5815         return idx < 0 ? value : RTE_MAX(array[idx], value);
5816 }
5817
5818 /**
5819  * Parse rtnetlink message attributes filling the attribute table with the info
5820  * retrieved.
5821  *
5822  * @param tb
5823  *   Attribute table to be filled.
5824  * @param[out] max
5825  *   Maximum entry in the attribute table.
5826  * @param rta
5827  *   The attributes section in the message to be parsed.
5828  * @param len
5829  *   The length of the attributes section in the message.
5830  */
5831 static void
5832 flow_tcf_nl_parse_rtattr(struct rtattr *tb[], int max,
5833                          struct rtattr *rta, int len)
5834 {
5835         unsigned short type;
5836         memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
5837         while (RTA_OK(rta, len)) {
5838                 type = rta->rta_type;
5839                 if (type <= max && !tb[type])
5840                         tb[type] = rta;
5841                 rta = RTA_NEXT(rta, len);
5842         }
5843 }
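
/*
 * Usage sketch (mirrors the callers below): parse the payload of an
 * attribute into a table indexed by nested attribute type:
 *
 *   struct rtattr *tb[TCA_FLOWER_ACT + 1];
 *
 *   flow_tcf_nl_parse_rtattr(tb, TCA_FLOWER_ACT,
 *                            RTA_DATA(opt), RTA_PAYLOAD(opt));
 *   if (tb[TCA_FLOWER_ACT])
 *           ... the attribute is present and can be parsed further ...
 */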
5844
5845 /**
5846  * Extract flow counters from flower action.
5847  *
5848  * @param rta
5849  *   flower action stats properties in the Netlink message received.
5850  * @param rta_type
5851  *   The backward sequence of rta_types, as written in the attribute table,
5852  *   we need to traverse in order to get to the requested object.
5853  * @param idx
5854  *   Current location in rta_type table.
5855  * @param[out] data
5856  *   data holding the count statistics of the rte_flow retrieved from
5857  *   the message.
5858  *
5859  * @return
5860  *   0 if data was found and retrieved, -1 otherwise.
5861  */
5862 static int
5863 flow_tcf_nl_action_stats_parse_and_get(struct rtattr *rta,
5864                                        uint16_t rta_type[], int idx,
5865                                        struct gnet_stats_basic *data)
5866 {
5867         int tca_stats_max = flow_tcf_arr_val_max(rta_type, idx,
5868                                                  TCA_STATS_BASIC);
5869         struct rtattr *tbs[tca_stats_max + 1];
5870
5871         if (rta == NULL || idx < 0)
5872                 return -1;
5873         flow_tcf_nl_parse_rtattr(tbs, tca_stats_max,
5874                                  RTA_DATA(rta), RTA_PAYLOAD(rta));
5875         switch (rta_type[idx]) {
5876         case TCA_STATS_BASIC:
5877                 if (tbs[TCA_STATS_BASIC]) {
5878                         memcpy(data, RTA_DATA(tbs[TCA_STATS_BASIC]),
5879                                RTE_MIN(RTA_PAYLOAD(tbs[TCA_STATS_BASIC]),
5880                                sizeof(*data)));
5881                         return 0;
5882                 }
5883                 break;
5884         default:
5885                 break;
5886         }
5887         return -1;
5888 }
5889
5890 /**
5891  * Parse a single flower action, retrieving the requested action attribute
5892  * if found.
5893  *
5894  * @param arg
5895  *   flower action properties in the Netlink message received.
5896  * @param rta_type
5897  *   The backward sequence of rta_types, as written in the attribute table,
5898  *   we need to traverse in order to get to the requested object.
5899  * @param idx
5900  *   Current location in rta_type table.
5901  * @param[out] data
5902  *   Count statistics retrieved from the message query.
5903  *
5904  * @return
5905  *   0 if data was found and retrieved, -1 otherwise.
5906  */
5907 static int
5908 flow_tcf_nl_parse_one_action_and_get(struct rtattr *arg,
5909                                      uint16_t rta_type[], int idx, void *data)
5910 {
5911         int tca_act_max = flow_tcf_arr_val_max(rta_type, idx, TCA_ACT_STATS);
5912         struct rtattr *tb[tca_act_max + 1];
5913
5914         if (arg == NULL || idx < 0)
5915                 return -1;
5916         flow_tcf_nl_parse_rtattr(tb, tca_act_max,
5917                                  RTA_DATA(arg), RTA_PAYLOAD(arg));
5918         if (tb[TCA_ACT_KIND] == NULL)
5919                 return -1;
5920         switch (rta_type[idx]) {
5921         case TCA_ACT_STATS:
5922                 if (tb[TCA_ACT_STATS])
5923                         return flow_tcf_nl_action_stats_parse_and_get
5924                                         (tb[TCA_ACT_STATS],
5925                                          rta_type, --idx,
5926                                          (struct gnet_stats_basic *)data);
5927                 break;
5928         default:
5929                 break;
5930         }
5931         return -1;
5932 }
5933
5934 /**
5935  * Parse flower action section in the message retrieving the requested
5936  * attribute from the first action that provides it.
5937  *
5938  * @param arg
5939  *   flower actions section in the Netlink message received.
5940  * @param rta_type
5941  *   The backward sequence of rta_types, as written in the attribute table,
5942  *   we need to traverse in order to get to the requested object.
5943  * @param idx
5944  *   Current location in rta_type table.
5945  * @param[out] data
5946  *   data retrieved from the message query.
5947  *
5948  * @return
5949  *   0 if data was found and retrieved, -1 otherwise.
5950  */
5951 static int
5952 flow_tcf_nl_action_parse_and_get(struct rtattr *arg,
5953                                  uint16_t rta_type[], int idx, void *data)
5954 {
5955         struct rtattr *tb[TCA_ACT_MAX_PRIO + 1];
5956         int i;
5957
5958         if (arg == NULL || idx < 0)
5959                 return -1;
5960         flow_tcf_nl_parse_rtattr(tb, TCA_ACT_MAX_PRIO,
5961                                  RTA_DATA(arg), RTA_PAYLOAD(arg));
5962         switch (rta_type[idx]) {
5963         /*
5964          * flow counters are stored in the actions defined by the flow
5965          * and not in the flow itself, therefore we need to traverse the
5966          * flower chain of actions in search of them.
5967          *
5968          * Note that the index is not decremented here.
5969          */
5970         case TCA_ACT_STATS:
5971                 for (i = 0; i <= TCA_ACT_MAX_PRIO; i++) {
5972                         if (tb[i] &&
5973                         !flow_tcf_nl_parse_one_action_and_get(tb[i],
5974                                                               rta_type,
5975                                                               idx, data))
5976                                 return 0;
5977                 }
5978                 break;
5979         default:
5980                 break;
5981         }
5982         return -1;
5983 }
5984
/**
 * Parse flower classifier options in the message, retrieving the requested
 * attribute if found.
 *
 * @param opt
 *   Flower section in the Netlink message received.
 * @param rta_type
 *   Backward sequence of rta_types, as written in the attribute table,
 *   to traverse in order to reach the requested object.
 * @param idx
 *   Current location in the rta_type table.
 * @param[out] data
 *   Data retrieved from the message query.
 *
 * @return
 *   0 if data was found and retrieved, -1 otherwise.
 */
static int
flow_tcf_nl_opts_parse_and_get(struct rtattr *opt,
                               uint16_t rta_type[], int idx, void *data)
{
        int tca_flower_max = flow_tcf_arr_val_max(rta_type, idx,
                                                  TCA_FLOWER_ACT);
        struct rtattr *tb[tca_flower_max + 1];

        if (!opt || idx < 0)
                return -1;
        flow_tcf_nl_parse_rtattr(tb, tca_flower_max,
                                 RTA_DATA(opt), RTA_PAYLOAD(opt));
        switch (rta_type[idx]) {
        case TCA_FLOWER_ACT:
                if (tb[TCA_FLOWER_ACT])
                        return flow_tcf_nl_action_parse_and_get
                                                        (tb[TCA_FLOWER_ACT],
                                                         rta_type, --idx, data);
                break;
        default:
                break;
        }
        return -1;
}

/**
 * Parse Netlink reply on filter query, retrieving the flow counters.
 *
 * @param cnlh
 *   Message received from Netlink.
 * @param rta_type
 *   Backward sequence of rta_types, as written in the attribute table,
 *   to traverse in order to reach the requested object.
 * @param idx
 *   Current location in the rta_type table.
 * @param[out] data
 *   Data retrieved from the message query.
 *
 * @return
 *   0 if data was found and retrieved, -1 otherwise.
 */
static int
flow_tcf_nl_filter_parse_and_get(struct nlmsghdr *cnlh,
                                 uint16_t rta_type[], int idx, void *data)
{
        struct nlmsghdr *nlh = cnlh;
        struct tcmsg *t = NLMSG_DATA(nlh);
        int len = nlh->nlmsg_len;
        int tca_max = flow_tcf_arr_val_max(rta_type, idx, TCA_OPTIONS);
        struct rtattr *tb[tca_max + 1];

        if (idx < 0)
                return -1;
        if (nlh->nlmsg_type != RTM_NEWTFILTER &&
            nlh->nlmsg_type != RTM_GETTFILTER &&
            nlh->nlmsg_type != RTM_DELTFILTER)
                return -1;
        len -= NLMSG_LENGTH(sizeof(*t));
        if (len < 0)
                return -1;
        flow_tcf_nl_parse_rtattr(tb, tca_max, TCA_RTA(t), len);
        /* Not a TC flower flow - bail out. */
        if (!tb[TCA_KIND] ||
            strcmp(RTA_DATA(tb[TCA_KIND]), "flower"))
                return -1;
        switch (rta_type[idx]) {
        case TCA_OPTIONS:
                if (tb[TCA_OPTIONS])
                        return flow_tcf_nl_opts_parse_and_get(tb[TCA_OPTIONS],
                                                              rta_type,
                                                              --idx, data);
                break;
        default:
                break;
        }
        return -1;
}

/**
 * A callback to parse Netlink reply on TC flower query.
 *
 * @param nlh
 *   Message received from Netlink.
 * @param[out] data
 *   Pointer to data area to be filled by the parsing routine.
 *   Assumed to be a pointer to struct flow_tcf_stats_basic.
 *
 * @return
 *   MNL_CB_OK value.
 */
static int
flow_tcf_nl_message_get_stats_basic(const struct nlmsghdr *nlh, void *data)
{
        /*
         * The backward sequence of rta_types to pass in order to get
         * to the counters: starting from the last array entry, the
         * parsers descend TCA_OPTIONS, then TCA_FLOWER_ACT, then
         * TCA_ACT_STATS, and finally read TCA_STATS_BASIC.
         */
        uint16_t rta_type[] = { TCA_STATS_BASIC, TCA_ACT_STATS,
                                TCA_FLOWER_ACT, TCA_OPTIONS };
        struct flow_tcf_stats_basic *sb_data = data;
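        /*
         * Strip the const qualifier from the message pointer through a
         * union rather than an explicit cast; the parsing helpers above
         * only read the message and never modify it.
         */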
        union {
                const struct nlmsghdr *c;
                struct nlmsghdr *nc;
        } tnlh = { .c = nlh };

        if (!flow_tcf_nl_filter_parse_and_get(tnlh.nc, rta_type,
                                              RTE_DIM(rta_type) - 1,
                                              (void *)&sb_data->counters))
                sb_data->valid = true;
        return MNL_CB_OK;
}

/**
 * Query a TC flower rule for its statistics via netlink.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_tcf_query_count(struct rte_eth_dev *dev,
                     struct rte_flow *flow,
                     void *data,
                     struct rte_flow_error *error)
{
        struct flow_tcf_stats_basic sb_data;
        struct rte_flow_query_count *qc = data;
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
        struct mnl_socket *nl = ctx->nl;
        struct mlx5_flow *dev_flow;
        struct nlmsghdr *nlh;
        uint32_t seq = ctx->seq++;
        ssize_t ret;

        assert(qc);
        memset(&sb_data, 0, sizeof(sb_data));
        dev_flow = LIST_FIRST(&flow->dev_flows);
        /* E-Switch flow can't be expanded. */
        assert(!LIST_NEXT(dev_flow, next));
        if (!dev_flow->flow->counter)
                goto notsup_exit;
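        /*
         * Reuse the rule message prepared at translation time as a
         * query: NLM_F_ECHO makes the kernel send the matched filter,
         * statistics included, back in the reply.
         */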
        nlh = dev_flow->tcf.nlh;
        nlh->nlmsg_type = RTM_GETTFILTER;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ECHO;
        nlh->nlmsg_seq = seq;
        if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) == -1)
                goto error_exit;
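        /*
         * Collect replies until mnl_cb_run() stops returning a positive
         * value; each received buffer is dispatched to the statistics
         * parsing callback above.
         */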
        do {
                ret = mnl_socket_recvfrom(nl, ctx->buf, ctx->buf_size);
                if (ret <= 0)
                        break;
                ret = mnl_cb_run(ctx->buf, ret, seq,
                                 mnl_socket_get_portid(nl),
                                 flow_tcf_nl_message_get_stats_basic,
                                 (void *)&sb_data);
        } while (ret > 0);
        if (sb_data.valid) {
                /* Return the delta from last reset. */
                qc->hits_set = 1;
                qc->bytes_set = 1;
                qc->hits = sb_data.counters.packets - flow->counter->hits;
                qc->bytes = sb_data.counters.bytes - flow->counter->bytes;
                if (qc->reset) {
                        flow->counter->hits = sb_data.counters.packets;
                        flow->counter->bytes = sb_data.counters.bytes;
                }
                return 0;
        }
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL,
                                  "flow does not have counter");
error_exit:
        return rte_flow_error_set
                        (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL, "netlink: failed to read flow rule counters");
notsup_exit:
        return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL, "counters are not available");
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_tcf_query(struct rte_eth_dev *dev,
               struct rte_flow *flow,
               const struct rte_flow_action *actions,
               void *data,
               struct rte_flow_error *error)
{
        int ret = -EINVAL;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_tcf_query_count(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        return ret;
}

const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
        .validate = flow_tcf_validate,
        .prepare = flow_tcf_prepare,
        .translate = flow_tcf_translate,
        .apply = flow_tcf_apply,
        .remove = flow_tcf_remove,
        .destroy = flow_tcf_destroy,
        .query = flow_tcf_query,
};

/**
 * Create and configure a libmnl socket for Netlink flow rules.
 *
 * @return
 *   A valid libmnl socket object pointer on success, NULL otherwise and
 *   rte_errno is set.
 */
static struct mnl_socket *
flow_tcf_mnl_socket_create(void)
{
        struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);

        if (nl) {
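                /*
                 * NETLINK_CAP_ACK caps acknowledgements so that error
                 * replies do not carry a copy of the offending request;
                 * setting it is best effort, hence the return value is
                 * deliberately ignored.
                 */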
                mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
                                      sizeof(int));
                if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
                        return nl;
        }
        rte_errno = errno;
        if (nl)
                mnl_socket_close(nl);
        return NULL;
}

/**
 * Destroy a libmnl socket.
 *
 * @param nl
 *   Libmnl socket of the @p NETLINK_ROUTE kind.
 */
static void
flow_tcf_mnl_socket_destroy(struct mnl_socket *nl)
{
        if (nl)
                mnl_socket_close(nl);
}

/**
 * Initialize ingress qdisc of a given network interface.
 *
 * @param ctx
 *   Pointer to tc-flower context to use.
 * @param ifindex
 *   Index of network interface to initialize.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
                   unsigned int ifindex, struct rte_flow_error *error)
{
        struct nlmsghdr *nlh;
        struct tcmsg *tcm;
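        /*
         * Size the message buffer for the worst case: Netlink header,
         * tcmsg and the "ingress" kind string, plus some spare room
         * (MNL_BUF_EXTRA_SPACE) on top.
         */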
        alignas(struct nlmsghdr)
        uint8_t buf[mnl_nlmsg_size(sizeof(*tcm)) +
                    SZ_NLATTR_STRZ_OF("ingress") +
                    MNL_BUF_EXTRA_SPACE];

        /* Destroy existing ingress qdisc and everything attached to it. */
        nlh = mnl_nlmsg_put_header(buf);
        nlh->nlmsg_type = RTM_DELQDISC;
        nlh->nlmsg_flags = NLM_F_REQUEST;
        tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = ifindex;
        tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
        tcm->tcm_parent = TC_H_INGRESS;
        assert(sizeof(buf) >= nlh->nlmsg_len);
        /* Ignore errors when qdisc is already absent. */
        if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL) &&
            rte_errno != EINVAL && rte_errno != ENOENT)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "netlink: failed to remove ingress"
                                          " qdisc");
        /* Create fresh ingress qdisc. */
        nlh = mnl_nlmsg_put_header(buf);
        nlh->nlmsg_type = RTM_NEWQDISC;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
        tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = ifindex;
        tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
        tcm->tcm_parent = TC_H_INGRESS;
        mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
        assert(sizeof(buf) >= nlh->nlmsg_len);
        if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL))
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "netlink: failed to create ingress"
                                          " qdisc");
        return 0;
}

/**
 * Create libmnl context for Netlink flow rules.
 *
 * @return
 *   A valid tc-flower context object pointer on success, NULL otherwise
 *   and rte_errno is set.
 */
struct mlx5_flow_tcf_context *
mlx5_flow_tcf_context_create(void)
{
        struct mlx5_flow_tcf_context *ctx = rte_zmalloc(__func__,
                                                        sizeof(*ctx),
                                                        sizeof(uint32_t));

        if (!ctx)
                goto error;
        ctx->nl = flow_tcf_mnl_socket_create();
        if (!ctx->nl)
                goto error;
        ctx->buf_size = MNL_SOCKET_BUFFER_SIZE;
        ctx->buf = rte_zmalloc(__func__,
                               ctx->buf_size, sizeof(uint32_t));
        if (!ctx->buf)
                goto error;
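        /*
         * Start Netlink sequence numbers from a random value so that
         * stale replies from a previous incarnation are unlikely to be
         * mistaken for answers to our own queries.
         */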
        ctx->seq = random();
        return ctx;
error:
        mlx5_flow_tcf_context_destroy(ctx);
        return NULL;
}
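
/*
 * Typical life cycle of the context, as an illustrative sketch only
 * (error handling elided; "ifindex" stands for the interface index of
 * the E-Switch manager, obtained elsewhere):
 *
 *	struct rte_flow_error error;
 *	struct mlx5_flow_tcf_context *ctx = mlx5_flow_tcf_context_create();
 *
 *	if (ctx && !mlx5_flow_tcf_init(ctx, ifindex, &error)) {
 *		... create, query and destroy rules via mlx5_flow_tcf_drv_ops ...
 *	}
 *	mlx5_flow_tcf_context_destroy(ctx);
 */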

/**
 * Destroy a libmnl context.
 *
 * @param ctx
 *   Pointer to tc-flower context to destroy.
 */
void
mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx)
{
        if (!ctx)
                return;
        flow_tcf_mnl_socket_destroy(ctx->nl);
        rte_free(ctx->buf);
        rte_free(ctx);
}