net/ice: enable QinQ filter for switch
drivers/net/ice/ice_switch_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"


#define MAX_QGRP_NUM_TYPE       7
#define MAX_INPUT_SET_BYTE      32
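/* PPP protocol identifiers for IPv4 (0x0021) and IPv6 (0x0057) payloads,
 * and the IP protocol number of GRE (47, 0x2F), used below to spot rules
 * that may also match NVGRE traffic.
 */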
#define ICE_PPP_IPV4_PROTO      0x0021
#define ICE_PPP_IPV6_PROTO      0x0057
#define ICE_IPV4_PROTO_NVGRE    0x002F

#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
        ICE_INSET_VLAN_INNER)
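/* QinQ matching extends the single-VLAN input set with the outer tag, so
 * both the S-VLAN and C-VLAN TCIs become part of the switch rule key.
 */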
#define ICE_SW_INSET_MAC_QINQ  ( \
        ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
        ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
        ICE_SW_INSET_MAC_IPV4 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
        ICE_SW_INSET_MAC_IPV6 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)

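/* Parsed-rule state handed from the parse stage to the create stage:
 * the lookup element array, its length and the advanced rule metadata.
 */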
struct sw_meta {
        struct ice_adv_lkup_elem *list;
        uint16_t lkups_num;
        struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;

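/* Two pattern tables follow; the "dist" and "perm" lists appear to map to
 * the distributor and permission classification stages of the flow
 * pipeline. Both carry the new QinQ patterns.
 */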
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_ethertype_qinq,
                        ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,
                        ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,
                        ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,
                        ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,
                        ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,
                        ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,
                        ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};
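
/* Illustrative usage sketch (not part of the driver): a QinQ flow pattern
 * an application could pass through rte_flow that maps to
 * pattern_eth_qinq_ipv4 above. The TCI values are hypothetical;
 * rte_flow_item_vlan_mask is the default mask exported by rte_flow.h.
 *
 *        struct rte_flow_item_vlan outer = { .tci = RTE_BE16(100) };
 *        struct rte_flow_item_vlan inner = { .tci = RTE_BE16(200) };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *                  .spec = &outer, .mask = &rte_flow_item_vlan_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *                  .spec = &inner, .mask = &rte_flow_item_vlan_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 */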

static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_ethertype_qinq,
                        ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,
                        ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,
                        ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,
                        ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,
                        ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,
                        ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,
                        ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};

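/* Program one parsed rule into the switch: hand the lookup list to
 * ice_add_adv_rule() and keep the returned rule id on the rte_flow so
 * the rule can be removed later.
 */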
static int
ice_switch_create(struct ice_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        int ret = 0;
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
        struct ice_rule_query_data *filter_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
                ((struct sw_meta *)meta)->lkups_num;
        struct ice_adv_rule_info *rule_info =
                &((struct sw_meta *)meta)->rule_info;

        if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "item number too large for rule");
                goto error;
        }
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "lookup list should not be NULL");
                goto error;
        }
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
                filter_ptr = rte_zmalloc("ice_switch_filter",
                        sizeof(struct ice_rule_query_data), 0);
                if (!filter_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
                flow->rule = filter_ptr;
                rte_memcpy(filter_ptr,
                        &rule_added,
                        sizeof(struct ice_rule_query_data));
        } else {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to create switch filter rule");
                goto error;
        }

        rte_free(list);
        rte_free(meta);
        return 0;

error:
        rte_free(list);
        rte_free(meta);

        return -rte_errno;
}

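/* Remove a previously created switch rule using the rule id stored on
 * the flow by ice_switch_create().
 */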
static int
ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_hw *hw = &ad->hw;
        int ret;
        struct ice_rule_query_data *filter_ptr;

        filter_ptr = (struct ice_rule_query_data *)
                flow->rule;

        if (!filter_ptr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow created by switch filter");
                return -rte_errno;
        }

        ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to destroy switch filter rule");
                return -rte_errno;
        }

        rte_free(filter_ptr);
        return ret;
}

static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
        rte_free(flow->rule);
}

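/* Walk the pattern items, fill the advanced lookup list and accumulate
 * the input-set bitmap; the error paths return 0 and set rte_flow_error
 * on an unsupported item or mask.
 */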
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
                struct ice_adv_lkup_elem *list,
                uint16_t *lkups_num,
                enum ice_sw_tunnel_type *tun_type)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
        const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
                                *pppoe_proto_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint16_t input_set_byte = 0;
        bool pppoe_elem_valid = 0;
        bool pppoe_patt_valid = 0;
        bool pppoe_prot_valid = 0;
        bool inner_vlan_valid = 0;
        bool outer_vlan_valid = 0;
        bool tunnel_valid = 0;
        bool profile_rule = 0;
        bool nvgre_valid = 0;
        bool vxlan_valid = 0;
        bool ipv6_valid = 0;
        bool ipv4_valid = 0;
        bool udp_valid = 0;
        bool tcp_valid = 0;
        uint16_t j, t = 0;

        for (item = pattern; item->type !=
                        RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range not supported");
                        return 0;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        if (eth_spec && eth_mask) {
                                const uint8_t *a = eth_mask->src.addr_bytes;
                                const uint8_t *b = eth_mask->dst.addr_bytes;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (a[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_SMAC;
                                                break;
                                        } else if (a[j]) {
                                                input_set |=
                                                        ICE_INSET_SMAC;
                                                break;
                                        }
                                }
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (b[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_DMAC;
                                                break;
                                        } else if (b[j]) {
                                                input_set |=
                                                        ICE_INSET_DMAC;
                                                break;
                                        }
                                }
                                if (eth_mask->type)
                                        input_set |= ICE_INSET_ETHERTYPE;
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                                struct ice_ether_hdr *h;
                                struct ice_ether_hdr *m;
                                uint16_t i = 0;
                                h = &list[t].h_u.eth_hdr;
                                m = &list[t].m_u.eth_hdr;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (eth_mask->src.addr_bytes[j]) {
                                                h->src_addr[j] =
                                                eth_spec->src.addr_bytes[j];
                                                m->src_addr[j] =
                                                eth_mask->src.addr_bytes[j];
                                                i = 1;
                                                input_set_byte++;
                                        }
                                        if (eth_mask->dst.addr_bytes[j]) {
                                                h->dst_addr[j] =
                                                eth_spec->dst.addr_bytes[j];
                                                m->dst_addr[j] =
                                                eth_mask->dst.addr_bytes[j];
                                                i = 1;
                                                input_set_byte++;
                                        }
                                }
                                if (i)
                                        t++;
                                if (eth_mask->type) {
                                        list[t].type = ICE_ETYPE_OL;
                                        list[t].h_u.ethertype.ethtype_id =
                                                eth_spec->type;
                                        list[t].m_u.ethertype.ethtype_id =
                                                eth_mask->type;
                                        input_set_byte += 2;
                                        t++;
                                }
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        ipv4_valid = 1;
                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TOS;
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_TUN_IPV4_PROTO;
                                } else {
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |= ICE_INSET_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |= ICE_INSET_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |= ICE_INSET_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_IPV4_PROTO;
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_IPV4_TOS;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                                if (ipv4_mask->hdr.src_addr) {
                                        list[t].h_u.ipv4_hdr.src_addr =
                                                ipv4_spec->hdr.src_addr;
                                        list[t].m_u.ipv4_hdr.src_addr =
                                                ipv4_mask->hdr.src_addr;
                                        input_set_byte += 2;
                                }
                                if (ipv4_mask->hdr.dst_addr) {
                                        list[t].h_u.ipv4_hdr.dst_addr =
                                                ipv4_spec->hdr.dst_addr;
                                        list[t].m_u.ipv4_hdr.dst_addr =
                                                ipv4_mask->hdr.dst_addr;
                                        input_set_byte += 2;
                                }
                                if (ipv4_mask->hdr.time_to_live) {
                                        list[t].h_u.ipv4_hdr.time_to_live =
                                                ipv4_spec->hdr.time_to_live;
                                        list[t].m_u.ipv4_hdr.time_to_live =
                                                ipv4_mask->hdr.time_to_live;
                                        input_set_byte++;
                                }
                                if (ipv4_mask->hdr.next_proto_id) {
                                        list[t].h_u.ipv4_hdr.protocol =
                                                ipv4_spec->hdr.next_proto_id;
                                        list[t].m_u.ipv4_hdr.protocol =
                                                ipv4_mask->hdr.next_proto_id;
                                        input_set_byte++;
                                }
                                if ((ipv4_spec->hdr.next_proto_id &
                                        ipv4_mask->hdr.next_proto_id) ==
                                        ICE_IPV4_PROTO_NVGRE)
                                        *tun_type = ICE_SW_TUN_AND_NON_TUN;
                                if (ipv4_mask->hdr.type_of_service) {
                                        list[t].h_u.ipv4_hdr.tos =
                                                ipv4_spec->hdr.type_of_service;
                                        list[t].m_u.ipv4_hdr.tos =
                                                ipv4_mask->hdr.type_of_service;
                                        input_set_byte++;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        ipv6_valid = 1;
                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                        return 0;
                                }

                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_SRC;
                                                break;
                                        } else if (ipv6_mask->hdr.src_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_SRC;
                                                break;
                                        }
                                }
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.dst_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_DST;
                                                break;
                                        } else if (ipv6_mask->hdr.dst_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_DST;
                                                break;
                                        }
                                }
                                if (ipv6_mask->hdr.proto &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_NEXT_HDR;
                                else if (ipv6_mask->hdr.proto)
                                        input_set |=
                                                ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_HOP_LIMIT;
                                else if (ipv6_mask->hdr.hop_limits)
                                        input_set |=
                                                ICE_INSET_IPV6_HOP_LIMIT;
                                if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) &&
                                        tunnel_valid)
                                        input_set |=
                                                        ICE_INSET_TUN_IPV6_TC;
                                else if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;

                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                                struct ice_ipv6_hdr *f;
                                struct ice_ipv6_hdr *s;
                                f = &list[t].h_u.ipv6_hdr;
                                s = &list[t].m_u.ipv6_hdr;
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j]) {
                                                f->src_addr[j] =
                                                ipv6_spec->hdr.src_addr[j];
                                                s->src_addr[j] =
                                                ipv6_mask->hdr.src_addr[j];
                                                input_set_byte++;
                                        }
                                        if (ipv6_mask->hdr.dst_addr[j]) {
                                                f->dst_addr[j] =
                                                ipv6_spec->hdr.dst_addr[j];
                                                s->dst_addr[j] =
                                                ipv6_mask->hdr.dst_addr[j];
                                                input_set_byte++;
                                        }
                                }
                                if (ipv6_mask->hdr.proto) {
                                        f->next_hdr =
                                                ipv6_spec->hdr.proto;
                                        s->next_hdr =
                                                ipv6_mask->hdr.proto;
                                        input_set_byte++;
                                }
                                if (ipv6_mask->hdr.hop_limits) {
                                        f->hop_limit =
                                                ipv6_spec->hdr.hop_limits;
                                        s->hop_limit =
                                                ipv6_mask->hdr.hop_limits;
                                        input_set_byte++;
                                }
                                if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) {
                                        struct ice_le_ver_tc_flow vtf;
                                        vtf.u.fld.version = 0;
                                        vtf.u.fld.flow_label = 0;
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_spec->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_mask->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        input_set_byte += 4;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;
                        udp_valid = 1;
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_DST_PORT;
                                } else {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_UDP_DST_PORT;
                                }
                                if (*tun_type == ICE_SW_TUN_VXLAN &&
                                                tunnel_valid == 0)
                                        list[t].type = ICE_UDP_OF;
                                else
                                        list[t].type = ICE_UDP_ILOS;
                                if (udp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                udp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                udp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (udp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                udp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                udp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;
                        tcp_valid = 1;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_DST_PORT;
                                } else {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TCP_DST_PORT;
                                }
                                list[t].type = ICE_TCP_IL;
                                if (tcp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                tcp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                tcp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (tcp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                tcp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_DST_PORT;
                                } else {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_SCTP_DST_PORT;
                                }
                                list[t].type = ICE_SCTP_IL;
                                if (sctp_mask->hdr.src_port) {
                                        list[t].h_u.sctp_hdr.src_port =
                                                sctp_spec->hdr.src_port;
                                        list[t].m_u.sctp_hdr.src_port =
                                                sctp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (sctp_mask->hdr.dst_port) {
                                        list[t].h_u.sctp_hdr.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        list[t].m_u.sctp_hdr.dst_port =
                                                sctp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }
                        vxlan_valid = 1;
                        tunnel_valid = 1;
                        if (vxlan_spec && vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                                if (vxlan_mask->vni[0] ||
                                        vxlan_mask->vni[1] ||
                                        vxlan_mask->vni[2]) {
                                        list[t].h_u.tnl_hdr.vni =
                                                (vxlan_spec->vni[2] << 16) |
                                                (vxlan_spec->vni[1] << 8) |
                                                vxlan_spec->vni[0];
                                        list[t].m_u.tnl_hdr.vni =
                                                (vxlan_mask->vni[2] << 16) |
                                                (vxlan_mask->vni[1] << 8) |
                                                vxlan_mask->vni[0];
                                        input_set |=
                                                ICE_INSET_TUN_VXLAN_VNI;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        nvgre_valid = 1;
                        tunnel_valid = 1;
                        if (nvgre_spec && nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                                if (nvgre_mask->tni[0] ||
                                        nvgre_mask->tni[1] ||
                                        nvgre_mask->tni[2]) {
                                        list[t].h_u.nvgre_hdr.tni_flow =
                                                (nvgre_spec->tni[2] << 16) |
                                                (nvgre_spec->tni[1] << 8) |
                                                nvgre_spec->tni[0];
                                        list[t].m_u.nvgre_hdr.tni_flow =
                                                (nvgre_mask->tni[2] << 16) |
                                                (nvgre_mask->tni[1] << 8) |
                                                nvgre_mask->tni[0];
                                        input_set |=
                                                ICE_INSET_TUN_NVGRE_TNI;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

1016                 case RTE_FLOW_ITEM_TYPE_VLAN:
1017                         vlan_spec = item->spec;
1018                         vlan_mask = item->mask;
1019                         /* Check if the VLAN item only describes the protocol.
1020                          * If yes, both spec and mask must be NULL.
1021                          * If no, neither spec nor mask may be NULL.
1022                          */
1023                         if ((!vlan_spec && vlan_mask) ||
1024                             (vlan_spec && !vlan_mask)) {
1025                                 rte_flow_error_set(error, EINVAL,
1026                                            RTE_FLOW_ERROR_TYPE_ITEM,
1027                                            item,
1028                                            "Invalid VLAN item");
1029                                 return 0;
1030                         }
1031
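                        /* Assign VLAN layers in pattern order: when a QinQ
                         * tunnel type was pre-selected (two VLAN items in
                         * the pattern), the first VLAN item is the outer
                         * tag and the second the inner tag; with a single
                         * VLAN item only the inner layer is used.
                         */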
1032                         if (!outer_vlan_valid &&
1033                             (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1034                              *tun_type == ICE_NON_TUN_QINQ))
1035                                 outer_vlan_valid = 1;
1036                         else if (!inner_vlan_valid &&
1037                                  (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1038                                   *tun_type == ICE_NON_TUN_QINQ))
1039                                 inner_vlan_valid = 1;
1040                         else if (!inner_vlan_valid)
1041                                 inner_vlan_valid = 1;
1042
1043                         if (vlan_spec && vlan_mask) {
1044                                 if (outer_vlan_valid && !inner_vlan_valid) {
1045                                         list[t].type = ICE_VLAN_EX;
1046                                         input_set |= ICE_INSET_VLAN_OUTER;
1047                                 } else if (inner_vlan_valid) {
1048                                         list[t].type = ICE_VLAN_OFOS;
1049                                         input_set |= ICE_INSET_VLAN_INNER;
1050                                 }
1051
1052                                 if (vlan_mask->tci) {
1053                                         list[t].h_u.vlan_hdr.vlan =
1054                                                 vlan_spec->tci;
1055                                         list[t].m_u.vlan_hdr.vlan =
1056                                                 vlan_mask->tci;
1057                                         input_set_byte += 2;
1058                                 }
1059                                 if (vlan_mask->inner_type) {
1060                                         rte_flow_error_set(error, EINVAL,
1061                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1062                                                 item,
1063                                                 "Invalid VLAN input set.");
1064                                         return 0;
1065                                 }
1066                                 t++;
1067                         }
1068                         break;
1069
1070                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1071                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1072                         pppoe_spec = item->spec;
1073                         pppoe_mask = item->mask;
1074                         /* Check if the PPPoE item only describes the protocol.
1075                          * If yes, both spec and mask must be NULL.
1076                          * If no, neither spec nor mask may be NULL.
1077                          */
1078                         if ((!pppoe_spec && pppoe_mask) ||
1079                                 (pppoe_spec && !pppoe_mask)) {
1080                                 rte_flow_error_set(error, EINVAL,
1081                                         RTE_FLOW_ERROR_TYPE_ITEM,
1082                                         item,
1083                                         "Invalid PPPoE item");
1084                                 return 0;
1085                         }
1086                         pppoe_patt_valid = 1;
1087                         if (pppoe_spec && pppoe_mask) {
1088                                 /* Check PPPoE mask and update input set */
1089                                 if (pppoe_mask->length ||
1090                                         pppoe_mask->code ||
1091                                         pppoe_mask->version_type) {
1092                                         rte_flow_error_set(error, EINVAL,
1093                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1094                                                 item,
1095                                                 "Invalid PPPoE mask");
1096                                         return 0;
1097                                 }
1098                                 list[t].type = ICE_PPPOE;
1099                                 if (pppoe_mask->session_id) {
1100                                         list[t].h_u.pppoe_hdr.session_id =
1101                                                 pppoe_spec->session_id;
1102                                         list[t].m_u.pppoe_hdr.session_id =
1103                                                 pppoe_mask->session_id;
1104                                         input_set |= ICE_INSET_PPPOE_SESSION;
1105                                         input_set_byte += 2;
1106                                 }
1107                                 t++;
1108                                 pppoe_elem_valid = 1;
1109                         }
1110                         break;
1111
1112                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1113                         pppoe_proto_spec = item->spec;
1114                         pppoe_proto_mask = item->mask;
1115                         /* Check if the optional PPPoE proto_id item
1116                          * only describes the protocol.
1117                          * If yes, both spec and mask must be NULL.
1118                          * If no, neither spec nor mask may be NULL.
1119                          */
1120                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1121                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1122                                 rte_flow_error_set(error, EINVAL,
1123                                         RTE_FLOW_ERROR_TYPE_ITEM,
1124                                         item,
1125                                         "Invalid PPPoE proto item");
1126                                 return 0;
1127                         }
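                        /* The optional proto_id item refines the ICE_PPPOE
                         * lookup element added for the preceding PPPoE
                         * item, so rewind t and merge into that element
                         * rather than emitting a second one. Only PPP IPv4
                         * (0x0021) and IPv6 (0x0057) payloads keep
                         * ICE_SW_TUN_PPPOE; anything else falls back to
                         * ICE_SW_TUN_PPPOE_PAY.
                         */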
1128                         if (pppoe_proto_spec && pppoe_proto_mask) {
1129                                 if (pppoe_elem_valid)
1130                                         t--;
1131                                 list[t].type = ICE_PPPOE;
1132                                 if (pppoe_proto_mask->proto_id) {
1133                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1134                                                 pppoe_proto_spec->proto_id;
1135                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1136                                                 pppoe_proto_mask->proto_id;
1137                                         input_set |= ICE_INSET_PPPOE_PROTO;
1138                                         input_set_byte += 2;
1139                                         pppoe_prot_valid = 1;
1140                                 }
1141                                 if ((pppoe_proto_mask->proto_id &
1142                                         pppoe_proto_spec->proto_id) !=
1143                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1144                                         (pppoe_proto_mask->proto_id &
1145                                         pppoe_proto_spec->proto_id) !=
1146                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1147                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1148                                 else
1149                                         *tun_type = ICE_SW_TUN_PPPOE;
1150                                 t++;
1151                         }
1152
1153                         break;
1154
1155                 case RTE_FLOW_ITEM_TYPE_ESP:
1156                         esp_spec = item->spec;
1157                         esp_mask = item->mask;
1158                         if ((esp_spec && !esp_mask) ||
1159                                 (!esp_spec && esp_mask)) {
1160                                 rte_flow_error_set(error, EINVAL,
1161                                            RTE_FLOW_ERROR_TYPE_ITEM,
1162                                            item,
1163                                            "Invalid ESP item");
1164                                 return 0;
1165                         }
1166                         /* Check ESP mask and update input set */
1167                         if (esp_mask && esp_mask->hdr.seq) {
1168                                 rte_flow_error_set(error, EINVAL,
1169                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1170                                                 item,
1171                                                 "Invalid ESP mask");
1172                                 return 0;
1173                         }
1174
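                        /* An empty ESP item with no input set so far
                         * selects a profile rule: the flow then matches on
                         * the packet profile ID alone and no lookup
                         * element is added. With UDP in the pattern the
                         * profile is NAT-T (UDP-encapsulated ESP).
                         */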
1175                         if (!esp_spec && !esp_mask && !input_set) {
1176                                 profile_rule = 1;
1177                                 if (ipv6_valid && udp_valid)
1178                                         *tun_type =
1179                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1180                                 else if (ipv6_valid)
1181                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1182                                 else if (ipv4_valid)
1183                                         return 0;
1184                         } else if (esp_spec && esp_mask &&
1185                                                 esp_mask->hdr.spi) {
1186                                 if (udp_valid)
1187                                         list[t].type = ICE_NAT_T;
1188                                 else
1189                                         list[t].type = ICE_ESP;
1190                                 list[t].h_u.esp_hdr.spi =
1191                                         esp_spec->hdr.spi;
1192                                 list[t].m_u.esp_hdr.spi =
1193                                         esp_mask->hdr.spi;
1194                                 input_set |= ICE_INSET_ESP_SPI;
1195                                 input_set_byte += 4;
1196                                 t++;
1197                         }
1198
1199                         if (!profile_rule) {
1200                                 if (ipv6_valid && udp_valid)
1201                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1202                                 else if (ipv4_valid && udp_valid)
1203                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1204                                 else if (ipv6_valid)
1205                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1206                                 else if (ipv4_valid)
1207                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1208                         }
1209                         break;
1210
1211                 case RTE_FLOW_ITEM_TYPE_AH:
1212                         ah_spec = item->spec;
1213                         ah_mask = item->mask;
1214                         if ((ah_spec && !ah_mask) ||
1215                                 (!ah_spec && ah_mask)) {
1216                                 rte_flow_error_set(error, EINVAL,
1217                                            RTE_FLOW_ERROR_TYPE_ITEM,
1218                                            item,
1219                                            "Invalid AH item");
1220                                 return 0;
1221                         }
1222                         /* Check AH mask and update input set */
1223                         if (ah_mask &&
1224                                 (ah_mask->next_hdr ||
1225                                 ah_mask->payload_len ||
1226                                 ah_mask->seq_num ||
1227                                 ah_mask->reserved)) {
1228                                 rte_flow_error_set(error, EINVAL,
1229                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1230                                                 item,
1231                                                 "Invalid AH mask");
1232                                 return 0;
1233                         }
1234
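                        /* Same profile-rule convention as ESP above. AH
                         * over UDP is rejected in the non-profile branch
                         * below, since NAT-T encapsulates ESP only.
                         */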
1235                         if (!ah_spec && !ah_mask && !input_set) {
1236                                 profile_rule = 1;
1237                                 if (ipv6_valid && udp_valid)
1238                                         *tun_type =
1239                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1240                                 else if (ipv6_valid)
1241                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1242                                 else if (ipv4_valid)
1243                                         return 0;
1244                         } else if (ah_spec && ah_mask &&
1245                                                 ah_mask->spi) {
1246                                 list[t].type = ICE_AH;
1247                                 list[t].h_u.ah_hdr.spi =
1248                                         ah_spec->spi;
1249                                 list[t].m_u.ah_hdr.spi =
1250                                         ah_mask->spi;
1251                                 input_set |= ICE_INSET_AH_SPI;
1252                                 input_set_byte += 4;
1253                                 t++;
1254                         }
1255
1256                         if (!profile_rule) {
1257                                 if (udp_valid)
1258                                         return 0;
1259                                 else if (ipv6_valid)
1260                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1261                                 else if (ipv4_valid)
1262                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1263                         }
1264                         break;
1265
1266                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1267                         l2tp_spec = item->spec;
1268                         l2tp_mask = item->mask;
1269                         if ((l2tp_spec && !l2tp_mask) ||
1270                                 (!l2tp_spec && l2tp_mask)) {
1271                                 rte_flow_error_set(error, EINVAL,
1272                                            RTE_FLOW_ERROR_TYPE_ITEM,
1273                                            item,
1274                                            "Invalid L2TPv3 item");
1275                                 return 0;
1276                         }
1277
1278                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1279                                 if (ipv6_valid)
1280                                         *tun_type =
1281                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1282                                 else if (ipv4_valid)
1283                                         return 0;
1284                         } else if (l2tp_spec && l2tp_mask &&
1285                                                 l2tp_mask->session_id) {
1286                                 list[t].type = ICE_L2TPV3;
1287                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1288                                         l2tp_spec->session_id;
1289                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1290                                         l2tp_mask->session_id;
1291                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1292                                 input_set_byte += 4;
1293                                 t++;
1294                         }
1295
1296                         if (!profile_rule) {
1297                                 if (ipv6_valid)
1298                                         *tun_type =
1299                                         ICE_SW_TUN_IPV6_L2TPV3;
1300                                 else if (ipv4_valid)
1301                                         *tun_type =
1302                                         ICE_SW_TUN_IPV4_L2TPV3;
1303                         }
1304                         break;
1305
1306                 case RTE_FLOW_ITEM_TYPE_PFCP:
1307                         pfcp_spec = item->spec;
1308                         pfcp_mask = item->mask;
1309                         /* Check if the PFCP item only describes the protocol.
1310                          * If yes, both spec and mask must be NULL.
1311                          * If no, neither spec nor mask may be NULL.
1312                          */
1313                         if ((!pfcp_spec && pfcp_mask) ||
1314                             (pfcp_spec && !pfcp_mask)) {
1315                                 rte_flow_error_set(error, EINVAL,
1316                                            RTE_FLOW_ERROR_TYPE_ITEM,
1317                                            item,
1318                                            "Invalid PFCP item");
1319                                 return 0;
1320                         }
1321                         if (pfcp_spec && pfcp_mask) {
1322                                 /* Check PFCP mask and update input set */
1323                                 if (pfcp_mask->msg_type ||
1324                                         pfcp_mask->msg_len ||
1325                                         pfcp_mask->seid) {
1326                                         rte_flow_error_set(error, EINVAL,
1327                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1328                                                 item,
1329                                                 "Invalid PFCP mask");
1330                                         return 0;
1331                                 }
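                                /* The PFCP S field selects the profile:
                                 * s_field == 1 is a session message (SEID
                                 * present), s_field == 0 a node message.
                                 * PFCP rules match by profile ID only, so
                                 * no lookup element is added.
                                 */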
1332                                 if (pfcp_mask->s_field &&
1333                                         pfcp_spec->s_field == 0x01 &&
1334                                         ipv6_valid)
1335                                         *tun_type =
1336                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1337                                 else if (pfcp_mask->s_field &&
1338                                         pfcp_spec->s_field == 0x01)
1339                                         *tun_type =
1340                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1341                                 else if (pfcp_mask->s_field &&
1342                                         !pfcp_spec->s_field &&
1343                                         ipv6_valid)
1344                                         *tun_type =
1345                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1346                                 else if (pfcp_mask->s_field &&
1347                                         !pfcp_spec->s_field)
1348                                         *tun_type =
1349                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1350                                 else
1351                                         return 0;
1352                         }
1353                         break;
1354
1355                 case RTE_FLOW_ITEM_TYPE_VOID:
1356                         break;
1357
1358                 default:
1359                         rte_flow_error_set(error, EINVAL,
1360                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1361                                    "Invalid pattern item.");
1362                         goto out;
1363                 }
1364         }
1365
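        /* Promote the tunnel type now that the whole pattern has been
         * walked: a rule carrying both an outer and an inner VLAN becomes
         * the QinQ variant of its base type. A hypothetical testpmd rule
         * that ends up as ICE_NON_TUN_QINQ:
         *
         *   flow create 0 ingress pattern eth / vlan tci is 2 /
         *        vlan tci is 2 / end actions queue index 2 / end
         */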
1366         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1367             inner_vlan_valid && outer_vlan_valid)
1368                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1369         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1370                  inner_vlan_valid && outer_vlan_valid)
1371                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1372         else if (*tun_type == ICE_NON_TUN &&
1373                  inner_vlan_valid && outer_vlan_valid)
1374                 *tun_type = ICE_NON_TUN_QINQ;
1375         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1376                  inner_vlan_valid && outer_vlan_valid)
1377                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1378
1379         if (pppoe_patt_valid && !pppoe_prot_valid) {
1380                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1381                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1382                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1383                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1384                 else if (inner_vlan_valid && outer_vlan_valid)
1385                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1386                 else if (ipv6_valid && udp_valid)
1387                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1388                 else if (ipv6_valid && tcp_valid)
1389                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1390                 else if (ipv4_valid && udp_valid)
1391                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1392                 else if (ipv4_valid && tcp_valid)
1393                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1394                 else if (ipv6_valid)
1395                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1396                 else if (ipv4_valid)
1397                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1398                 else
1399                         *tun_type = ICE_SW_TUN_PPPOE;
1400         }
1401
1402         if (*tun_type == ICE_NON_TUN) {
1403                 if (vxlan_valid)
1404                         *tun_type = ICE_SW_TUN_VXLAN;
1405                 else if (nvgre_valid)
1406                         *tun_type = ICE_SW_TUN_NVGRE;
1407                 else if (ipv4_valid && tcp_valid)
1408                         *tun_type = ICE_SW_IPV4_TCP;
1409                 else if (ipv4_valid && udp_valid)
1410                         *tun_type = ICE_SW_IPV4_UDP;
1411                 else if (ipv6_valid && tcp_valid)
1412                         *tun_type = ICE_SW_IPV6_TCP;
1413                 else if (ipv6_valid && udp_valid)
1414                         *tun_type = ICE_SW_IPV6_UDP;
1415         }
1416
1417         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1418                 rte_flow_error_set(error, EINVAL,
1419                         RTE_FLOW_ERROR_TYPE_ITEM,
1420                         item,
1421                         "too many input set bytes");
1422                 return 0;
1423         }
1424
1425         *lkups_num = t;
1426
1427         return input_set;
1428 out:
1429         return 0;
1430 }
1431
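/* Parse actions for rules created through the DCF (Device Config
 * Function). Only VF forwarding and drop are accepted: the DCF programs
 * the switch on behalf of other VFs, and RTE_FLOW_ACTION_TYPE_VF selects
 * the target VSI either by VF id or, when "original" is set, by the DCF's
 * own function number.
 */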
1432 static int
1433 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1434                             const struct rte_flow_action *actions,
1435                             struct rte_flow_error *error,
1436                             struct ice_adv_rule_info *rule_info)
1437 {
1438         const struct rte_flow_action_vf *act_vf;
1439         const struct rte_flow_action *action;
1440         enum rte_flow_action_type action_type;
1441
1442         for (action = actions; action->type !=
1443                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1444                 action_type = action->type;
1445                 switch (action_type) {
1446                 case RTE_FLOW_ACTION_TYPE_VF:
1447                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1448                         act_vf = action->conf;
1449
1450                         if (act_vf->id >= ad->real_hw.num_vfs &&
1451                                 !act_vf->original) {
1452                                 rte_flow_error_set(error,
1453                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1454                                         actions,
1455                                         "Invalid VF id");
1456                                 return -rte_errno;
1457                         }
1458
1459                         if (act_vf->original)
1460                                 rule_info->sw_act.vsi_handle =
1461                                         ad->real_hw.avf.bus.func;
1462                         else
1463                                 rule_info->sw_act.vsi_handle = act_vf->id;
1464                         break;
1465
1466                 case RTE_FLOW_ACTION_TYPE_DROP:
1467                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1468                         break;
1469
1470                 default:
1471                         rte_flow_error_set(error,
1472                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1473                                            actions,
1474                                            "Invalid action type");
1475                         return -rte_errno;
1476                 }
1477         }
1478
1479         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1480         rule_info->sw_act.flag = ICE_FLTR_RX;
1481         rule_info->rx = 1;
1482         rule_info->priority = 5;
1483
1484         return 0;
1485 }
1486
1487 static int
1488 ice_switch_parse_action(struct ice_pf *pf,
1489                 const struct rte_flow_action *actions,
1490                 struct rte_flow_error *error,
1491                 struct ice_adv_rule_info *rule_info)
1492 {
1493         struct ice_vsi *vsi = pf->main_vsi;
1494         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1495         const struct rte_flow_action_queue *act_q;
1496         const struct rte_flow_action_rss *act_qgrop;
1497         uint16_t base_queue, i;
1498         const struct rte_flow_action *action;
1499         enum rte_flow_action_type action_type;
1500         static const uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1501                  2, 4, 8, 16, 32, 64, 128};
1502
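        /* The RSS action is reused here to request a queue group: its
         * queue_num must be one of the power-of-two sizes in
         * valid_qgrop_number[], the queue ids must be contiguous and the
         * region must fit within the device's Rx queues. E.g. in testpmd,
         * "actions rss queues 0 1 2 3 end / end" asks for a 4-queue group
         * starting at queue 0.
         */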
1503         base_queue = pf->base_queue + vsi->base_queue;
1504         for (action = actions; action->type !=
1505                         RTE_FLOW_ACTION_TYPE_END; action++) {
1506                 action_type = action->type;
1507                 switch (action_type) {
1508                 case RTE_FLOW_ACTION_TYPE_RSS:
1509                         act_qgrop = action->conf;
1510                         if (act_qgrop->queue_num <= 1)
1511                                 goto error;
1512                         rule_info->sw_act.fltr_act =
1513                                 ICE_FWD_TO_QGRP;
1514                         rule_info->sw_act.fwd_id.q_id =
1515                                 base_queue + act_qgrop->queue[0];
1516                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1517                                 if (act_qgrop->queue_num ==
1518                                         valid_qgrop_number[i])
1519                                         break;
1520                         }
1521                         if (i == MAX_QGRP_NUM_TYPE)
1522                                 goto error;
1523                         if ((act_qgrop->queue[0] +
1524                                 act_qgrop->queue_num) >
1525                                 dev->data->nb_rx_queues)
1526                                 goto error1;
1527                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1528                                 if (act_qgrop->queue[i + 1] !=
1529                                         act_qgrop->queue[i] + 1)
1530                                         goto error2;
1531                         rule_info->sw_act.qgrp_size =
1532                                 act_qgrop->queue_num;
1533                         break;
1534                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1535                         act_q = action->conf;
1536                         if (act_q->index >= dev->data->nb_rx_queues)
1537                                 goto error;
1538                         rule_info->sw_act.fltr_act =
1539                                 ICE_FWD_TO_Q;
1540                         rule_info->sw_act.fwd_id.q_id =
1541                                 base_queue + act_q->index;
1542                         break;
1543
1544                 case RTE_FLOW_ACTION_TYPE_DROP:
1545                         rule_info->sw_act.fltr_act =
1546                                 ICE_DROP_PACKET;
1547                         break;
1548
1549                 case RTE_FLOW_ACTION_TYPE_VOID:
1550                         break;
1551
1552                 default:
1553                         goto error;
1554                 }
1555         }
1556
1557         rule_info->sw_act.vsi_handle = vsi->idx;
1558         rule_info->rx = 1;
1559         rule_info->sw_act.src = vsi->idx;
1560         rule_info->priority = 5;
1561
1562         return 0;
1563
1564 error:
1565         rte_flow_error_set(error,
1566                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1567                 actions,
1568                 "Invalid action type or queue number");
1569         return -rte_errno;
1570
1571 error1:
1572         rte_flow_error_set(error,
1573                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1574                 actions,
1575                 "Invalid queue region indexes");
1576         return -rte_errno;
1577
1578 error2:
1579         rte_flow_error_set(error,
1580                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1581                 actions,
1582                 "Discontinuous queue region");
1583         return -rte_errno;
1584 }
1585
1586 static int
1587 ice_switch_check_action(const struct rte_flow_action *actions,
1588                             struct rte_flow_error *error)
1589 {
1590         const struct rte_flow_action *action;
1591         enum rte_flow_action_type action_type;
1592         uint16_t actions_num = 0;
1593
1594         for (action = actions; action->type !=
1595                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1596                 action_type = action->type;
1597                 switch (action_type) {
1598                 case RTE_FLOW_ACTION_TYPE_VF:
1599                 case RTE_FLOW_ACTION_TYPE_RSS:
1600                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1601                 case RTE_FLOW_ACTION_TYPE_DROP:
1602                         actions_num++;
1603                         break;
1604                 case RTE_FLOW_ACTION_TYPE_VOID:
1605                         continue;
1606                 default:
1607                         rte_flow_error_set(error,
1608                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1609                                            actions,
1610                                            "Invalid action type");
1611                         return -rte_errno;
1612                 }
1613         }
1614
1615         if (actions_num != 1) {
1616                 rte_flow_error_set(error,
1617                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1618                                    actions,
1619                                    "Invalid action number");
1620                 return -rte_errno;
1621         }
1622
1623         return 0;
1624 }
1625
1626 static bool
1627 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1628 {
1629         switch (tun_type) {
1630         case ICE_SW_TUN_PROFID_IPV6_ESP:
1631         case ICE_SW_TUN_PROFID_IPV6_AH:
1632         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1633         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1634         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1635         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1636         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1637         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1638                 return true;
1639         default:
1640                 break;
1641         }
1642
1643         return false;
1644 }
1645
1646 static int
1647 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1648                 struct ice_pattern_match_item *array,
1649                 uint32_t array_len,
1650                 const struct rte_flow_item pattern[],
1651                 const struct rte_flow_action actions[],
1652                 void **meta,
1653                 struct rte_flow_error *error)
1654 {
1655         struct ice_pf *pf = &ad->pf;
1656         uint64_t inputset = 0;
1657         int ret = 0;
1658         struct sw_meta *sw_meta_ptr = NULL;
1659         struct ice_adv_rule_info rule_info;
1660         struct ice_adv_lkup_elem *list = NULL;
1661         uint16_t lkups_num = 0;
1662         const struct rte_flow_item *item = pattern;
1663         uint16_t item_num = 0;
1664         uint16_t vlan_num = 0;
1665         enum ice_sw_tunnel_type tun_type =
1666                         ICE_NON_TUN;
1667         struct ice_pattern_match_item *pattern_match_item = NULL;
1668
1669         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1670                 item_num++;
1671                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1672                         const struct rte_flow_item_eth *eth_mask;
1673                         if (item->mask)
1674                                 eth_mask = item->mask;
1675                         else
1676                                 continue;
1677                         if (eth_mask->type == UINT16_MAX)
1678                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1679                 }
1680
1681                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1682                         vlan_num++;
1683
1684                 /* reserve one more memory slot for ETH which may
1685                  * consume 2 lookup items.
1686                  */
1687                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1688                         item_num++;
1689         }
1690
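        /* Two VLAN items imply a QinQ rule: pre-select the QinQ tunnel
         * type here so ice_switch_inset_get() can tell the outer tag from
         * the inner one while walking the pattern.
         */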
1691         if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1692                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1693         else if (vlan_num == 2)
1694                 tun_type = ICE_NON_TUN_QINQ;
1695
1696         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1697         if (!list) {
1698                 rte_flow_error_set(error, EINVAL,
1699                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1700                                    "No memory for PMD internal items");
1701                 return -rte_errno;
1702         }
1703
1704         sw_meta_ptr =
1705                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1706         if (!sw_meta_ptr) {
1707                 rte_flow_error_set(error, EINVAL,
1708                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1709                                    "No memory for sw_pattern_meta_ptr");
1710                 goto error;
1711         }
1712
1713         pattern_match_item =
1714                 ice_search_pattern_match_item(ad, pattern, array, array_len,
1715                                               error);
1716         if (!pattern_match_item) {
1717                 rte_flow_error_set(error, EINVAL,
1718                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1719                                    "Invalid input pattern");
1720                 goto error;
1721         }
1722
1723         inputset = ice_switch_inset_get
1724                 (pattern, error, list, &lkups_num, &tun_type);
1725         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1726                 (inputset & ~pattern_match_item->input_set_mask)) {
1727                 rte_flow_error_set(error, EINVAL,
1728                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1729                                    pattern,
1730                                    "Invalid input set");
1731                 goto error;
1732         }
1733
1734         memset(&rule_info, 0, sizeof(rule_info));
1735         rule_info.tun_type = tun_type;
1736
1737         ret = ice_switch_check_action(actions, error);
1738         if (ret)
1739                 goto error;
1740
1741         if (ad->hw.dcf_enabled)
1742                 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1743                                                   &rule_info);
1744         else
1745                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1746
1747         if (ret)
1748                 goto error;
1749
1750         if (meta) {
1751                 *meta = sw_meta_ptr;
1752                 ((struct sw_meta *)*meta)->list = list;
1753                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1754                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1755         } else {
1756                 rte_free(list);
1757                 rte_free(sw_meta_ptr);
1758         }
1759
1760         rte_free(pattern_match_item);
1761
1762         return 0;
1763
1764 error:
1765         rte_free(list);
1766         rte_free(sw_meta_ptr);
1767         rte_free(pattern_match_item);
1768
1769         return -rte_errno;
1770 }
1771
1772 static int
1773 ice_switch_query(struct ice_adapter *ad __rte_unused,
1774                 struct rte_flow *flow __rte_unused,
1775                 struct rte_flow_query_count *count __rte_unused,
1776                 struct rte_flow_error *error)
1777 {
1778         rte_flow_error_set(error, EINVAL,
1779                 RTE_FLOW_ERROR_TYPE_HANDLE,
1780                 NULL,
1781                 "count action not supported by switch filter");
1782
1783         return -rte_errno;
1784 }
1785
1786 static int
1787 ice_switch_redirect(struct ice_adapter *ad,
1788                     struct rte_flow *flow,
1789                     struct ice_flow_redirect *rd)
1790 {
1791         struct ice_rule_query_data *rdata = flow->rule;
1792         struct ice_adv_fltr_mgmt_list_entry *list_itr;
1793         struct ice_adv_lkup_elem *lkups_dp = NULL;
1794         struct LIST_HEAD_TYPE *list_head;
1795         struct ice_adv_rule_info rinfo;
1796         struct ice_hw *hw = &ad->hw;
1797         struct ice_switch_info *sw;
1798         uint16_t lkups_cnt;
1799         int ret;
1800
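        /* VSI redirect, e.g. after a VF reset has changed the VSI number:
         * find the rule in the recipe's filter list, duplicate its lookup
         * elements, remove the old rule, update the cached VSI context and
         * replay the rule against the new VSI.
         */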
1801         if (rdata->vsi_handle != rd->vsi_handle)
1802                 return 0;
1803
1804         sw = hw->switch_info;
1805         if (!sw->recp_list[rdata->rid].recp_created)
1806                 return -EINVAL;
1807
1808         if (rd->type != ICE_FLOW_REDIRECT_VSI)
1809                 return -ENOTSUP;
1810
1811         list_head = &sw->recp_list[rdata->rid].filt_rules;
1812         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1813                             list_entry) {
1814                 rinfo = list_itr->rule_info;
1815                 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1816                     rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1817                     rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1818                     (rinfo.fltr_rule_id == rdata->rule_id &&
1819                     rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) {
1820                         lkups_cnt = list_itr->lkups_cnt;
1821                         lkups_dp = (struct ice_adv_lkup_elem *)
1822                                 ice_memdup(hw, list_itr->lkups,
1823                                            sizeof(*list_itr->lkups) *
1824                                            lkups_cnt, ICE_NONDMA_TO_NONDMA);
1825
1826                         if (!lkups_dp) {
1827                                 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1828                                 return -EINVAL;
1829                         }
1830
1831                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1832                                 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1833                                 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1834                         }
1835                         break;
1836                 }
1837         }
1838
1839         if (!lkups_dp)
1840                 return -EINVAL;
1841
1842         /* Remove the old rule */
1843         ret = ice_rem_adv_rule(hw, list_itr->lkups,
1844                                lkups_cnt, &rinfo);
1845         if (ret) {
1846                 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1847                             rdata->rule_id);
1848                 ret = -EINVAL;
1849                 goto out;
1850         }
1851
1852         /* Update VSI context */
1853         hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1854
1855         /* Replay the rule */
1856         ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1857                                &rinfo, rdata);
1858         if (ret) {
1859                 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1860                 ret = -EINVAL;
1861         }
1862
1863 out:
1864         ice_free(hw, lkups_dp);
1865         return ret;
1866 }
1867
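/* Register the switch engine's flow parser: the permission-stage parser
 * when the pipe_mode_support devarg is set, the distributor-stage parser
 * otherwise.
 */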
1868 static int
1869 ice_switch_init(struct ice_adapter *ad)
1870 {
1871         int ret = 0;
1872         struct ice_flow_parser *dist_parser;
1873         struct ice_flow_parser *perm_parser;
1874
1875         if (ad->devargs.pipe_mode_support) {
1876                 perm_parser = &ice_switch_perm_parser;
1877                 ret = ice_register_parser(perm_parser, ad);
1878         } else {
1879                 dist_parser = &ice_switch_dist_parser;
1880                 ret = ice_register_parser(dist_parser, ad);
1881         }
1882         return ret;
1883 }
1884
1885 static void
1886 ice_switch_uninit(struct ice_adapter *ad)
1887 {
1888         struct ice_flow_parser *dist_parser;
1889         struct ice_flow_parser *perm_parser;
1890
1891         if (ad->devargs.pipe_mode_support) {
1892                 perm_parser = &ice_switch_perm_parser;
1893                 ice_unregister_parser(perm_parser, ad);
1894         } else {
1895                 dist_parser = &ice_switch_dist_parser;
1896                 ice_unregister_parser(dist_parser, ad);
1897         }
1898 }
1899
1900 static struct
1901 ice_flow_engine ice_switch_engine = {
1902         .init = ice_switch_init,
1903         .uninit = ice_switch_uninit,
1904         .create = ice_switch_create,
1905         .destroy = ice_switch_destroy,
1906         .query_count = ice_switch_query,
1907         .redirect = ice_switch_redirect,
1908         .free = ice_switch_filter_rule_free,
1909         .type = ICE_FLOW_ENGINE_SWITCH,
1910 };
1911
1912 static struct
1913 ice_flow_parser ice_switch_dist_parser = {
1914         .engine = &ice_switch_engine,
1915         .array = ice_switch_pattern_dist_list,
1916         .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1917         .parse_pattern_action = ice_switch_parse_pattern_action,
1918         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1919 };
1920
1921 static struct
1922 ice_flow_parser ice_switch_perm_parser = {
1923         .engine = &ice_switch_engine,
1924         .array = ice_switch_pattern_perm_list,
1925         .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1926         .parse_pattern_action = ice_switch_parse_pattern_action,
1927         .stage = ICE_FLOW_STAGE_PERMISSION,
1928 };
1929
1930 RTE_INIT(ice_sw_engine_init)
1931 {
1932         struct ice_flow_engine *engine = &ice_switch_engine;
1933         ice_register_flow_engine(engine);
1934 }