net/ice: fix tunnel type for switch rule
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Maximum number of queue-group types a switch rule action may carry. */
#define MAX_QGRP_NUM_TYPE 7
/* PPP protocol IDs carried inside a PPPoE session payload. */
#define ICE_PPP_IPV4_PROTO      0x0021
#define ICE_PPP_IPV6_PROTO      0x0057
/* IPv4 protocol number for GRE; used to detect NVGRE-over-IPv4. */
#define ICE_IPV4_PROTO_NVGRE    0x002F

/*
 * ICE_SW_INSET_* below enumerate, per supported flow pattern, the set of
 * input-set bits (ICE_INSET_*) a switch rule may match on.  Bits prefixed
 * ICE_INSET_TUN_* refer to inner (tunneled) header fields.
 */
/* Non-tunnel L2/L3/L4 patterns. */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
		ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Tunnel (NVGRE/VXLAN) input sets used by the distributor parsers:
 * inner 5-tuple fields plus the tunnel ID and outer IPv4 destination.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel input sets used by the permission-stage parser (inner fields only). */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns; the *_IPV4/IPV6 variants add the inner IP input sets. */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Security / tunneling protocol patterns (ESP, AH, L2TPv3, PFCP). */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
132
/*
 * Parser output handed from ice_switch_parse_pattern_action() to
 * ice_switch_create(): the lookup element list, its length, and the
 * rule info needed by ice_add_adv_rule().  The creator owns (and
 * frees) both the list and the sw_meta itself.
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;		/* lookup elements, heap-allocated */
	uint16_t lkups_num;			/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule attributes and action */
};

/* Parsers registered with the generic flow engine; defined near EOF. */
static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;
142
/*
 * Patterns supported in distributor mode with the comms DDP package,
 * paired with the input-set bits that may be matched for each pattern.
 * An entry whose supported input set is ICE_INSET_NONE accepts the
 * pattern but permits no field matching (pattern-only rule).
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	/* VXLAN and NVGRE tunnels: inner IPv4 (optionally TCP/UDP). */
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	/* PPPoE session patterns, with and without an outer VLAN tag. */
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	/* IPsec (ESP/AH), L2TPv3 and PFCP patterns. */
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
228
/*
 * Patterns supported in distributor mode with the OS-default DDP
 * package: basic L2/L3/L4 plus VXLAN/NVGRE tunnels.  No PPPoE, IPsec,
 * L2TP or PFCP support (those require the comms package).
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	/* ARP is accepted pattern-only; no field matching. */
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};
262
/*
 * Patterns supported in permission (e.g. DCF) mode.  Mirrors the
 * comms distributor table, but tunnel patterns use the PERM input
 * sets, which match inner fields only (no outer IPv4 destination).
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	/* Tunnels: both VXLAN and NVGRE share the PERM inner-field sets. */
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	/* PPPoE session patterns, with and without an outer VLAN tag. */
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	/* IPsec (ESP/AH), L2TPv3 and PFCP patterns. */
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
348
349 static int
350 ice_switch_create(struct ice_adapter *ad,
351                 struct rte_flow *flow,
352                 void *meta,
353                 struct rte_flow_error *error)
354 {
355         int ret = 0;
356         struct ice_pf *pf = &ad->pf;
357         struct ice_hw *hw = ICE_PF_TO_HW(pf);
358         struct ice_rule_query_data rule_added = {0};
359         struct ice_rule_query_data *filter_ptr;
360         struct ice_adv_lkup_elem *list =
361                 ((struct sw_meta *)meta)->list;
362         uint16_t lkups_cnt =
363                 ((struct sw_meta *)meta)->lkups_num;
364         struct ice_adv_rule_info *rule_info =
365                 &((struct sw_meta *)meta)->rule_info;
366
367         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
368                 rte_flow_error_set(error, EINVAL,
369                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
370                         "item number too large for rule");
371                 goto error;
372         }
373         if (!list) {
374                 rte_flow_error_set(error, EINVAL,
375                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
376                         "lookup list should not be NULL");
377                 goto error;
378         }
379         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
380         if (!ret) {
381                 filter_ptr = rte_zmalloc("ice_switch_filter",
382                         sizeof(struct ice_rule_query_data), 0);
383                 if (!filter_ptr) {
384                         rte_flow_error_set(error, EINVAL,
385                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
386                                    "No memory for ice_switch_filter");
387                         goto error;
388                 }
389                 flow->rule = filter_ptr;
390                 rte_memcpy(filter_ptr,
391                         &rule_added,
392                         sizeof(struct ice_rule_query_data));
393         } else {
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
396                         "switch filter create flow fail");
397                 goto error;
398         }
399
400         rte_free(list);
401         rte_free(meta);
402         return 0;
403
404 error:
405         rte_free(list);
406         rte_free(meta);
407
408         return -rte_errno;
409 }
410
411 static int
412 ice_switch_destroy(struct ice_adapter *ad,
413                 struct rte_flow *flow,
414                 struct rte_flow_error *error)
415 {
416         struct ice_hw *hw = &ad->hw;
417         int ret;
418         struct ice_rule_query_data *filter_ptr;
419
420         filter_ptr = (struct ice_rule_query_data *)
421                 flow->rule;
422
423         if (!filter_ptr) {
424                 rte_flow_error_set(error, EINVAL,
425                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
426                         "no such flow"
427                         " create by switch filter");
428                 return -rte_errno;
429         }
430
431         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
432         if (ret) {
433                 rte_flow_error_set(error, EINVAL,
434                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
435                         "fail to destroy switch filter rule");
436                 return -rte_errno;
437         }
438
439         rte_free(filter_ptr);
440         return ret;
441 }
442
443 static void
444 ice_switch_filter_rule_free(struct rte_flow *flow)
445 {
446         rte_free(flow->rule);
447 }
448
449 static uint64_t
450 ice_switch_inset_get(const struct rte_flow_item pattern[],
451                 struct rte_flow_error *error,
452                 struct ice_adv_lkup_elem *list,
453                 uint16_t *lkups_num,
454                 enum ice_sw_tunnel_type *tun_type)
455 {
456         const struct rte_flow_item *item = pattern;
457         enum rte_flow_item_type item_type;
458         const struct rte_flow_item_eth *eth_spec, *eth_mask;
459         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
460         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
461         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
462         const struct rte_flow_item_udp *udp_spec, *udp_mask;
463         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
464         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
465         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
466         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
467         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
468         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
469                                 *pppoe_proto_mask;
470         const struct rte_flow_item_esp *esp_spec, *esp_mask;
471         const struct rte_flow_item_ah *ah_spec, *ah_mask;
472         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
473         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
474         uint64_t input_set = ICE_INSET_NONE;
475         bool pppoe_elem_valid = 0;
476         bool pppoe_patt_valid = 0;
477         bool pppoe_prot_valid = 0;
478         bool profile_rule = 0;
479         bool tunnel_valid = 0;
480         bool ipv6_valiad = 0;
481         bool ipv4_valiad = 0;
482         bool udp_valiad = 0;
483         bool tcp_valiad = 0;
484         uint16_t j, t = 0;
485
486         for (item = pattern; item->type !=
487                         RTE_FLOW_ITEM_TYPE_END; item++) {
488                 if (item->last) {
489                         rte_flow_error_set(error, EINVAL,
490                                         RTE_FLOW_ERROR_TYPE_ITEM,
491                                         item,
492                                         "Not support range");
493                         return 0;
494                 }
495                 item_type = item->type;
496
497                 switch (item_type) {
498                 case RTE_FLOW_ITEM_TYPE_ETH:
499                         eth_spec = item->spec;
500                         eth_mask = item->mask;
501                         if (eth_spec && eth_mask) {
502                                 const uint8_t *a = eth_mask->src.addr_bytes;
503                                 const uint8_t *b = eth_mask->dst.addr_bytes;
504                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
505                                         if (a[j] && tunnel_valid) {
506                                                 input_set |=
507                                                         ICE_INSET_TUN_SMAC;
508                                                 break;
509                                         } else if (a[j]) {
510                                                 input_set |=
511                                                         ICE_INSET_SMAC;
512                                                 break;
513                                         }
514                                 }
515                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
516                                         if (b[j] && tunnel_valid) {
517                                                 input_set |=
518                                                         ICE_INSET_TUN_DMAC;
519                                                 break;
520                                         } else if (b[j]) {
521                                                 input_set |=
522                                                         ICE_INSET_DMAC;
523                                                 break;
524                                         }
525                                 }
526                                 if (eth_mask->type)
527                                         input_set |= ICE_INSET_ETHERTYPE;
528                                 list[t].type = (tunnel_valid  == 0) ?
529                                         ICE_MAC_OFOS : ICE_MAC_IL;
530                                 struct ice_ether_hdr *h;
531                                 struct ice_ether_hdr *m;
532                                 uint16_t i = 0;
533                                 h = &list[t].h_u.eth_hdr;
534                                 m = &list[t].m_u.eth_hdr;
535                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
536                                         if (eth_mask->src.addr_bytes[j]) {
537                                                 h->src_addr[j] =
538                                                 eth_spec->src.addr_bytes[j];
539                                                 m->src_addr[j] =
540                                                 eth_mask->src.addr_bytes[j];
541                                                 i = 1;
542                                         }
543                                         if (eth_mask->dst.addr_bytes[j]) {
544                                                 h->dst_addr[j] =
545                                                 eth_spec->dst.addr_bytes[j];
546                                                 m->dst_addr[j] =
547                                                 eth_mask->dst.addr_bytes[j];
548                                                 i = 1;
549                                         }
550                                 }
551                                 if (i)
552                                         t++;
553                                 if (eth_mask->type) {
554                                         list[t].type = ICE_ETYPE_OL;
555                                         list[t].h_u.ethertype.ethtype_id =
556                                                 eth_spec->type;
557                                         list[t].m_u.ethertype.ethtype_id =
558                                                 eth_mask->type;
559                                         t++;
560                                 }
561                         }
562                         break;
563
564                 case RTE_FLOW_ITEM_TYPE_IPV4:
565                         ipv4_spec = item->spec;
566                         ipv4_mask = item->mask;
567                         ipv4_valiad = 1;
568                         if (ipv4_spec && ipv4_mask) {
569                                 /* Check IPv4 mask and update input set */
570                                 if (ipv4_mask->hdr.version_ihl ||
571                                         ipv4_mask->hdr.total_length ||
572                                         ipv4_mask->hdr.packet_id ||
573                                         ipv4_mask->hdr.hdr_checksum) {
574                                         rte_flow_error_set(error, EINVAL,
575                                                    RTE_FLOW_ERROR_TYPE_ITEM,
576                                                    item,
577                                                    "Invalid IPv4 mask.");
578                                         return 0;
579                                 }
580
581                                 if (tunnel_valid) {
582                                         if (ipv4_mask->hdr.type_of_service)
583                                                 input_set |=
584                                                         ICE_INSET_TUN_IPV4_TOS;
585                                         if (ipv4_mask->hdr.src_addr)
586                                                 input_set |=
587                                                         ICE_INSET_TUN_IPV4_SRC;
588                                         if (ipv4_mask->hdr.dst_addr)
589                                                 input_set |=
590                                                         ICE_INSET_TUN_IPV4_DST;
591                                         if (ipv4_mask->hdr.time_to_live)
592                                                 input_set |=
593                                                         ICE_INSET_TUN_IPV4_TTL;
594                                         if (ipv4_mask->hdr.next_proto_id)
595                                                 input_set |=
596                                                 ICE_INSET_TUN_IPV4_PROTO;
597                                 } else {
598                                         if (ipv4_mask->hdr.src_addr)
599                                                 input_set |= ICE_INSET_IPV4_SRC;
600                                         if (ipv4_mask->hdr.dst_addr)
601                                                 input_set |= ICE_INSET_IPV4_DST;
602                                         if (ipv4_mask->hdr.time_to_live)
603                                                 input_set |= ICE_INSET_IPV4_TTL;
604                                         if (ipv4_mask->hdr.next_proto_id)
605                                                 input_set |=
606                                                 ICE_INSET_IPV4_PROTO;
607                                         if (ipv4_mask->hdr.type_of_service)
608                                                 input_set |=
609                                                         ICE_INSET_IPV4_TOS;
610                                 }
611                                 list[t].type = (tunnel_valid  == 0) ?
612                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
613                                 if (ipv4_mask->hdr.src_addr) {
614                                         list[t].h_u.ipv4_hdr.src_addr =
615                                                 ipv4_spec->hdr.src_addr;
616                                         list[t].m_u.ipv4_hdr.src_addr =
617                                                 ipv4_mask->hdr.src_addr;
618                                 }
619                                 if (ipv4_mask->hdr.dst_addr) {
620                                         list[t].h_u.ipv4_hdr.dst_addr =
621                                                 ipv4_spec->hdr.dst_addr;
622                                         list[t].m_u.ipv4_hdr.dst_addr =
623                                                 ipv4_mask->hdr.dst_addr;
624                                 }
625                                 if (ipv4_mask->hdr.time_to_live) {
626                                         list[t].h_u.ipv4_hdr.time_to_live =
627                                                 ipv4_spec->hdr.time_to_live;
628                                         list[t].m_u.ipv4_hdr.time_to_live =
629                                                 ipv4_mask->hdr.time_to_live;
630                                 }
631                                 if (ipv4_mask->hdr.next_proto_id) {
632                                         list[t].h_u.ipv4_hdr.protocol =
633                                                 ipv4_spec->hdr.next_proto_id;
634                                         list[t].m_u.ipv4_hdr.protocol =
635                                                 ipv4_mask->hdr.next_proto_id;
636                                 }
637                                 if ((ipv4_spec->hdr.next_proto_id &
638                                         ipv4_mask->hdr.next_proto_id) ==
639                                         ICE_IPV4_PROTO_NVGRE)
640                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
641                                 if (ipv4_mask->hdr.type_of_service) {
642                                         list[t].h_u.ipv4_hdr.tos =
643                                                 ipv4_spec->hdr.type_of_service;
644                                         list[t].m_u.ipv4_hdr.tos =
645                                                 ipv4_mask->hdr.type_of_service;
646                                 }
647                                 t++;
648                         }
649                         break;
650
		case RTE_FLOW_ITEM_TYPE_IPV6:
			/* Parse an IPv6 item: validate the mask, record the
			 * matched fields in input_set (tunnel-inner vs. outer
			 * variants, selected by tunnel_valid) and fill one
			 * lookup element in list[].
			 */
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			ipv6_valiad = 1;
			if (ipv6_spec && ipv6_mask) {
				/* payload_len cannot be matched by a switch
				 * rule, so any mask on it is an error.
				 */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				/* Any non-zero byte in the 16-byte address
				 * mask enables the corresponding SRC/DST
				 * input-set bit; one bit per address, hence
				 * the break on first hit.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				/* Only the Traffic Class bits of vtc_flow are
				 * supported; the flow-label bits are ignored.
				 */
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
							ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;	/* header (spec) values */
				struct ice_ipv6_hdr *s;	/* mask values */
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					/* Re-encode the extracted TC value into
					 * the device's version/tc/flow-label
					 * word with version and flow label
					 * forced to zero, once for the spec
					 * and once for the mask.
					 */
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
				}
				t++;
			}
			break;
763
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* Parse a UDP item: only source/destination port
			 * matching is supported; length and checksum masks
			 * are rejected.
			 */
			udp_spec = item->spec;
			udp_mask = item->mask;
			udp_valiad = 1;
			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set*/
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_UDP_DST_PORT;
				} else {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_UDP_DST_PORT;
				}
				/* The outer UDP header of a VXLAN tunnel uses
				 * the dedicated ICE_UDP_OF type; any other UDP
				 * header (inner or plain) is ICE_UDP_ILOS.
				 */
				if (*tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
814
		case RTE_FLOW_ITEM_TYPE_TCP:
			/* Parse a TCP item: only source/destination port
			 * matching is supported; every other TCP header
			 * field mask is rejected.
			 */
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			tcp_valiad = 1;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				/* TCP is always an inner/last lookup type. */
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
866
		case RTE_FLOW_ITEM_TYPE_SCTP:
			/* Parse an SCTP item: only source/destination port
			 * matching is supported; a checksum mask is rejected.
			 * NOTE(review): unlike TCP/UDP/IP items, no
			 * sctp_valiad flag is set here — confirm the later
			 * tunnel-type selection does not need one.
			 */
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				/* SCTP is always an inner/last lookup type. */
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
911
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			/* Parse a VXLAN item. Setting tunnel_valid makes all
			 * subsequent L3/L4 items be treated as tunnel-inner.
			 */
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}

			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* Pack the 3 VNI bytes into a 24-bit
					 * value with vni[2] most significant —
					 * the byte order the ice base code
					 * expects for tnl_hdr.vni (presumably;
					 * confirm against ice_protocol_type).
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
				}
				t++;
			}
			break;
948
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			/* Parse an NVGRE item. Setting tunnel_valid makes all
			 * subsequent L3/L4 items be treated as tunnel-inner.
			 */
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* Pack the 3 TNI bytes into a 24-bit
					 * value, tni[2] most significant —
					 * mirrors the VXLAN VNI packing above.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
				}
				t++;
			}
			break;
984
		case RTE_FLOW_ITEM_TYPE_VLAN:
			/* Parse an outer-VLAN item: TCI maps to the VLAN id
			 * field, inner_type maps to the encapsulated
			 * ethertype field of the VLAN lookup element.
			 */
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return 0;
			}
			if (vlan_spec && vlan_mask) {
				list[t].type = ICE_VLAN_OFOS;
				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set |= ICE_INSET_VLAN_OUTER;
				}
				if (vlan_mask->inner_type) {
					list[t].h_u.vlan_hdr.type =
						vlan_spec->inner_type;
					list[t].m_u.vlan_hdr.type =
						vlan_mask->inner_type;
					input_set |= ICE_INSET_ETHERTYPE;
				}
				t++;
			}
			break;
1019
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			/* Parse a PPPoE discovery/session item: only the
			 * session id can be matched; length, code and
			 * version_type masks are rejected.
			 */
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			/* A PPPoE pattern was seen even if it is only a
			 * protocol marker (spec/mask NULL); presumably
			 * consumed after the item loop — outside this chunk.
			 */
			pppoe_patt_valid = 1;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
				}
				t++;
				/* Remember that a lookup element was created,
				 * so a following PPPOE_PROTO_ID item can
				 * merge into it (see the t-- below).
				 */
				pppoe_elem_valid = 1;
			}
			break;
1060
		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			/* Parse the optional PPPoE proto_id item and decide
			 * the PPPoE tunnel type from the payload protocol.
			 */
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return 0;
			}
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* Reuse the ICE_PPPOE lookup element created
				 * by the preceding PPPoE item, so session id
				 * and proto id share one header entry.
				 */
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					input_set |= ICE_INSET_PPPOE_PROTO;

					pppoe_prot_valid = 1;
				}
				/* Only IPv4 (0x0021) and IPv6 (0x0057) PPP
				 * payloads use the full PPPOE tunnel type;
				 * any other protocol id matches the PPPoE
				 * header alone (PAY).
				 */
				if ((pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
					(pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;
1103
		case RTE_FLOW_ITEM_TYPE_ESP:
			/* Parse an ESP item: only the SPI can be matched;
			 * a sequence-number mask is rejected. The tunnel
			 * type is derived from which L3/L4 items preceded
			 * this one (ipv4/ipv6/udp flags set above).
			 */
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return 0;
			}
			/* Check esp mask and update input set */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return 0;
			}

			/* A bare ESP marker with an otherwise empty input
			 * set selects a profile-id rule (no field lookup).
			 */
			if (!esp_spec && !esp_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valiad && udp_valiad)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valiad)
					/* No profile rule exists for plain
					 * IPv4 ESP.
					 * NOTE(review): returns 0 without
					 * setting an rte_flow error — confirm
					 * the caller treats 0 as unsupported.
					 */
					return 0;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi){
				/* ESP over UDP is NAT traversal (NAT-T). */
				if (udp_valiad)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				input_set |= ICE_INSET_ESP_SPI;
				t++;
			}

			/* Non-profile rules pick the concrete ESP/NAT-T
			 * tunnel type from the preceding L3/L4 items.
			 */
			if (!profile_rule) {
				if (ipv6_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valiad)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;
1158
1159                 case RTE_FLOW_ITEM_TYPE_AH:
1160                         ah_spec = item->spec;
1161                         ah_mask = item->mask;
1162                         if ((ah_spec && !ah_mask) ||
1163                                 (!ah_spec && ah_mask)) {
1164                                 rte_flow_error_set(error, EINVAL,
1165                                            RTE_FLOW_ERROR_TYPE_ITEM,
1166                                            item,
1167                                            "Invalid ah item");
1168                                 return 0;
1169                         }
1170                         /* Check ah mask and update input set */
1171                         if (ah_mask &&
1172                                 (ah_mask->next_hdr ||
1173                                 ah_mask->payload_len ||
1174                                 ah_mask->seq_num ||
1175                                 ah_mask->reserved)) {
1176                                 rte_flow_error_set(error, EINVAL,
1177                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1178                                                 item,
1179                                                 "Invalid ah mask");
1180                                 return 0;
1181                         }
1182
1183                         if (!ah_spec && !ah_mask && !input_set) {
1184                                 profile_rule = 1;
1185                                 if (ipv6_valiad && udp_valiad)
1186                                         *tun_type =
1187                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1188                                 else if (ipv6_valiad)
1189                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1190                                 else if (ipv4_valiad)
1191                                         return 0;
1192                         } else if (ah_spec && ah_mask &&
1193                                                 ah_mask->spi){
1194                                 list[t].type = ICE_AH;
1195                                 list[t].h_u.ah_hdr.spi =
1196                                         ah_spec->spi;
1197                                 list[t].m_u.ah_hdr.spi =
1198                                         ah_mask->spi;
1199                                 input_set |= ICE_INSET_AH_SPI;
1200                                 t++;
1201                         }
1202
1203                         if (!profile_rule) {
1204                                 if (udp_valiad)
1205                                         return 0;
1206                                 else if (ipv6_valiad)
1207                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1208                                 else if (ipv4_valiad)
1209                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1210                         }
1211                         break;
1212
1213                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1214                         l2tp_spec = item->spec;
1215                         l2tp_mask = item->mask;
1216                         if ((l2tp_spec && !l2tp_mask) ||
1217                                 (!l2tp_spec && l2tp_mask)) {
1218                                 rte_flow_error_set(error, EINVAL,
1219                                            RTE_FLOW_ERROR_TYPE_ITEM,
1220                                            item,
1221                                            "Invalid l2tp item");
1222                                 return 0;
1223                         }
1224
1225                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1226                                 if (ipv6_valiad)
1227                                         *tun_type =
1228                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1229                                 else if (ipv4_valiad)
1230                                         return 0;
1231                         } else if (l2tp_spec && l2tp_mask &&
1232                                                 l2tp_mask->session_id){
1233                                 list[t].type = ICE_L2TPV3;
1234                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1235                                         l2tp_spec->session_id;
1236                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1237                                         l2tp_mask->session_id;
1238                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1239                                 t++;
1240                         }
1241
1242                         if (!profile_rule) {
1243                                 if (ipv6_valiad)
1244                                         *tun_type =
1245                                         ICE_SW_TUN_IPV6_L2TPV3;
1246                                 else if (ipv4_valiad)
1247                                         *tun_type =
1248                                         ICE_SW_TUN_IPV4_L2TPV3;
1249                         }
1250                         break;
1251
1252                 case RTE_FLOW_ITEM_TYPE_PFCP:
1253                         pfcp_spec = item->spec;
1254                         pfcp_mask = item->mask;
1255                         /* Check if PFCP item is used to describe protocol.
1256                          * If yes, both spec and mask should be NULL.
1257                          * If no, both spec and mask shouldn't be NULL.
1258                          */
1259                         if ((!pfcp_spec && pfcp_mask) ||
1260                             (pfcp_spec && !pfcp_mask)) {
1261                                 rte_flow_error_set(error, EINVAL,
1262                                            RTE_FLOW_ERROR_TYPE_ITEM,
1263                                            item,
1264                                            "Invalid PFCP item");
1265                                 return -ENOTSUP;
1266                         }
1267                         if (pfcp_spec && pfcp_mask) {
1268                                 /* Check pfcp mask and update input set */
1269                                 if (pfcp_mask->msg_type ||
1270                                         pfcp_mask->msg_len ||
1271                                         pfcp_mask->seid) {
1272                                         rte_flow_error_set(error, EINVAL,
1273                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1274                                                 item,
1275                                                 "Invalid pfcp mask");
1276                                         return -ENOTSUP;
1277                                 }
1278                                 if (pfcp_mask->s_field &&
1279                                         pfcp_spec->s_field == 0x01 &&
1280                                         ipv6_valiad)
1281                                         *tun_type =
1282                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1283                                 else if (pfcp_mask->s_field &&
1284                                         pfcp_spec->s_field == 0x01)
1285                                         *tun_type =
1286                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1287                                 else if (pfcp_mask->s_field &&
1288                                         !pfcp_spec->s_field &&
1289                                         ipv6_valiad)
1290                                         *tun_type =
1291                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1292                                 else if (pfcp_mask->s_field &&
1293                                         !pfcp_spec->s_field)
1294                                         *tun_type =
1295                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1296                                 else
1297                                         return -ENOTSUP;
1298                         }
1299                         break;
1300
1301                 case RTE_FLOW_ITEM_TYPE_VOID:
1302                         break;
1303
1304                 default:
1305                         rte_flow_error_set(error, EINVAL,
1306                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1307                                    "Invalid pattern item.");
1308                         goto out;
1309                 }
1310         }
1311
1312         if (pppoe_patt_valid && !pppoe_prot_valid) {
1313                 if (ipv6_valiad && udp_valiad)
1314                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1315                 else if (ipv6_valiad && tcp_valiad)
1316                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1317                 else if (ipv4_valiad && udp_valiad)
1318                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1319                 else if (ipv4_valiad && tcp_valiad)
1320                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1321                 else if (ipv6_valiad)
1322                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1323                 else if (ipv4_valiad)
1324                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1325                 else
1326                         *tun_type = ICE_SW_TUN_PPPOE;
1327         }
1328
1329         *lkups_num = t;
1330
1331         return input_set;
1332 out:
1333         return 0;
1334 }
1335
1336 static int
1337 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1338                             const struct rte_flow_action *actions,
1339                             struct rte_flow_error *error,
1340                             struct ice_adv_rule_info *rule_info)
1341 {
1342         const struct rte_flow_action_vf *act_vf;
1343         const struct rte_flow_action *action;
1344         enum rte_flow_action_type action_type;
1345
1346         for (action = actions; action->type !=
1347                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1348                 action_type = action->type;
1349                 switch (action_type) {
1350                 case RTE_FLOW_ACTION_TYPE_VF:
1351                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1352                         act_vf = action->conf;
1353                         if (act_vf->original)
1354                                 rule_info->sw_act.vsi_handle =
1355                                         ad->real_hw.avf.bus.func;
1356                         else
1357                                 rule_info->sw_act.vsi_handle = act_vf->id;
1358                         break;
1359                 default:
1360                         rte_flow_error_set(error,
1361                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1362                                            actions,
1363                                            "Invalid action type or queue number");
1364                         return -rte_errno;
1365                 }
1366         }
1367
1368         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1369         rule_info->sw_act.flag = ICE_FLTR_RX;
1370         rule_info->rx = 1;
1371         rule_info->priority = 5;
1372
1373         return 0;
1374 }
1375
1376 static int
1377 ice_switch_parse_action(struct ice_pf *pf,
1378                 const struct rte_flow_action *actions,
1379                 struct rte_flow_error *error,
1380                 struct ice_adv_rule_info *rule_info)
1381 {
1382         struct ice_vsi *vsi = pf->main_vsi;
1383         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1384         const struct rte_flow_action_queue *act_q;
1385         const struct rte_flow_action_rss *act_qgrop;
1386         uint16_t base_queue, i;
1387         const struct rte_flow_action *action;
1388         enum rte_flow_action_type action_type;
1389         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1390                  2, 4, 8, 16, 32, 64, 128};
1391
1392         base_queue = pf->base_queue + vsi->base_queue;
1393         for (action = actions; action->type !=
1394                         RTE_FLOW_ACTION_TYPE_END; action++) {
1395                 action_type = action->type;
1396                 switch (action_type) {
1397                 case RTE_FLOW_ACTION_TYPE_RSS:
1398                         act_qgrop = action->conf;
1399                         if (act_qgrop->queue_num <= 1)
1400                                 goto error;
1401                         rule_info->sw_act.fltr_act =
1402                                 ICE_FWD_TO_QGRP;
1403                         rule_info->sw_act.fwd_id.q_id =
1404                                 base_queue + act_qgrop->queue[0];
1405                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1406                                 if (act_qgrop->queue_num ==
1407                                         valid_qgrop_number[i])
1408                                         break;
1409                         }
1410                         if (i == MAX_QGRP_NUM_TYPE)
1411                                 goto error;
1412                         if ((act_qgrop->queue[0] +
1413                                 act_qgrop->queue_num) >
1414                                 dev->data->nb_rx_queues)
1415                                 goto error;
1416                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1417                                 if (act_qgrop->queue[i + 1] !=
1418                                         act_qgrop->queue[i] + 1)
1419                                         goto error;
1420                         rule_info->sw_act.qgrp_size =
1421                                 act_qgrop->queue_num;
1422                         break;
1423                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1424                         act_q = action->conf;
1425                         if (act_q->index >= dev->data->nb_rx_queues)
1426                                 goto error;
1427                         rule_info->sw_act.fltr_act =
1428                                 ICE_FWD_TO_Q;
1429                         rule_info->sw_act.fwd_id.q_id =
1430                                 base_queue + act_q->index;
1431                         break;
1432
1433                 case RTE_FLOW_ACTION_TYPE_DROP:
1434                         rule_info->sw_act.fltr_act =
1435                                 ICE_DROP_PACKET;
1436                         break;
1437
1438                 case RTE_FLOW_ACTION_TYPE_VOID:
1439                         break;
1440
1441                 default:
1442                         goto error;
1443                 }
1444         }
1445
1446         rule_info->sw_act.vsi_handle = vsi->idx;
1447         rule_info->rx = 1;
1448         rule_info->sw_act.src = vsi->idx;
1449         rule_info->priority = 5;
1450
1451         return 0;
1452
1453 error:
1454         rte_flow_error_set(error,
1455                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1456                 actions,
1457                 "Invalid action type or queue number");
1458         return -rte_errno;
1459 }
1460
1461 static int
1462 ice_switch_check_action(const struct rte_flow_action *actions,
1463                             struct rte_flow_error *error)
1464 {
1465         const struct rte_flow_action *action;
1466         enum rte_flow_action_type action_type;
1467         uint16_t actions_num = 0;
1468
1469         for (action = actions; action->type !=
1470                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1471                 action_type = action->type;
1472                 switch (action_type) {
1473                 case RTE_FLOW_ACTION_TYPE_VF:
1474                 case RTE_FLOW_ACTION_TYPE_RSS:
1475                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1476                 case RTE_FLOW_ACTION_TYPE_DROP:
1477                         actions_num++;
1478                         break;
1479                 case RTE_FLOW_ACTION_TYPE_VOID:
1480                         continue;
1481                 default:
1482                         rte_flow_error_set(error,
1483                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1484                                            actions,
1485                                            "Invalid action type");
1486                         return -rte_errno;
1487                 }
1488         }
1489
1490         if (actions_num != 1) {
1491                 rte_flow_error_set(error,
1492                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1493                                    actions,
1494                                    "Invalid action number");
1495                 return -rte_errno;
1496         }
1497
1498         return 0;
1499 }
1500
1501 static bool
1502 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1503 {
1504         switch (tun_type) {
1505         case ICE_SW_TUN_PROFID_IPV6_ESP:
1506         case ICE_SW_TUN_PROFID_IPV6_AH:
1507         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1508         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1509         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1510         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1511         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1512         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1513                 return true;
1514         default:
1515                 break;
1516         }
1517
1518         return false;
1519 }
1520
/*
 * Parse a rte_flow pattern/actions pair into the switch filter's
 * internal representation: a lookup-element list plus rule info,
 * returned to the caller through *meta (a struct sw_meta).
 *
 * Returns 0 on success, -rte_errno (with @error filled) on failure.
 * On success with @meta != NULL, ownership of the allocated list and
 * sw_meta passes to the caller; on failure both are freed here.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan the pattern: count items to size the lookup array and
	 * derive the tunnel type from VXLAN/NVGRE items or a fully-masked
	 * ETH ethertype.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* Exact-match ethertype: the rule must hit both
			 * tunneled and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	/* Match the pattern against the parser's supported-pattern table. */
	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Build the lookup list; may also refine tun_type.
	 * A zero input set is only legal for profile rules.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action number");
		goto error;
	}

	/* DCF mode uses the VF-forwarding action parser. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action");
		goto error;
	}

	if (meta) {
		/* Hand the parsed rule back to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validation-only call: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1647
1648 static int
1649 ice_switch_query(struct ice_adapter *ad __rte_unused,
1650                 struct rte_flow *flow __rte_unused,
1651                 struct rte_flow_query_count *count __rte_unused,
1652                 struct rte_flow_error *error)
1653 {
1654         rte_flow_error_set(error, EINVAL,
1655                 RTE_FLOW_ERROR_TYPE_HANDLE,
1656                 NULL,
1657                 "count action not supported by switch filter");
1658
1659         return -rte_errno;
1660 }
1661
/*
 * Redirect an existing switch rule to a new VSI number after a VSI
 * update (e.g. VF reset): find the installed rule, remove it from HW,
 * update the VSI context, then replay the rule.
 *
 * Returns 0 on success (or when @flow does not target @rd's VSI),
 * negative errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Only rules bound to the redirected VSI are affected. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Locate the rule entry by rule id within its recipe's filter
	 * list; match either a direct FWD_TO_VSI to this VSI or a
	 * FWD_TO_VSI_LIST entry (which is rewritten to FWD_TO_VSI below).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Copy the lookups: the list entry is freed when
			 * the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule entry found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1743
1744 static int
1745 ice_switch_init(struct ice_adapter *ad)
1746 {
1747         int ret = 0;
1748         struct ice_flow_parser *dist_parser;
1749         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1750
1751         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1752                 dist_parser = &ice_switch_dist_parser_comms;
1753         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1754                 dist_parser = &ice_switch_dist_parser_os;
1755         else
1756                 return -EINVAL;
1757
1758         if (ad->devargs.pipe_mode_support)
1759                 ret = ice_register_parser(perm_parser, ad);
1760         else
1761                 ret = ice_register_parser(dist_parser, ad);
1762         return ret;
1763 }
1764
1765 static void
1766 ice_switch_uninit(struct ice_adapter *ad)
1767 {
1768         struct ice_flow_parser *dist_parser;
1769         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1770
1771         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1772                 dist_parser = &ice_switch_dist_parser_comms;
1773         else
1774                 dist_parser = &ice_switch_dist_parser_os;
1775
1776         if (ad->devargs.pipe_mode_support)
1777                 ice_unregister_parser(perm_parser, ad);
1778         else
1779                 ice_unregister_parser(dist_parser, ad);
1780 }
1781
/* Switch filter engine: vtable registered with the generic flow
 * framework via RTE_INIT below.
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1793
/* Distributor-stage parser for the OS-default DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1802
/* Distributor-stage parser for the comms DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1811
/* Permission-stage parser, used when pipe_mode_support is enabled. */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1820
1821 RTE_INIT(ice_sw_engine_init)
1822 {
1823         struct ice_flow_engine *engine = &ice_switch_engine;
1824         ice_register_flow_engine(engine);
1825 }