net/ice: fix typo in variable name
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/*
 * Constants and ICE_SW_INSET_* field bitmaps for the switch filter.
 *
 * Each ICE_SW_INSET_* macro lists the header fields (ICE_INSET_* bits)
 * that the corresponding flow pattern is allowed to match on.
 */
#define MAX_QGRP_NUM_TYPE	7
#define MAX_INPUT_SET_BYTE	32

/* PPP protocol field values identifying IPv4/IPv6 payloads. */
#define ICE_PPP_IPV4_PROTO	0x0021
#define ICE_PPP_IPV6_PROTO	0x0057

/* IPv4 protocol number (0x2F = GRE) used to recognize NVGRE tunnels. */
#define ICE_IPV4_PROTO_NVGRE	0x002F

/* Plain L2 patterns. */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
	ICE_INSET_VLAN_OUTER)

/* Non-tunnel IPv4/IPv6 patterns, optionally with TCP/UDP ports. */
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)

/* Tunnel (VXLAN/NVGRE) patterns, DIST (distributor) variants:
 * inner tunnel fields plus the outer IPv4 destination.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)

/* Tunnel patterns, PERM (permission) variants: inner tunnel fields only. */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)

/* PPPoE patterns; the *_IPV4/*_IPV6 variants add the IP input sets. */
#define ICE_SW_INSET_MAC_PPPOE ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)

/* ESP / AH / L2TPv3-over-IP / PFCP patterns. */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
133
134 struct sw_meta {
135         struct ice_adv_lkup_elem *list;
136         uint16_t lkups_num;
137         struct ice_adv_rule_info rule_info;
138 };
139
140 static struct ice_flow_parser ice_switch_dist_parser_os;
141 static struct ice_flow_parser ice_switch_dist_parser_comms;
142 static struct ice_flow_parser ice_switch_perm_parser;
143
144 static struct
145 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
146         {pattern_ethertype,
147                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
148         {pattern_ethertype_vlan,
149                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
150         {pattern_eth_ipv4,
151                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
152         {pattern_eth_ipv4_udp,
153                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
154         {pattern_eth_ipv4_tcp,
155                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
156         {pattern_eth_ipv6,
157                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
158         {pattern_eth_ipv6_udp,
159                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
160         {pattern_eth_ipv6_tcp,
161                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
162         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
163                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
164         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
165                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
166         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
167                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
168         {pattern_eth_ipv4_nvgre_eth_ipv4,
169                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
170         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
171                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
172         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
173                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
174         {pattern_eth_pppoes,
175                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
176         {pattern_eth_vlan_pppoes,
177                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
178         {pattern_eth_pppoes_proto,
179                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
180         {pattern_eth_vlan_pppoes_proto,
181                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
182         {pattern_eth_pppoes_ipv4,
183                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
184         {pattern_eth_pppoes_ipv4_tcp,
185                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
186         {pattern_eth_pppoes_ipv4_udp,
187                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
188         {pattern_eth_pppoes_ipv6,
189                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
190         {pattern_eth_pppoes_ipv6_tcp,
191                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
192         {pattern_eth_pppoes_ipv6_udp,
193                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
194         {pattern_eth_vlan_pppoes_ipv4,
195                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
196         {pattern_eth_vlan_pppoes_ipv4_tcp,
197                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
198         {pattern_eth_vlan_pppoes_ipv4_udp,
199                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
200         {pattern_eth_vlan_pppoes_ipv6,
201                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
202         {pattern_eth_vlan_pppoes_ipv6_tcp,
203                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
204         {pattern_eth_vlan_pppoes_ipv6_udp,
205                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
206         {pattern_eth_ipv4_esp,
207                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
208         {pattern_eth_ipv4_udp_esp,
209                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
210         {pattern_eth_ipv6_esp,
211                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
212         {pattern_eth_ipv6_udp_esp,
213                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
214         {pattern_eth_ipv4_ah,
215                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
216         {pattern_eth_ipv6_ah,
217                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
218         {pattern_eth_ipv6_udp_ah,
219                         ICE_INSET_NONE, ICE_INSET_NONE},
220         {pattern_eth_ipv4_l2tp,
221                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
222         {pattern_eth_ipv6_l2tp,
223                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
224         {pattern_eth_ipv4_pfcp,
225                         ICE_INSET_NONE, ICE_INSET_NONE},
226         {pattern_eth_ipv6_pfcp,
227                         ICE_INSET_NONE, ICE_INSET_NONE},
228 };
229
230 static struct
231 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
232         {pattern_ethertype,
233                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
234         {pattern_ethertype_vlan,
235                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
236         {pattern_eth_arp,
237                         ICE_INSET_NONE, ICE_INSET_NONE},
238         {pattern_eth_ipv4,
239                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
240         {pattern_eth_ipv4_udp,
241                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
242         {pattern_eth_ipv4_tcp,
243                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
244         {pattern_eth_ipv6,
245                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
246         {pattern_eth_ipv6_udp,
247                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
248         {pattern_eth_ipv6_tcp,
249                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
250         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
251                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
252         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
253                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
254         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
255                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
256         {pattern_eth_ipv4_nvgre_eth_ipv4,
257                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
258         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
259                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
260         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
261                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
262 };
263
264 static struct
265 ice_pattern_match_item ice_switch_pattern_perm[] = {
266         {pattern_ethertype,
267                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
268         {pattern_ethertype_vlan,
269                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
270         {pattern_eth_ipv4,
271                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
272         {pattern_eth_ipv4_udp,
273                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
274         {pattern_eth_ipv4_tcp,
275                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
276         {pattern_eth_ipv6,
277                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
278         {pattern_eth_ipv6_udp,
279                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
280         {pattern_eth_ipv6_tcp,
281                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
282         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
283                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
284         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
285                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
286         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
287                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
288         {pattern_eth_ipv4_nvgre_eth_ipv4,
289                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
290         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
291                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
292         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
293                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
294         {pattern_eth_pppoes,
295                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
296         {pattern_eth_vlan_pppoes,
297                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
298         {pattern_eth_pppoes_proto,
299                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
300         {pattern_eth_vlan_pppoes_proto,
301                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
302         {pattern_eth_pppoes_ipv4,
303                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
304         {pattern_eth_pppoes_ipv4_tcp,
305                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
306         {pattern_eth_pppoes_ipv4_udp,
307                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
308         {pattern_eth_pppoes_ipv6,
309                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
310         {pattern_eth_pppoes_ipv6_tcp,
311                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
312         {pattern_eth_pppoes_ipv6_udp,
313                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
314         {pattern_eth_vlan_pppoes_ipv4,
315                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
316         {pattern_eth_vlan_pppoes_ipv4_tcp,
317                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
318         {pattern_eth_vlan_pppoes_ipv4_udp,
319                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
320         {pattern_eth_vlan_pppoes_ipv6,
321                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
322         {pattern_eth_vlan_pppoes_ipv6_tcp,
323                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
324         {pattern_eth_vlan_pppoes_ipv6_udp,
325                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
326         {pattern_eth_ipv4_esp,
327                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
328         {pattern_eth_ipv4_udp_esp,
329                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
330         {pattern_eth_ipv6_esp,
331                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
332         {pattern_eth_ipv6_udp_esp,
333                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
334         {pattern_eth_ipv4_ah,
335                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
336         {pattern_eth_ipv6_ah,
337                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
338         {pattern_eth_ipv6_udp_ah,
339                         ICE_INSET_NONE, ICE_INSET_NONE},
340         {pattern_eth_ipv4_l2tp,
341                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
342         {pattern_eth_ipv6_l2tp,
343                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
344         {pattern_eth_ipv4_pfcp,
345                         ICE_INSET_NONE, ICE_INSET_NONE},
346         {pattern_eth_ipv6_pfcp,
347                         ICE_INSET_NONE, ICE_INSET_NONE},
348 };
349
350 static int
351 ice_switch_create(struct ice_adapter *ad,
352                 struct rte_flow *flow,
353                 void *meta,
354                 struct rte_flow_error *error)
355 {
356         int ret = 0;
357         struct ice_pf *pf = &ad->pf;
358         struct ice_hw *hw = ICE_PF_TO_HW(pf);
359         struct ice_rule_query_data rule_added = {0};
360         struct ice_rule_query_data *filter_ptr;
361         struct ice_adv_lkup_elem *list =
362                 ((struct sw_meta *)meta)->list;
363         uint16_t lkups_cnt =
364                 ((struct sw_meta *)meta)->lkups_num;
365         struct ice_adv_rule_info *rule_info =
366                 &((struct sw_meta *)meta)->rule_info;
367
368         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
369                 rte_flow_error_set(error, EINVAL,
370                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
371                         "item number too large for rule");
372                 goto error;
373         }
374         if (!list) {
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
377                         "lookup list should not be NULL");
378                 goto error;
379         }
380         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
381         if (!ret) {
382                 filter_ptr = rte_zmalloc("ice_switch_filter",
383                         sizeof(struct ice_rule_query_data), 0);
384                 if (!filter_ptr) {
385                         rte_flow_error_set(error, EINVAL,
386                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
387                                    "No memory for ice_switch_filter");
388                         goto error;
389                 }
390                 flow->rule = filter_ptr;
391                 rte_memcpy(filter_ptr,
392                         &rule_added,
393                         sizeof(struct ice_rule_query_data));
394         } else {
395                 rte_flow_error_set(error, EINVAL,
396                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
397                         "switch filter create flow fail");
398                 goto error;
399         }
400
401         rte_free(list);
402         rte_free(meta);
403         return 0;
404
405 error:
406         rte_free(list);
407         rte_free(meta);
408
409         return -rte_errno;
410 }
411
412 static int
413 ice_switch_destroy(struct ice_adapter *ad,
414                 struct rte_flow *flow,
415                 struct rte_flow_error *error)
416 {
417         struct ice_hw *hw = &ad->hw;
418         int ret;
419         struct ice_rule_query_data *filter_ptr;
420
421         filter_ptr = (struct ice_rule_query_data *)
422                 flow->rule;
423
424         if (!filter_ptr) {
425                 rte_flow_error_set(error, EINVAL,
426                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
427                         "no such flow"
428                         " create by switch filter");
429                 return -rte_errno;
430         }
431
432         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
433         if (ret) {
434                 rte_flow_error_set(error, EINVAL,
435                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
436                         "fail to destroy switch filter rule");
437                 return -rte_errno;
438         }
439
440         rte_free(filter_ptr);
441         return ret;
442 }
443
444 static void
445 ice_switch_filter_rule_free(struct rte_flow *flow)
446 {
447         rte_free(flow->rule);
448 }
449
450 static uint64_t
451 ice_switch_inset_get(const struct rte_flow_item pattern[],
452                 struct rte_flow_error *error,
453                 struct ice_adv_lkup_elem *list,
454                 uint16_t *lkups_num,
455                 enum ice_sw_tunnel_type *tun_type)
456 {
457         const struct rte_flow_item *item = pattern;
458         enum rte_flow_item_type item_type;
459         const struct rte_flow_item_eth *eth_spec, *eth_mask;
460         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
461         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
462         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
463         const struct rte_flow_item_udp *udp_spec, *udp_mask;
464         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
465         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
466         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
467         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
468         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
469         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
470                                 *pppoe_proto_mask;
471         const struct rte_flow_item_esp *esp_spec, *esp_mask;
472         const struct rte_flow_item_ah *ah_spec, *ah_mask;
473         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
474         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
475         uint64_t input_set = ICE_INSET_NONE;
476         uint16_t input_set_byte = 0;
477         bool pppoe_elem_valid = 0;
478         bool pppoe_patt_valid = 0;
479         bool pppoe_prot_valid = 0;
480         bool tunnel_valid = 0;
481         bool profile_rule = 0;
482         bool nvgre_valid = 0;
483         bool vxlan_valid = 0;
484         bool ipv6_valid = 0;
485         bool ipv4_valid = 0;
486         bool udp_valid = 0;
487         bool tcp_valid = 0;
488         uint16_t j, t = 0;
489
490         for (item = pattern; item->type !=
491                         RTE_FLOW_ITEM_TYPE_END; item++) {
492                 if (item->last) {
493                         rte_flow_error_set(error, EINVAL,
494                                         RTE_FLOW_ERROR_TYPE_ITEM,
495                                         item,
496                                         "Not support range");
497                         return 0;
498                 }
499                 item_type = item->type;
500
501                 switch (item_type) {
502                 case RTE_FLOW_ITEM_TYPE_ETH:
503                         eth_spec = item->spec;
504                         eth_mask = item->mask;
505                         if (eth_spec && eth_mask) {
506                                 const uint8_t *a = eth_mask->src.addr_bytes;
507                                 const uint8_t *b = eth_mask->dst.addr_bytes;
508                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
509                                         if (a[j] && tunnel_valid) {
510                                                 input_set |=
511                                                         ICE_INSET_TUN_SMAC;
512                                                 break;
513                                         } else if (a[j]) {
514                                                 input_set |=
515                                                         ICE_INSET_SMAC;
516                                                 break;
517                                         }
518                                 }
519                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
520                                         if (b[j] && tunnel_valid) {
521                                                 input_set |=
522                                                         ICE_INSET_TUN_DMAC;
523                                                 break;
524                                         } else if (b[j]) {
525                                                 input_set |=
526                                                         ICE_INSET_DMAC;
527                                                 break;
528                                         }
529                                 }
530                                 if (eth_mask->type)
531                                         input_set |= ICE_INSET_ETHERTYPE;
532                                 list[t].type = (tunnel_valid  == 0) ?
533                                         ICE_MAC_OFOS : ICE_MAC_IL;
534                                 struct ice_ether_hdr *h;
535                                 struct ice_ether_hdr *m;
536                                 uint16_t i = 0;
537                                 h = &list[t].h_u.eth_hdr;
538                                 m = &list[t].m_u.eth_hdr;
539                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
540                                         if (eth_mask->src.addr_bytes[j]) {
541                                                 h->src_addr[j] =
542                                                 eth_spec->src.addr_bytes[j];
543                                                 m->src_addr[j] =
544                                                 eth_mask->src.addr_bytes[j];
545                                                 i = 1;
546                                                 input_set_byte++;
547                                         }
548                                         if (eth_mask->dst.addr_bytes[j]) {
549                                                 h->dst_addr[j] =
550                                                 eth_spec->dst.addr_bytes[j];
551                                                 m->dst_addr[j] =
552                                                 eth_mask->dst.addr_bytes[j];
553                                                 i = 1;
554                                                 input_set_byte++;
555                                         }
556                                 }
557                                 if (i)
558                                         t++;
559                                 if (eth_mask->type) {
560                                         list[t].type = ICE_ETYPE_OL;
561                                         list[t].h_u.ethertype.ethtype_id =
562                                                 eth_spec->type;
563                                         list[t].m_u.ethertype.ethtype_id =
564                                                 eth_mask->type;
565                                         input_set_byte += 2;
566                                         t++;
567                                 }
568                         }
569                         break;
570
571                 case RTE_FLOW_ITEM_TYPE_IPV4:
572                         ipv4_spec = item->spec;
573                         ipv4_mask = item->mask;
574                         ipv4_valid = 1;
575                         if (ipv4_spec && ipv4_mask) {
576                                 /* Check IPv4 mask and update input set */
577                                 if (ipv4_mask->hdr.version_ihl ||
578                                         ipv4_mask->hdr.total_length ||
579                                         ipv4_mask->hdr.packet_id ||
580                                         ipv4_mask->hdr.hdr_checksum) {
581                                         rte_flow_error_set(error, EINVAL,
582                                                    RTE_FLOW_ERROR_TYPE_ITEM,
583                                                    item,
584                                                    "Invalid IPv4 mask.");
585                                         return 0;
586                                 }
587
588                                 if (tunnel_valid) {
589                                         if (ipv4_mask->hdr.type_of_service)
590                                                 input_set |=
591                                                         ICE_INSET_TUN_IPV4_TOS;
592                                         if (ipv4_mask->hdr.src_addr)
593                                                 input_set |=
594                                                         ICE_INSET_TUN_IPV4_SRC;
595                                         if (ipv4_mask->hdr.dst_addr)
596                                                 input_set |=
597                                                         ICE_INSET_TUN_IPV4_DST;
598                                         if (ipv4_mask->hdr.time_to_live)
599                                                 input_set |=
600                                                         ICE_INSET_TUN_IPV4_TTL;
601                                         if (ipv4_mask->hdr.next_proto_id)
602                                                 input_set |=
603                                                 ICE_INSET_TUN_IPV4_PROTO;
604                                 } else {
605                                         if (ipv4_mask->hdr.src_addr)
606                                                 input_set |= ICE_INSET_IPV4_SRC;
607                                         if (ipv4_mask->hdr.dst_addr)
608                                                 input_set |= ICE_INSET_IPV4_DST;
609                                         if (ipv4_mask->hdr.time_to_live)
610                                                 input_set |= ICE_INSET_IPV4_TTL;
611                                         if (ipv4_mask->hdr.next_proto_id)
612                                                 input_set |=
613                                                 ICE_INSET_IPV4_PROTO;
614                                         if (ipv4_mask->hdr.type_of_service)
615                                                 input_set |=
616                                                         ICE_INSET_IPV4_TOS;
617                                 }
618                                 list[t].type = (tunnel_valid  == 0) ?
619                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
620                                 if (ipv4_mask->hdr.src_addr) {
621                                         list[t].h_u.ipv4_hdr.src_addr =
622                                                 ipv4_spec->hdr.src_addr;
623                                         list[t].m_u.ipv4_hdr.src_addr =
624                                                 ipv4_mask->hdr.src_addr;
625                                         input_set_byte += 2;
626                                 }
627                                 if (ipv4_mask->hdr.dst_addr) {
628                                         list[t].h_u.ipv4_hdr.dst_addr =
629                                                 ipv4_spec->hdr.dst_addr;
630                                         list[t].m_u.ipv4_hdr.dst_addr =
631                                                 ipv4_mask->hdr.dst_addr;
632                                         input_set_byte += 2;
633                                 }
634                                 if (ipv4_mask->hdr.time_to_live) {
635                                         list[t].h_u.ipv4_hdr.time_to_live =
636                                                 ipv4_spec->hdr.time_to_live;
637                                         list[t].m_u.ipv4_hdr.time_to_live =
638                                                 ipv4_mask->hdr.time_to_live;
639                                         input_set_byte++;
640                                 }
641                                 if (ipv4_mask->hdr.next_proto_id) {
642                                         list[t].h_u.ipv4_hdr.protocol =
643                                                 ipv4_spec->hdr.next_proto_id;
644                                         list[t].m_u.ipv4_hdr.protocol =
645                                                 ipv4_mask->hdr.next_proto_id;
646                                         input_set_byte++;
647                                 }
648                                 if ((ipv4_spec->hdr.next_proto_id &
649                                         ipv4_mask->hdr.next_proto_id) ==
650                                         ICE_IPV4_PROTO_NVGRE)
651                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
652                                 if (ipv4_mask->hdr.type_of_service) {
653                                         list[t].h_u.ipv4_hdr.tos =
654                                                 ipv4_spec->hdr.type_of_service;
655                                         list[t].m_u.ipv4_hdr.tos =
656                                                 ipv4_mask->hdr.type_of_service;
657                                         input_set_byte++;
658                                 }
659                                 t++;
660                         }
661                         break;
662
663                 case RTE_FLOW_ITEM_TYPE_IPV6:
664                         ipv6_spec = item->spec;
665                         ipv6_mask = item->mask;
666                         ipv6_valid = 1;
667                         if (ipv6_spec && ipv6_mask) {
668                                 if (ipv6_mask->hdr.payload_len) {
669                                         rte_flow_error_set(error, EINVAL,
670                                            RTE_FLOW_ERROR_TYPE_ITEM,
671                                            item,
672                                            "Invalid IPv6 mask");
673                                         return 0;
674                                 }
675
676                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
677                                         if (ipv6_mask->hdr.src_addr[j] &&
678                                                 tunnel_valid) {
679                                                 input_set |=
680                                                 ICE_INSET_TUN_IPV6_SRC;
681                                                 break;
682                                         } else if (ipv6_mask->hdr.src_addr[j]) {
683                                                 input_set |= ICE_INSET_IPV6_SRC;
684                                                 break;
685                                         }
686                                 }
687                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
688                                         if (ipv6_mask->hdr.dst_addr[j] &&
689                                                 tunnel_valid) {
690                                                 input_set |=
691                                                 ICE_INSET_TUN_IPV6_DST;
692                                                 break;
693                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
694                                                 input_set |= ICE_INSET_IPV6_DST;
695                                                 break;
696                                         }
697                                 }
698                                 if (ipv6_mask->hdr.proto &&
699                                         tunnel_valid)
700                                         input_set |=
701                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
702                                 else if (ipv6_mask->hdr.proto)
703                                         input_set |=
704                                                 ICE_INSET_IPV6_NEXT_HDR;
705                                 if (ipv6_mask->hdr.hop_limits &&
706                                         tunnel_valid)
707                                         input_set |=
708                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
709                                 else if (ipv6_mask->hdr.hop_limits)
710                                         input_set |=
711                                                 ICE_INSET_IPV6_HOP_LIMIT;
712                                 if ((ipv6_mask->hdr.vtc_flow &
713                                                 rte_cpu_to_be_32
714                                                 (RTE_IPV6_HDR_TC_MASK)) &&
715                                         tunnel_valid)
716                                         input_set |=
717                                                         ICE_INSET_TUN_IPV6_TC;
718                                 else if (ipv6_mask->hdr.vtc_flow &
719                                                 rte_cpu_to_be_32
720                                                 (RTE_IPV6_HDR_TC_MASK))
721                                         input_set |= ICE_INSET_IPV6_TC;
722
723                                 list[t].type = (tunnel_valid  == 0) ?
724                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
725                                 struct ice_ipv6_hdr *f;
726                                 struct ice_ipv6_hdr *s;
727                                 f = &list[t].h_u.ipv6_hdr;
728                                 s = &list[t].m_u.ipv6_hdr;
729                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
730                                         if (ipv6_mask->hdr.src_addr[j]) {
731                                                 f->src_addr[j] =
732                                                 ipv6_spec->hdr.src_addr[j];
733                                                 s->src_addr[j] =
734                                                 ipv6_mask->hdr.src_addr[j];
735                                                 input_set_byte++;
736                                         }
737                                         if (ipv6_mask->hdr.dst_addr[j]) {
738                                                 f->dst_addr[j] =
739                                                 ipv6_spec->hdr.dst_addr[j];
740                                                 s->dst_addr[j] =
741                                                 ipv6_mask->hdr.dst_addr[j];
742                                                 input_set_byte++;
743                                         }
744                                 }
745                                 if (ipv6_mask->hdr.proto) {
746                                         f->next_hdr =
747                                                 ipv6_spec->hdr.proto;
748                                         s->next_hdr =
749                                                 ipv6_mask->hdr.proto;
750                                         input_set_byte++;
751                                 }
752                                 if (ipv6_mask->hdr.hop_limits) {
753                                         f->hop_limit =
754                                                 ipv6_spec->hdr.hop_limits;
755                                         s->hop_limit =
756                                                 ipv6_mask->hdr.hop_limits;
757                                         input_set_byte++;
758                                 }
759                                 if (ipv6_mask->hdr.vtc_flow &
760                                                 rte_cpu_to_be_32
761                                                 (RTE_IPV6_HDR_TC_MASK)) {
762                                         struct ice_le_ver_tc_flow vtf;
763                                         vtf.u.fld.version = 0;
764                                         vtf.u.fld.flow_label = 0;
765                                         vtf.u.fld.tc = (rte_be_to_cpu_32
766                                                 (ipv6_spec->hdr.vtc_flow) &
767                                                         RTE_IPV6_HDR_TC_MASK) >>
768                                                         RTE_IPV6_HDR_TC_SHIFT;
769                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
770                                         vtf.u.fld.tc = (rte_be_to_cpu_32
771                                                 (ipv6_mask->hdr.vtc_flow) &
772                                                         RTE_IPV6_HDR_TC_MASK) >>
773                                                         RTE_IPV6_HDR_TC_SHIFT;
774                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
775                                         input_set_byte += 4;
776                                 }
777                                 t++;
778                         }
779                         break;
780
781                 case RTE_FLOW_ITEM_TYPE_UDP:
782                         udp_spec = item->spec;
783                         udp_mask = item->mask;
784                         udp_valid = 1;
785                         if (udp_spec && udp_mask) {
786                                 /* Check UDP mask and update input set*/
787                                 if (udp_mask->hdr.dgram_len ||
788                                     udp_mask->hdr.dgram_cksum) {
789                                         rte_flow_error_set(error, EINVAL,
790                                                    RTE_FLOW_ERROR_TYPE_ITEM,
791                                                    item,
792                                                    "Invalid UDP mask");
793                                         return 0;
794                                 }
795
796                                 if (tunnel_valid) {
797                                         if (udp_mask->hdr.src_port)
798                                                 input_set |=
799                                                 ICE_INSET_TUN_UDP_SRC_PORT;
800                                         if (udp_mask->hdr.dst_port)
801                                                 input_set |=
802                                                 ICE_INSET_TUN_UDP_DST_PORT;
803                                 } else {
804                                         if (udp_mask->hdr.src_port)
805                                                 input_set |=
806                                                 ICE_INSET_UDP_SRC_PORT;
807                                         if (udp_mask->hdr.dst_port)
808                                                 input_set |=
809                                                 ICE_INSET_UDP_DST_PORT;
810                                 }
811                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
812                                                 tunnel_valid == 0)
813                                         list[t].type = ICE_UDP_OF;
814                                 else
815                                         list[t].type = ICE_UDP_ILOS;
816                                 if (udp_mask->hdr.src_port) {
817                                         list[t].h_u.l4_hdr.src_port =
818                                                 udp_spec->hdr.src_port;
819                                         list[t].m_u.l4_hdr.src_port =
820                                                 udp_mask->hdr.src_port;
821                                         input_set_byte += 2;
822                                 }
823                                 if (udp_mask->hdr.dst_port) {
824                                         list[t].h_u.l4_hdr.dst_port =
825                                                 udp_spec->hdr.dst_port;
826                                         list[t].m_u.l4_hdr.dst_port =
827                                                 udp_mask->hdr.dst_port;
828                                         input_set_byte += 2;
829                                 }
830                                 t++;
831                         }
832                         break;
833
834                 case RTE_FLOW_ITEM_TYPE_TCP:
835                         tcp_spec = item->spec;
836                         tcp_mask = item->mask;
837                         tcp_valid = 1;
838                         if (tcp_spec && tcp_mask) {
839                                 /* Check TCP mask and update input set */
840                                 if (tcp_mask->hdr.sent_seq ||
841                                         tcp_mask->hdr.recv_ack ||
842                                         tcp_mask->hdr.data_off ||
843                                         tcp_mask->hdr.tcp_flags ||
844                                         tcp_mask->hdr.rx_win ||
845                                         tcp_mask->hdr.cksum ||
846                                         tcp_mask->hdr.tcp_urp) {
847                                         rte_flow_error_set(error, EINVAL,
848                                            RTE_FLOW_ERROR_TYPE_ITEM,
849                                            item,
850                                            "Invalid TCP mask");
851                                         return 0;
852                                 }
853
854                                 if (tunnel_valid) {
855                                         if (tcp_mask->hdr.src_port)
856                                                 input_set |=
857                                                 ICE_INSET_TUN_TCP_SRC_PORT;
858                                         if (tcp_mask->hdr.dst_port)
859                                                 input_set |=
860                                                 ICE_INSET_TUN_TCP_DST_PORT;
861                                 } else {
862                                         if (tcp_mask->hdr.src_port)
863                                                 input_set |=
864                                                 ICE_INSET_TCP_SRC_PORT;
865                                         if (tcp_mask->hdr.dst_port)
866                                                 input_set |=
867                                                 ICE_INSET_TCP_DST_PORT;
868                                 }
869                                 list[t].type = ICE_TCP_IL;
870                                 if (tcp_mask->hdr.src_port) {
871                                         list[t].h_u.l4_hdr.src_port =
872                                                 tcp_spec->hdr.src_port;
873                                         list[t].m_u.l4_hdr.src_port =
874                                                 tcp_mask->hdr.src_port;
875                                         input_set_byte += 2;
876                                 }
877                                 if (tcp_mask->hdr.dst_port) {
878                                         list[t].h_u.l4_hdr.dst_port =
879                                                 tcp_spec->hdr.dst_port;
880                                         list[t].m_u.l4_hdr.dst_port =
881                                                 tcp_mask->hdr.dst_port;
882                                         input_set_byte += 2;
883                                 }
884                                 t++;
885                         }
886                         break;
887
888                 case RTE_FLOW_ITEM_TYPE_SCTP:
889                         sctp_spec = item->spec;
890                         sctp_mask = item->mask;
891                         if (sctp_spec && sctp_mask) {
892                                 /* Check SCTP mask and update input set */
893                                 if (sctp_mask->hdr.cksum) {
894                                         rte_flow_error_set(error, EINVAL,
895                                            RTE_FLOW_ERROR_TYPE_ITEM,
896                                            item,
897                                            "Invalid SCTP mask");
898                                         return 0;
899                                 }
900
901                                 if (tunnel_valid) {
902                                         if (sctp_mask->hdr.src_port)
903                                                 input_set |=
904                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
905                                         if (sctp_mask->hdr.dst_port)
906                                                 input_set |=
907                                                 ICE_INSET_TUN_SCTP_DST_PORT;
908                                 } else {
909                                         if (sctp_mask->hdr.src_port)
910                                                 input_set |=
911                                                 ICE_INSET_SCTP_SRC_PORT;
912                                         if (sctp_mask->hdr.dst_port)
913                                                 input_set |=
914                                                 ICE_INSET_SCTP_DST_PORT;
915                                 }
916                                 list[t].type = ICE_SCTP_IL;
917                                 if (sctp_mask->hdr.src_port) {
918                                         list[t].h_u.sctp_hdr.src_port =
919                                                 sctp_spec->hdr.src_port;
920                                         list[t].m_u.sctp_hdr.src_port =
921                                                 sctp_mask->hdr.src_port;
922                                         input_set_byte += 2;
923                                 }
924                                 if (sctp_mask->hdr.dst_port) {
925                                         list[t].h_u.sctp_hdr.dst_port =
926                                                 sctp_spec->hdr.dst_port;
927                                         list[t].m_u.sctp_hdr.dst_port =
928                                                 sctp_mask->hdr.dst_port;
929                                         input_set_byte += 2;
930                                 }
931                                 t++;
932                         }
933                         break;
934
935                 case RTE_FLOW_ITEM_TYPE_VXLAN:
936                         vxlan_spec = item->spec;
937                         vxlan_mask = item->mask;
938                         /* Check if VXLAN item is used to describe protocol.
939                          * If yes, both spec and mask should be NULL.
940                          * If no, both spec and mask shouldn't be NULL.
941                          */
942                         if ((!vxlan_spec && vxlan_mask) ||
943                             (vxlan_spec && !vxlan_mask)) {
944                                 rte_flow_error_set(error, EINVAL,
945                                            RTE_FLOW_ERROR_TYPE_ITEM,
946                                            item,
947                                            "Invalid VXLAN item");
948                                 return 0;
949                         }
950                         vxlan_valid = 1;
951                         tunnel_valid = 1;
952                         if (vxlan_spec && vxlan_mask) {
953                                 list[t].type = ICE_VXLAN;
954                                 if (vxlan_mask->vni[0] ||
955                                         vxlan_mask->vni[1] ||
956                                         vxlan_mask->vni[2]) {
957                                         list[t].h_u.tnl_hdr.vni =
958                                                 (vxlan_spec->vni[2] << 16) |
959                                                 (vxlan_spec->vni[1] << 8) |
960                                                 vxlan_spec->vni[0];
961                                         list[t].m_u.tnl_hdr.vni =
962                                                 (vxlan_mask->vni[2] << 16) |
963                                                 (vxlan_mask->vni[1] << 8) |
964                                                 vxlan_mask->vni[0];
965                                         input_set |=
966                                                 ICE_INSET_TUN_VXLAN_VNI;
967                                         input_set_byte += 2;
968                                 }
969                                 t++;
970                         }
971                         break;
972
973                 case RTE_FLOW_ITEM_TYPE_NVGRE:
974                         nvgre_spec = item->spec;
975                         nvgre_mask = item->mask;
976                         /* Check if NVGRE item is used to describe protocol.
977                          * If yes, both spec and mask should be NULL.
978                          * If no, both spec and mask shouldn't be NULL.
979                          */
980                         if ((!nvgre_spec && nvgre_mask) ||
981                             (nvgre_spec && !nvgre_mask)) {
982                                 rte_flow_error_set(error, EINVAL,
983                                            RTE_FLOW_ERROR_TYPE_ITEM,
984                                            item,
985                                            "Invalid NVGRE item");
986                                 return 0;
987                         }
988                         nvgre_valid = 1;
989                         tunnel_valid = 1;
990                         if (nvgre_spec && nvgre_mask) {
991                                 list[t].type = ICE_NVGRE;
992                                 if (nvgre_mask->tni[0] ||
993                                         nvgre_mask->tni[1] ||
994                                         nvgre_mask->tni[2]) {
995                                         list[t].h_u.nvgre_hdr.tni_flow =
996                                                 (nvgre_spec->tni[2] << 16) |
997                                                 (nvgre_spec->tni[1] << 8) |
998                                                 nvgre_spec->tni[0];
999                                         list[t].m_u.nvgre_hdr.tni_flow =
1000                                                 (nvgre_mask->tni[2] << 16) |
1001                                                 (nvgre_mask->tni[1] << 8) |
1002                                                 nvgre_mask->tni[0];
1003                                         input_set |=
1004                                                 ICE_INSET_TUN_NVGRE_TNI;
1005                                         input_set_byte += 2;
1006                                 }
1007                                 t++;
1008                         }
1009                         break;
1010
1011                 case RTE_FLOW_ITEM_TYPE_VLAN:
1012                         vlan_spec = item->spec;
1013                         vlan_mask = item->mask;
1014                         /* Check if VLAN item is used to describe protocol.
1015                          * If yes, both spec and mask should be NULL.
1016                          * If no, both spec and mask shouldn't be NULL.
1017                          */
1018                         if ((!vlan_spec && vlan_mask) ||
1019                             (vlan_spec && !vlan_mask)) {
1020                                 rte_flow_error_set(error, EINVAL,
1021                                            RTE_FLOW_ERROR_TYPE_ITEM,
1022                                            item,
1023                                            "Invalid VLAN item");
1024                                 return 0;
1025                         }
1026                         if (vlan_spec && vlan_mask) {
1027                                 list[t].type = ICE_VLAN_OFOS;
1028                                 if (vlan_mask->tci) {
1029                                         list[t].h_u.vlan_hdr.vlan =
1030                                                 vlan_spec->tci;
1031                                         list[t].m_u.vlan_hdr.vlan =
1032                                                 vlan_mask->tci;
1033                                         input_set |= ICE_INSET_VLAN_OUTER;
1034                                         input_set_byte += 2;
1035                                 }
1036                                 if (vlan_mask->inner_type) {
1037                                         list[t].h_u.vlan_hdr.type =
1038                                                 vlan_spec->inner_type;
1039                                         list[t].m_u.vlan_hdr.type =
1040                                                 vlan_mask->inner_type;
1041                                         input_set |= ICE_INSET_ETHERTYPE;
1042                                         input_set_byte += 2;
1043                                 }
1044                                 t++;
1045                         }
1046                         break;
1047
1048                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1049                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1050                         pppoe_spec = item->spec;
1051                         pppoe_mask = item->mask;
1052                         /* Check if PPPoE item is used to describe protocol.
1053                          * If yes, both spec and mask should be NULL.
1054                          * If no, both spec and mask shouldn't be NULL.
1055                          */
1056                         if ((!pppoe_spec && pppoe_mask) ||
1057                                 (pppoe_spec && !pppoe_mask)) {
1058                                 rte_flow_error_set(error, EINVAL,
1059                                         RTE_FLOW_ERROR_TYPE_ITEM,
1060                                         item,
1061                                         "Invalid pppoe item");
1062                                 return 0;
1063                         }
1064                         pppoe_patt_valid = 1;
1065                         if (pppoe_spec && pppoe_mask) {
1066                                 /* Check pppoe mask and update input set */
1067                                 if (pppoe_mask->length ||
1068                                         pppoe_mask->code ||
1069                                         pppoe_mask->version_type) {
1070                                         rte_flow_error_set(error, EINVAL,
1071                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1072                                                 item,
1073                                                 "Invalid pppoe mask");
1074                                         return 0;
1075                                 }
1076                                 list[t].type = ICE_PPPOE;
1077                                 if (pppoe_mask->session_id) {
1078                                         list[t].h_u.pppoe_hdr.session_id =
1079                                                 pppoe_spec->session_id;
1080                                         list[t].m_u.pppoe_hdr.session_id =
1081                                                 pppoe_mask->session_id;
1082                                         input_set |= ICE_INSET_PPPOE_SESSION;
1083                                         input_set_byte += 2;
1084                                 }
1085                                 t++;
1086                                 pppoe_elem_valid = 1;
1087                         }
1088                         break;
1089
1090                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1091                         pppoe_proto_spec = item->spec;
1092                         pppoe_proto_mask = item->mask;
1093                         /* Check if PPPoE optional proto_id item
1094                          * is used to describe protocol.
1095                          * If yes, both spec and mask should be NULL.
1096                          * If no, both spec and mask shouldn't be NULL.
1097                          */
1098                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1099                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1100                                 rte_flow_error_set(error, EINVAL,
1101                                         RTE_FLOW_ERROR_TYPE_ITEM,
1102                                         item,
1103                                         "Invalid pppoe proto item");
1104                                 return 0;
1105                         }
1106                         if (pppoe_proto_spec && pppoe_proto_mask) {
1107                                 if (pppoe_elem_valid)
1108                                         t--;
1109                                 list[t].type = ICE_PPPOE;
1110                                 if (pppoe_proto_mask->proto_id) {
1111                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1112                                                 pppoe_proto_spec->proto_id;
1113                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1114                                                 pppoe_proto_mask->proto_id;
1115                                         input_set |= ICE_INSET_PPPOE_PROTO;
1116                                         input_set_byte += 2;
1117                                         pppoe_prot_valid = 1;
1118                                 }
1119                                 if ((pppoe_proto_mask->proto_id &
1120                                         pppoe_proto_spec->proto_id) !=
1121                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1122                                         (pppoe_proto_mask->proto_id &
1123                                         pppoe_proto_spec->proto_id) !=
1124                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1125                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1126                                 else
1127                                         *tun_type = ICE_SW_TUN_PPPOE;
1128                                 t++;
1129                         }
1130
1131                         break;
1132
1133                 case RTE_FLOW_ITEM_TYPE_ESP:
1134                         esp_spec = item->spec;
1135                         esp_mask = item->mask;
1136                         if ((esp_spec && !esp_mask) ||
1137                                 (!esp_spec && esp_mask)) {
1138                                 rte_flow_error_set(error, EINVAL,
1139                                            RTE_FLOW_ERROR_TYPE_ITEM,
1140                                            item,
1141                                            "Invalid esp item");
1142                                 return 0;
1143                         }
1144                         /* Check esp mask and update input set */
1145                         if (esp_mask && esp_mask->hdr.seq) {
1146                                 rte_flow_error_set(error, EINVAL,
1147                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1148                                                 item,
1149                                                 "Invalid esp mask");
1150                                 return 0;
1151                         }
1152
1153                         if (!esp_spec && !esp_mask && !input_set) {
1154                                 profile_rule = 1;
1155                                 if (ipv6_valid && udp_valid)
1156                                         *tun_type =
1157                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1158                                 else if (ipv6_valid)
1159                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1160                                 else if (ipv4_valid)
1161                                         return 0;
1162                         } else if (esp_spec && esp_mask &&
1163                                                 esp_mask->hdr.spi){
1164                                 if (udp_valid)
1165                                         list[t].type = ICE_NAT_T;
1166                                 else
1167                                         list[t].type = ICE_ESP;
1168                                 list[t].h_u.esp_hdr.spi =
1169                                         esp_spec->hdr.spi;
1170                                 list[t].m_u.esp_hdr.spi =
1171                                         esp_mask->hdr.spi;
1172                                 input_set |= ICE_INSET_ESP_SPI;
1173                                 input_set_byte += 4;
1174                                 t++;
1175                         }
1176
1177                         if (!profile_rule) {
1178                                 if (ipv6_valid && udp_valid)
1179                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1180                                 else if (ipv4_valid && udp_valid)
1181                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1182                                 else if (ipv6_valid)
1183                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1184                                 else if (ipv4_valid)
1185                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1186                         }
1187                         break;
1188
1189                 case RTE_FLOW_ITEM_TYPE_AH:
1190                         ah_spec = item->spec;
1191                         ah_mask = item->mask;
1192                         if ((ah_spec && !ah_mask) ||
1193                                 (!ah_spec && ah_mask)) {
1194                                 rte_flow_error_set(error, EINVAL,
1195                                            RTE_FLOW_ERROR_TYPE_ITEM,
1196                                            item,
1197                                            "Invalid ah item");
1198                                 return 0;
1199                         }
1200                         /* Check ah mask and update input set */
1201                         if (ah_mask &&
1202                                 (ah_mask->next_hdr ||
1203                                 ah_mask->payload_len ||
1204                                 ah_mask->seq_num ||
1205                                 ah_mask->reserved)) {
1206                                 rte_flow_error_set(error, EINVAL,
1207                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1208                                                 item,
1209                                                 "Invalid ah mask");
1210                                 return 0;
1211                         }
1212
1213                         if (!ah_spec && !ah_mask && !input_set) {
1214                                 profile_rule = 1;
1215                                 if (ipv6_valid && udp_valid)
1216                                         *tun_type =
1217                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1218                                 else if (ipv6_valid)
1219                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1220                                 else if (ipv4_valid)
1221                                         return 0;
1222                         } else if (ah_spec && ah_mask &&
1223                                                 ah_mask->spi){
1224                                 list[t].type = ICE_AH;
1225                                 list[t].h_u.ah_hdr.spi =
1226                                         ah_spec->spi;
1227                                 list[t].m_u.ah_hdr.spi =
1228                                         ah_mask->spi;
1229                                 input_set |= ICE_INSET_AH_SPI;
1230                                 input_set_byte += 4;
1231                                 t++;
1232                         }
1233
1234                         if (!profile_rule) {
1235                                 if (udp_valid)
1236                                         return 0;
1237                                 else if (ipv6_valid)
1238                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1239                                 else if (ipv4_valid)
1240                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1241                         }
1242                         break;
1243
1244                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1245                         l2tp_spec = item->spec;
1246                         l2tp_mask = item->mask;
1247                         if ((l2tp_spec && !l2tp_mask) ||
1248                                 (!l2tp_spec && l2tp_mask)) {
1249                                 rte_flow_error_set(error, EINVAL,
1250                                            RTE_FLOW_ERROR_TYPE_ITEM,
1251                                            item,
1252                                            "Invalid l2tp item");
1253                                 return 0;
1254                         }
1255
1256                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1257                                 if (ipv6_valid)
1258                                         *tun_type =
1259                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1260                                 else if (ipv4_valid)
1261                                         return 0;
1262                         } else if (l2tp_spec && l2tp_mask &&
1263                                                 l2tp_mask->session_id){
1264                                 list[t].type = ICE_L2TPV3;
1265                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1266                                         l2tp_spec->session_id;
1267                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1268                                         l2tp_mask->session_id;
1269                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1270                                 input_set_byte += 4;
1271                                 t++;
1272                         }
1273
1274                         if (!profile_rule) {
1275                                 if (ipv6_valid)
1276                                         *tun_type =
1277                                         ICE_SW_TUN_IPV6_L2TPV3;
1278                                 else if (ipv4_valid)
1279                                         *tun_type =
1280                                         ICE_SW_TUN_IPV4_L2TPV3;
1281                         }
1282                         break;
1283
1284                 case RTE_FLOW_ITEM_TYPE_PFCP:
1285                         pfcp_spec = item->spec;
1286                         pfcp_mask = item->mask;
1287                         /* Check if PFCP item is used to describe protocol.
1288                          * If yes, both spec and mask should be NULL.
1289                          * If no, both spec and mask shouldn't be NULL.
1290                          */
1291                         if ((!pfcp_spec && pfcp_mask) ||
1292                             (pfcp_spec && !pfcp_mask)) {
1293                                 rte_flow_error_set(error, EINVAL,
1294                                            RTE_FLOW_ERROR_TYPE_ITEM,
1295                                            item,
1296                                            "Invalid PFCP item");
1297                                 return -ENOTSUP;
1298                         }
1299                         if (pfcp_spec && pfcp_mask) {
1300                                 /* Check pfcp mask and update input set */
1301                                 if (pfcp_mask->msg_type ||
1302                                         pfcp_mask->msg_len ||
1303                                         pfcp_mask->seid) {
1304                                         rte_flow_error_set(error, EINVAL,
1305                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1306                                                 item,
1307                                                 "Invalid pfcp mask");
1308                                         return -ENOTSUP;
1309                                 }
1310                                 if (pfcp_mask->s_field &&
1311                                         pfcp_spec->s_field == 0x01 &&
1312                                         ipv6_valid)
1313                                         *tun_type =
1314                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1315                                 else if (pfcp_mask->s_field &&
1316                                         pfcp_spec->s_field == 0x01)
1317                                         *tun_type =
1318                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1319                                 else if (pfcp_mask->s_field &&
1320                                         !pfcp_spec->s_field &&
1321                                         ipv6_valid)
1322                                         *tun_type =
1323                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1324                                 else if (pfcp_mask->s_field &&
1325                                         !pfcp_spec->s_field)
1326                                         *tun_type =
1327                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1328                                 else
1329                                         return -ENOTSUP;
1330                         }
1331                         break;
1332
1333                 case RTE_FLOW_ITEM_TYPE_VOID:
1334                         break;
1335
1336                 default:
1337                         rte_flow_error_set(error, EINVAL,
1338                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1339                                    "Invalid pattern item.");
1340                         goto out;
1341                 }
1342         }
1343
1344         if (pppoe_patt_valid && !pppoe_prot_valid) {
1345                 if (ipv6_valid && udp_valid)
1346                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1347                 else if (ipv6_valid && tcp_valid)
1348                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1349                 else if (ipv4_valid && udp_valid)
1350                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1351                 else if (ipv4_valid && tcp_valid)
1352                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1353                 else if (ipv6_valid)
1354                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1355                 else if (ipv4_valid)
1356                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1357                 else
1358                         *tun_type = ICE_SW_TUN_PPPOE;
1359         }
1360
1361         if (*tun_type == ICE_NON_TUN) {
1362                 if (vxlan_valid)
1363                         *tun_type = ICE_SW_TUN_VXLAN;
1364                 else if (nvgre_valid)
1365                         *tun_type = ICE_SW_TUN_NVGRE;
1366                 else if (ipv4_valid && tcp_valid)
1367                         *tun_type = ICE_SW_IPV4_TCP;
1368                 else if (ipv4_valid && udp_valid)
1369                         *tun_type = ICE_SW_IPV4_UDP;
1370                 else if (ipv6_valid && tcp_valid)
1371                         *tun_type = ICE_SW_IPV6_TCP;
1372                 else if (ipv6_valid && udp_valid)
1373                         *tun_type = ICE_SW_IPV6_UDP;
1374         }
1375
1376         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1377                 rte_flow_error_set(error, EINVAL,
1378                         RTE_FLOW_ERROR_TYPE_ITEM,
1379                         item,
1380                         "too much input set");
1381                 return -ENOTSUP;
1382         }
1383
1384         *lkups_num = t;
1385
1386         return input_set;
1387 out:
1388         return 0;
1389 }
1390
1391 static int
1392 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1393                             const struct rte_flow_action *actions,
1394                             struct rte_flow_error *error,
1395                             struct ice_adv_rule_info *rule_info)
1396 {
1397         const struct rte_flow_action_vf *act_vf;
1398         const struct rte_flow_action *action;
1399         enum rte_flow_action_type action_type;
1400
1401         for (action = actions; action->type !=
1402                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1403                 action_type = action->type;
1404                 switch (action_type) {
1405                 case RTE_FLOW_ACTION_TYPE_VF:
1406                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1407                         act_vf = action->conf;
1408                         if (act_vf->original)
1409                                 rule_info->sw_act.vsi_handle =
1410                                         ad->real_hw.avf.bus.func;
1411                         else
1412                                 rule_info->sw_act.vsi_handle = act_vf->id;
1413                         break;
1414                 default:
1415                         rte_flow_error_set(error,
1416                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1417                                            actions,
1418                                            "Invalid action type or queue number");
1419                         return -rte_errno;
1420                 }
1421         }
1422
1423         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1424         rule_info->sw_act.flag = ICE_FLTR_RX;
1425         rule_info->rx = 1;
1426         rule_info->priority = 5;
1427
1428         return 0;
1429 }
1430
1431 static int
1432 ice_switch_parse_action(struct ice_pf *pf,
1433                 const struct rte_flow_action *actions,
1434                 struct rte_flow_error *error,
1435                 struct ice_adv_rule_info *rule_info)
1436 {
1437         struct ice_vsi *vsi = pf->main_vsi;
1438         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1439         const struct rte_flow_action_queue *act_q;
1440         const struct rte_flow_action_rss *act_qgrop;
1441         uint16_t base_queue, i;
1442         const struct rte_flow_action *action;
1443         enum rte_flow_action_type action_type;
1444         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1445                  2, 4, 8, 16, 32, 64, 128};
1446
1447         base_queue = pf->base_queue + vsi->base_queue;
1448         for (action = actions; action->type !=
1449                         RTE_FLOW_ACTION_TYPE_END; action++) {
1450                 action_type = action->type;
1451                 switch (action_type) {
1452                 case RTE_FLOW_ACTION_TYPE_RSS:
1453                         act_qgrop = action->conf;
1454                         if (act_qgrop->queue_num <= 1)
1455                                 goto error;
1456                         rule_info->sw_act.fltr_act =
1457                                 ICE_FWD_TO_QGRP;
1458                         rule_info->sw_act.fwd_id.q_id =
1459                                 base_queue + act_qgrop->queue[0];
1460                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1461                                 if (act_qgrop->queue_num ==
1462                                         valid_qgrop_number[i])
1463                                         break;
1464                         }
1465                         if (i == MAX_QGRP_NUM_TYPE)
1466                                 goto error;
1467                         if ((act_qgrop->queue[0] +
1468                                 act_qgrop->queue_num) >
1469                                 dev->data->nb_rx_queues)
1470                                 goto error;
1471                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1472                                 if (act_qgrop->queue[i + 1] !=
1473                                         act_qgrop->queue[i] + 1)
1474                                         goto error;
1475                         rule_info->sw_act.qgrp_size =
1476                                 act_qgrop->queue_num;
1477                         break;
1478                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1479                         act_q = action->conf;
1480                         if (act_q->index >= dev->data->nb_rx_queues)
1481                                 goto error;
1482                         rule_info->sw_act.fltr_act =
1483                                 ICE_FWD_TO_Q;
1484                         rule_info->sw_act.fwd_id.q_id =
1485                                 base_queue + act_q->index;
1486                         break;
1487
1488                 case RTE_FLOW_ACTION_TYPE_DROP:
1489                         rule_info->sw_act.fltr_act =
1490                                 ICE_DROP_PACKET;
1491                         break;
1492
1493                 case RTE_FLOW_ACTION_TYPE_VOID:
1494                         break;
1495
1496                 default:
1497                         goto error;
1498                 }
1499         }
1500
1501         rule_info->sw_act.vsi_handle = vsi->idx;
1502         rule_info->rx = 1;
1503         rule_info->sw_act.src = vsi->idx;
1504         rule_info->priority = 5;
1505
1506         return 0;
1507
1508 error:
1509         rte_flow_error_set(error,
1510                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1511                 actions,
1512                 "Invalid action type or queue number");
1513         return -rte_errno;
1514 }
1515
1516 static int
1517 ice_switch_check_action(const struct rte_flow_action *actions,
1518                             struct rte_flow_error *error)
1519 {
1520         const struct rte_flow_action *action;
1521         enum rte_flow_action_type action_type;
1522         uint16_t actions_num = 0;
1523
1524         for (action = actions; action->type !=
1525                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1526                 action_type = action->type;
1527                 switch (action_type) {
1528                 case RTE_FLOW_ACTION_TYPE_VF:
1529                 case RTE_FLOW_ACTION_TYPE_RSS:
1530                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1531                 case RTE_FLOW_ACTION_TYPE_DROP:
1532                         actions_num++;
1533                         break;
1534                 case RTE_FLOW_ACTION_TYPE_VOID:
1535                         continue;
1536                 default:
1537                         rte_flow_error_set(error,
1538                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1539                                            actions,
1540                                            "Invalid action type");
1541                         return -rte_errno;
1542                 }
1543         }
1544
1545         if (actions_num != 1) {
1546                 rte_flow_error_set(error,
1547                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1548                                    actions,
1549                                    "Invalid action number");
1550                 return -rte_errno;
1551         }
1552
1553         return 0;
1554 }
1555
1556 static bool
1557 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1558 {
1559         switch (tun_type) {
1560         case ICE_SW_TUN_PROFID_IPV6_ESP:
1561         case ICE_SW_TUN_PROFID_IPV6_AH:
1562         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1563         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1564         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1565         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1566         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1567         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1568                 return true;
1569         default:
1570                 break;
1571         }
1572
1573         return false;
1574 }
1575
/**
 * Parse a flow pattern and action list into switch filter meta data.
 *
 * Sizes and allocates the lookup element list, matches the pattern
 * against the supported pattern array, extracts the input set and
 * tunnel type, validates/parses the actions (DCF or PF path), and on
 * success hands ownership of the allocated meta data to the caller
 * through *meta.  When meta is NULL the allocations are freed here.
 *
 * @param ad        ice adapter (PF or DCF)
 * @param array     supported pattern match items for this parser
 * @param array_len number of entries in @array
 * @param pattern   flow item list terminated by RTE_FLOW_ITEM_TYPE_END
 * @param actions   flow action list terminated by RTE_FLOW_ACTION_TYPE_END
 * @param meta      out: parsed sw_meta (list, lkups_num, rule_info); may be NULL
 * @param error     receives details on failure
 * @return 0 on success, -rte_errno on failure (all local allocations freed)
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Count pattern items to size the lookup list, and detect a
	 * fully-masked ether type, which means the rule must match both
	 * tunneled and non-tunneled packets.
	 * NOTE(review): an ETH item without a mask hits the "continue"
	 * and so does not get the extra slot reserved below — confirm
	 * a mask-less ETH never expands to two lookups.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	/* Reject patterns this parser does not support. */
	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Extract the input set; a zero input set is only legal for
	 * profile-based rules, and no field outside the pattern's
	 * supported mask may be requested.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action number");
		goto error;
	}

	/* DCF rules forward to a VF VSI; PF rules to local queues. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action");
		goto error;
	}

	/* Hand the parsed data to the caller, or free it when the
	 * caller only wanted validation.
	 */
	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1698
1699 static int
1700 ice_switch_query(struct ice_adapter *ad __rte_unused,
1701                 struct rte_flow *flow __rte_unused,
1702                 struct rte_flow_query_count *count __rte_unused,
1703                 struct rte_flow_error *error)
1704 {
1705         rte_flow_error_set(error, EINVAL,
1706                 RTE_FLOW_ERROR_TYPE_HANDLE,
1707                 NULL,
1708                 "count action not supported by switch filter");
1709
1710         return -rte_errno;
1711 }
1712
/**
 * Redirect an existing switch rule to a new VSI number.
 *
 * Looks the rule up in the HW recipe's filter list, duplicates its
 * lookup elements, removes the old rule, updates the VSI context with
 * the new VSI number, and re-adds the rule.  Rules targeting a VSI
 * list are rewritten to forward to the single redirected VSI.
 *
 * @param ad   ice adapter
 * @param flow flow whose rule data (rid/rule_id/vsi_handle) identifies the rule
 * @param rd   redirect request; only ICE_FLOW_REDIRECT_VSI is supported
 * @return 0 on success or when the rule is not on the redirected VSI,
 *         -EINVAL/-ENOTSUP on failure
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule lives on a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find the matching filter entry and duplicate its lookups;
	 * the originals are freed when the old rule is removed below.
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* Collapse a VSI-list action to a single-VSI
			 * forward toward the redirected VSI.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule found in the recipe's filter list. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1794
1795 static int
1796 ice_switch_init(struct ice_adapter *ad)
1797 {
1798         int ret = 0;
1799         struct ice_flow_parser *dist_parser;
1800         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1801
1802         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1803                 dist_parser = &ice_switch_dist_parser_comms;
1804         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1805                 dist_parser = &ice_switch_dist_parser_os;
1806         else
1807                 return -EINVAL;
1808
1809         if (ad->devargs.pipe_mode_support)
1810                 ret = ice_register_parser(perm_parser, ad);
1811         else
1812                 ret = ice_register_parser(dist_parser, ad);
1813         return ret;
1814 }
1815
1816 static void
1817 ice_switch_uninit(struct ice_adapter *ad)
1818 {
1819         struct ice_flow_parser *dist_parser;
1820         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1821
1822         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1823                 dist_parser = &ice_switch_dist_parser_comms;
1824         else
1825                 dist_parser = &ice_switch_dist_parser_os;
1826
1827         if (ad->devargs.pipe_mode_support)
1828                 ice_unregister_parser(perm_parser, ad);
1829         else
1830                 ice_unregister_parser(dist_parser, ad);
1831 }
1832
/* Switch flow engine: binds the generic flow framework callbacks to the
 * switch filter implementation in this file.  Note the query callback
 * always fails (no counters) and redirect supports VSI redirect only.
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1844
/* Distributor-stage parser used with the OS default DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1853
/* Distributor-stage parser used with the COMMS DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1862
/* Permission-stage parser, registered when pipeline mode is enabled. */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1871
1872 RTE_INIT(ice_sw_engine_init)
1873 {
1874         struct ice_flow_engine *engine = &ice_switch_engine;
1875         ice_register_flow_engine(engine);
1876 }