1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* NOTE(review): presumably the max number of queue-group types for RSS/queue
 * actions — not used in the visible part of this chunk; confirm against callers.
 */
29 #define MAX_QGRP_NUM_TYPE 7
/* NOTE(review): appears to bound the accumulated input_set_byte counter used
 * while parsing patterns; the comparison site is outside this view — confirm.
 */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol-id values matched against the PPPoE proto_id item to decide
 * between ICE_SW_TUN_PPPOE and ICE_SW_TUN_PPPOE_PAY (see PPPOE_PROTO_ID case).
 */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 next-protocol value (0x2F = 47, GRE) compared against the masked
 * next_proto_id in the IPV4 item to flag NVGRE-capable rules.
 */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/*
 * Input-set bitmaps: each ICE_SW_INSET_* macro ORs together the match
 * fields (ICE_INSET_*) that a given flow pattern is allowed to use.
 * They are referenced by the ice_switch_pattern_* tables below.
 * Plain (non-tunnel) L2/L3/L4 sets first.
 * NOTE(review): this chunk is line-sampled; some continuation lines of
 * these macros (e.g. the tail of ICE_SW_INSET_MAC_VLAN) are not visible.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_IPV4 ( \
41 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
42 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
43 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6 ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
54 ICE_INSET_IPV6_NEXT_HDR)
55 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
56 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
59 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
60 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
61 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
62 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Distributor-mode tunnel sets: inner VXLAN/NVGRE tunnel fields
 * (ICE_INSET_TUN_*) plus the outer IPv4 destination address.
 */
63 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
64 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
65 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
66 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
67 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
80 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
82 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
84 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/*
 * Permission-mode tunnel sets: inner tunnel L3/L4 fields only
 * (no outer fields, no tunnel-id requirement).
 */
85 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
86 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
87 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
91 ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
95 ICE_INSET_TUN_IPV4_TOS)
/*
 * PPPoE sets; the *_PROTO variant additionally matches the PPP
 * protocol id carried in the session header.  The *_IPV[46]* variants
 * layer the base L3/L4 sets on top of the PPPoE session match.
 */
96 #define ICE_SW_INSET_MAC_PPPOE ( \
97 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
98 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
99 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
100 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
101 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
102 ICE_INSET_PPPOE_PROTO)
103 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
104 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
105 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
106 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
107 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
108 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
110 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
112 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
114 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/*
 * IPsec (ESP/AH), L2TPv3-over-IP and PFCP sets, layered on the base
 * IPv4/IPv6 sets plus the protocol-specific identifier field(s).
 */
115 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
116 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
117 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
118 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
119 #define ICE_SW_INSET_MAC_IPV4_AH ( \
120 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
121 #define ICE_SW_INSET_MAC_IPV6_AH ( \
122 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
123 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
124 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
125 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
126 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
127 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
128 ICE_SW_INSET_MAC_IPV4 | \
129 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
130 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
131 ICE_SW_INSET_MAC_IPV6 | \
132 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* Members of the per-flow parse result (sw_meta): the lookup-element
 * array consumed by ice_add_adv_rule() and the accompanying rule info.
 * NOTE(review): the enclosing 'struct sw_meta {' line and the lkups_num
 * member declaration are not visible in this sampled chunk.
 */
135 struct ice_adv_lkup_elem *list;
137 struct ice_adv_rule_info rule_info;

/* Forward declarations of the flow parsers defined later in this file:
 * distributor-stage parsers for the OS-default and COMMS DDP packages,
 * and the permission-stage parser.
 */
140 static struct ice_flow_parser ice_switch_dist_parser_os;
141 static struct ice_flow_parser ice_switch_dist_parser_comms;
142 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Patterns supported by the distributor stage with the COMMS DDP
 * package.  Each entry pairs a flow pattern with the input-set bitmap
 * of fields it may match ({pattern, supported input set, metadata}).
 * NOTE(review): chunk is line-sampled — the leading 'static struct'
 * line, a few pattern-name lines and the closing '};' are not visible.
 */
145 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
147 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
148 {pattern_ethertype_vlan,
149 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
151 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
152 {pattern_eth_ipv4_udp,
153 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
154 {pattern_eth_ipv4_tcp,
155 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
157 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
158 {pattern_eth_ipv6_udp,
159 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
160 {pattern_eth_ipv6_tcp,
161 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnel patterns use the distributor tunnel sets. */
162 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
163 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
164 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
165 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
166 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
167 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
168 {pattern_eth_ipv4_nvgre_eth_ipv4,
169 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
170 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
171 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
172 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
173 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE patterns, with and without VLAN / inner L3-L4. */
175 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
176 {pattern_eth_vlan_pppoes,
177 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
178 {pattern_eth_pppoes_proto,
179 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
180 {pattern_eth_vlan_pppoes_proto,
181 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
182 {pattern_eth_pppoes_ipv4,
183 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
184 {pattern_eth_pppoes_ipv4_tcp,
185 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
186 {pattern_eth_pppoes_ipv4_udp,
187 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
188 {pattern_eth_pppoes_ipv6,
189 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
190 {pattern_eth_pppoes_ipv6_tcp,
191 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
192 {pattern_eth_pppoes_ipv6_udp,
193 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
194 {pattern_eth_vlan_pppoes_ipv4,
195 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
196 {pattern_eth_vlan_pppoes_ipv4_tcp,
197 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
198 {pattern_eth_vlan_pppoes_ipv4_udp,
199 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
200 {pattern_eth_vlan_pppoes_ipv6,
201 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
202 {pattern_eth_vlan_pppoes_ipv6_tcp,
203 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
204 {pattern_eth_vlan_pppoes_ipv6_udp,
205 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
/* IPsec / L2TPv3 / PFCP patterns; ICE_INSET_NONE entries accept the
 * pattern but allow no field matching (profile-only rules).
 */
206 {pattern_eth_ipv4_esp,
207 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
208 {pattern_eth_ipv4_udp_esp,
209 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
210 {pattern_eth_ipv6_esp,
211 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
212 {pattern_eth_ipv6_udp_esp,
213 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
214 {pattern_eth_ipv4_ah,
215 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
216 {pattern_eth_ipv6_ah,
217 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
218 {pattern_eth_ipv6_udp_ah,
219 ICE_INSET_NONE, ICE_INSET_NONE},
220 {pattern_eth_ipv4_l2tp,
221 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
222 {pattern_eth_ipv6_l2tp,
223 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
224 {pattern_eth_ipv4_pfcp,
225 ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_ipv6_pfcp,
227 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Patterns supported by the distributor stage with the OS-default DDP
 * package: basic L2/L3/L4 plus VXLAN/NVGRE tunnels — no PPPoE/IPsec/
 * L2TP/PFCP entries (those need the COMMS package, see table above's
 * counterpart).  NOTE(review): leading 'static struct' line, some
 * pattern-name lines and the closing '};' are missing from this view.
 */
231 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
233 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
234 {pattern_ethertype_vlan,
235 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
237 ICE_INSET_NONE, ICE_INSET_NONE},
239 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
240 {pattern_eth_ipv4_udp,
241 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
242 {pattern_eth_ipv4_tcp,
243 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
245 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
246 {pattern_eth_ipv6_udp,
247 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
248 {pattern_eth_ipv6_tcp,
249 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
250 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
251 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
252 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
253 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
254 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
255 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
256 {pattern_eth_ipv4_nvgre_eth_ipv4,
257 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
258 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
259 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
260 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
261 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/*
 * Patterns supported by the permission stage.  Same coverage as the
 * COMMS distributor table, except tunnel patterns use the PERM_TUNNEL
 * input sets (inner fields only, no outer/tunnel-id match).
 * NOTE(review): leading 'static struct' line, some pattern-name lines
 * and the closing '};' are missing from this sampled view.
 */
265 ice_pattern_match_item ice_switch_pattern_perm[] = {
267 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
268 {pattern_ethertype_vlan,
269 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
271 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
272 {pattern_eth_ipv4_udp,
273 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
274 {pattern_eth_ipv4_tcp,
275 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
277 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
278 {pattern_eth_ipv6_udp,
279 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
280 {pattern_eth_ipv6_tcp,
281 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* Tunnel patterns: permission mode matches inner fields only. */
282 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
283 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
284 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
285 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
286 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
287 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
288 {pattern_eth_ipv4_nvgre_eth_ipv4,
289 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
290 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
291 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
292 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
293 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE patterns, with and without VLAN / inner L3-L4. */
295 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
296 {pattern_eth_vlan_pppoes,
297 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
298 {pattern_eth_pppoes_proto,
299 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
300 {pattern_eth_vlan_pppoes_proto,
301 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
302 {pattern_eth_pppoes_ipv4,
303 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
304 {pattern_eth_pppoes_ipv4_tcp,
305 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
306 {pattern_eth_pppoes_ipv4_udp,
307 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
308 {pattern_eth_pppoes_ipv6,
309 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
310 {pattern_eth_pppoes_ipv6_tcp,
311 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
312 {pattern_eth_pppoes_ipv6_udp,
313 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
314 {pattern_eth_vlan_pppoes_ipv4,
315 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
316 {pattern_eth_vlan_pppoes_ipv4_tcp,
317 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
318 {pattern_eth_vlan_pppoes_ipv4_udp,
319 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
320 {pattern_eth_vlan_pppoes_ipv6,
321 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
322 {pattern_eth_vlan_pppoes_ipv6_tcp,
323 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
324 {pattern_eth_vlan_pppoes_ipv6_udp,
325 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
/* IPsec / L2TPv3 / PFCP patterns; ICE_INSET_NONE entries accept the
 * pattern but allow no field matching (profile-only rules).
 */
326 {pattern_eth_ipv4_esp,
327 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
328 {pattern_eth_ipv4_udp_esp,
329 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
330 {pattern_eth_ipv6_esp,
331 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
332 {pattern_eth_ipv6_udp_esp,
333 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
334 {pattern_eth_ipv4_ah,
335 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
336 {pattern_eth_ipv6_ah,
337 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
338 {pattern_eth_ipv6_udp_ah,
339 ICE_INSET_NONE, ICE_INSET_NONE},
340 {pattern_eth_ipv4_l2tp,
341 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
342 {pattern_eth_ipv6_l2tp,
343 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
344 {pattern_eth_ipv4_pfcp,
345 ICE_INSET_NONE, ICE_INSET_NONE},
346 {pattern_eth_ipv6_pfcp,
347 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program a switch filter rule from the pre-parsed meta (a sw_meta
 * holding the lookup-element list and rule info) via ice_add_adv_rule()
 * and attach the returned HW rule handle to @flow for later destroy.
 * On failure @error is populated via rte_flow_error_set().
 * NOTE(review): this chunk is line-sampled — declarations of ret and
 * lkups_cnt, the error 'goto'/return paths, and the closing brace are
 * not visible here; comments below describe only the visible lines.
 */
351 ice_switch_create(struct ice_adapter *ad,
352 struct rte_flow *flow,
354 struct rte_flow_error *error)
357 struct ice_pf *pf = &ad->pf;
358 struct ice_hw *hw = ICE_PF_TO_HW(pf);
359 struct ice_rule_query_data rule_added = {0};
360 struct ice_rule_query_data *filter_ptr;
/* Unpack the parse result produced by the pattern/action parser. */
361 struct ice_adv_lkup_elem *list =
362 ((struct sw_meta *)meta)->list;
364 ((struct sw_meta *)meta)->lkups_num;
365 struct ice_adv_rule_info *rule_info =
366 &((struct sw_meta *)meta)->rule_info;
/* HW chains a bounded number of lookup words; reject oversized lists. */
368 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
371 "item number too large for rule");
375 rte_flow_error_set(error, EINVAL,
376 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
377 "lookup list should not be NULL");
/* Program the advanced rule into the switch. */
380 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the HW rule/recipe ids so ice_switch_destroy() can remove
 * the rule later; freed in ice_switch_filter_rule_free().
 */
382 filter_ptr = rte_zmalloc("ice_switch_filter",
383 sizeof(struct ice_rule_query_data), 0);
385 rte_flow_error_set(error, EINVAL,
386 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
387 "No memory for ice_switch_filter");
390 flow->rule = filter_ptr;
391 rte_memcpy(filter_ptr,
393 sizeof(struct ice_rule_query_data));
395 rte_flow_error_set(error, EINVAL,
396 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
397 "switch filter create flow fail");
/*
 * Remove the HW switch rule attached to @flow (created by
 * ice_switch_create()) using its saved rule/recipe ids, then free the
 * per-flow ice_rule_query_data.  On failure @error is populated.
 * NOTE(review): line-sampled view — the NULL-handle check, return
 * statements and closing brace are not visible here.
 */
413 ice_switch_destroy(struct ice_adapter *ad,
414 struct rte_flow *flow,
415 struct rte_flow_error *error)
417 struct ice_hw *hw = &ad->hw;
419 struct ice_rule_query_data *filter_ptr;
/* Recover the rule handle stashed on the flow at create time. */
421 filter_ptr = (struct ice_rule_query_data *)
425 rte_flow_error_set(error, EINVAL,
426 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
428 " create by switch filter");
/* Remove the advanced rule from HW by its recipe/rule id. */
432 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
434 rte_flow_error_set(error, EINVAL,
435 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
436 "fail to destroy switch filter rule");
440 rte_free(filter_ptr);
/* Free the ice_rule_query_data handle allocated in ice_switch_create()
 * and stored in flow->rule (rte_free(NULL) is a safe no-op).
 */
445 ice_switch_filter_rule_free(struct rte_flow *flow)
447 rte_free(flow->rule);
451 ice_switch_inset_get(const struct rte_flow_item pattern[],
452 struct rte_flow_error *error,
453 struct ice_adv_lkup_elem *list,
455 enum ice_sw_tunnel_type *tun_type)
457 const struct rte_flow_item *item = pattern;
458 enum rte_flow_item_type item_type;
459 const struct rte_flow_item_eth *eth_spec, *eth_mask;
460 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
461 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
462 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
463 const struct rte_flow_item_udp *udp_spec, *udp_mask;
464 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
465 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
466 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
467 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
468 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
469 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
471 const struct rte_flow_item_esp *esp_spec, *esp_mask;
472 const struct rte_flow_item_ah *ah_spec, *ah_mask;
473 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
474 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
475 uint64_t input_set = ICE_INSET_NONE;
476 uint16_t input_set_byte = 0;
477 bool pppoe_elem_valid = 0;
478 bool pppoe_patt_valid = 0;
479 bool pppoe_prot_valid = 0;
480 bool tunnel_valid = 0;
481 bool profile_rule = 0;
482 bool nvgre_valid = 0;
483 bool vxlan_valid = 0;
490 for (item = pattern; item->type !=
491 RTE_FLOW_ITEM_TYPE_END; item++) {
493 rte_flow_error_set(error, EINVAL,
494 RTE_FLOW_ERROR_TYPE_ITEM,
496 "Not support range");
499 item_type = item->type;
502 case RTE_FLOW_ITEM_TYPE_ETH:
503 eth_spec = item->spec;
504 eth_mask = item->mask;
505 if (eth_spec && eth_mask) {
506 const uint8_t *a = eth_mask->src.addr_bytes;
507 const uint8_t *b = eth_mask->dst.addr_bytes;
508 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
509 if (a[j] && tunnel_valid) {
519 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
520 if (b[j] && tunnel_valid) {
531 input_set |= ICE_INSET_ETHERTYPE;
532 list[t].type = (tunnel_valid == 0) ?
533 ICE_MAC_OFOS : ICE_MAC_IL;
534 struct ice_ether_hdr *h;
535 struct ice_ether_hdr *m;
537 h = &list[t].h_u.eth_hdr;
538 m = &list[t].m_u.eth_hdr;
539 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
540 if (eth_mask->src.addr_bytes[j]) {
542 eth_spec->src.addr_bytes[j];
544 eth_mask->src.addr_bytes[j];
548 if (eth_mask->dst.addr_bytes[j]) {
550 eth_spec->dst.addr_bytes[j];
552 eth_mask->dst.addr_bytes[j];
559 if (eth_mask->type) {
560 list[t].type = ICE_ETYPE_OL;
561 list[t].h_u.ethertype.ethtype_id =
563 list[t].m_u.ethertype.ethtype_id =
571 case RTE_FLOW_ITEM_TYPE_IPV4:
572 ipv4_spec = item->spec;
573 ipv4_mask = item->mask;
575 if (ipv4_spec && ipv4_mask) {
576 /* Check IPv4 mask and update input set */
577 if (ipv4_mask->hdr.version_ihl ||
578 ipv4_mask->hdr.total_length ||
579 ipv4_mask->hdr.packet_id ||
580 ipv4_mask->hdr.hdr_checksum) {
581 rte_flow_error_set(error, EINVAL,
582 RTE_FLOW_ERROR_TYPE_ITEM,
584 "Invalid IPv4 mask.");
589 if (ipv4_mask->hdr.type_of_service)
591 ICE_INSET_TUN_IPV4_TOS;
592 if (ipv4_mask->hdr.src_addr)
594 ICE_INSET_TUN_IPV4_SRC;
595 if (ipv4_mask->hdr.dst_addr)
597 ICE_INSET_TUN_IPV4_DST;
598 if (ipv4_mask->hdr.time_to_live)
600 ICE_INSET_TUN_IPV4_TTL;
601 if (ipv4_mask->hdr.next_proto_id)
603 ICE_INSET_TUN_IPV4_PROTO;
605 if (ipv4_mask->hdr.src_addr)
606 input_set |= ICE_INSET_IPV4_SRC;
607 if (ipv4_mask->hdr.dst_addr)
608 input_set |= ICE_INSET_IPV4_DST;
609 if (ipv4_mask->hdr.time_to_live)
610 input_set |= ICE_INSET_IPV4_TTL;
611 if (ipv4_mask->hdr.next_proto_id)
613 ICE_INSET_IPV4_PROTO;
614 if (ipv4_mask->hdr.type_of_service)
618 list[t].type = (tunnel_valid == 0) ?
619 ICE_IPV4_OFOS : ICE_IPV4_IL;
620 if (ipv4_mask->hdr.src_addr) {
621 list[t].h_u.ipv4_hdr.src_addr =
622 ipv4_spec->hdr.src_addr;
623 list[t].m_u.ipv4_hdr.src_addr =
624 ipv4_mask->hdr.src_addr;
627 if (ipv4_mask->hdr.dst_addr) {
628 list[t].h_u.ipv4_hdr.dst_addr =
629 ipv4_spec->hdr.dst_addr;
630 list[t].m_u.ipv4_hdr.dst_addr =
631 ipv4_mask->hdr.dst_addr;
634 if (ipv4_mask->hdr.time_to_live) {
635 list[t].h_u.ipv4_hdr.time_to_live =
636 ipv4_spec->hdr.time_to_live;
637 list[t].m_u.ipv4_hdr.time_to_live =
638 ipv4_mask->hdr.time_to_live;
641 if (ipv4_mask->hdr.next_proto_id) {
642 list[t].h_u.ipv4_hdr.protocol =
643 ipv4_spec->hdr.next_proto_id;
644 list[t].m_u.ipv4_hdr.protocol =
645 ipv4_mask->hdr.next_proto_id;
648 if ((ipv4_spec->hdr.next_proto_id &
649 ipv4_mask->hdr.next_proto_id) ==
650 ICE_IPV4_PROTO_NVGRE)
651 *tun_type = ICE_SW_TUN_AND_NON_TUN;
652 if (ipv4_mask->hdr.type_of_service) {
653 list[t].h_u.ipv4_hdr.tos =
654 ipv4_spec->hdr.type_of_service;
655 list[t].m_u.ipv4_hdr.tos =
656 ipv4_mask->hdr.type_of_service;
663 case RTE_FLOW_ITEM_TYPE_IPV6:
664 ipv6_spec = item->spec;
665 ipv6_mask = item->mask;
667 if (ipv6_spec && ipv6_mask) {
668 if (ipv6_mask->hdr.payload_len) {
669 rte_flow_error_set(error, EINVAL,
670 RTE_FLOW_ERROR_TYPE_ITEM,
672 "Invalid IPv6 mask");
676 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
677 if (ipv6_mask->hdr.src_addr[j] &&
680 ICE_INSET_TUN_IPV6_SRC;
682 } else if (ipv6_mask->hdr.src_addr[j]) {
683 input_set |= ICE_INSET_IPV6_SRC;
687 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
688 if (ipv6_mask->hdr.dst_addr[j] &&
691 ICE_INSET_TUN_IPV6_DST;
693 } else if (ipv6_mask->hdr.dst_addr[j]) {
694 input_set |= ICE_INSET_IPV6_DST;
698 if (ipv6_mask->hdr.proto &&
701 ICE_INSET_TUN_IPV6_NEXT_HDR;
702 else if (ipv6_mask->hdr.proto)
704 ICE_INSET_IPV6_NEXT_HDR;
705 if (ipv6_mask->hdr.hop_limits &&
708 ICE_INSET_TUN_IPV6_HOP_LIMIT;
709 else if (ipv6_mask->hdr.hop_limits)
711 ICE_INSET_IPV6_HOP_LIMIT;
712 if ((ipv6_mask->hdr.vtc_flow &
714 (RTE_IPV6_HDR_TC_MASK)) &&
717 ICE_INSET_TUN_IPV6_TC;
718 else if (ipv6_mask->hdr.vtc_flow &
720 (RTE_IPV6_HDR_TC_MASK))
721 input_set |= ICE_INSET_IPV6_TC;
723 list[t].type = (tunnel_valid == 0) ?
724 ICE_IPV6_OFOS : ICE_IPV6_IL;
725 struct ice_ipv6_hdr *f;
726 struct ice_ipv6_hdr *s;
727 f = &list[t].h_u.ipv6_hdr;
728 s = &list[t].m_u.ipv6_hdr;
729 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
730 if (ipv6_mask->hdr.src_addr[j]) {
732 ipv6_spec->hdr.src_addr[j];
734 ipv6_mask->hdr.src_addr[j];
737 if (ipv6_mask->hdr.dst_addr[j]) {
739 ipv6_spec->hdr.dst_addr[j];
741 ipv6_mask->hdr.dst_addr[j];
745 if (ipv6_mask->hdr.proto) {
747 ipv6_spec->hdr.proto;
749 ipv6_mask->hdr.proto;
752 if (ipv6_mask->hdr.hop_limits) {
754 ipv6_spec->hdr.hop_limits;
756 ipv6_mask->hdr.hop_limits;
759 if (ipv6_mask->hdr.vtc_flow &
761 (RTE_IPV6_HDR_TC_MASK)) {
762 struct ice_le_ver_tc_flow vtf;
763 vtf.u.fld.version = 0;
764 vtf.u.fld.flow_label = 0;
765 vtf.u.fld.tc = (rte_be_to_cpu_32
766 (ipv6_spec->hdr.vtc_flow) &
767 RTE_IPV6_HDR_TC_MASK) >>
768 RTE_IPV6_HDR_TC_SHIFT;
769 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
770 vtf.u.fld.tc = (rte_be_to_cpu_32
771 (ipv6_mask->hdr.vtc_flow) &
772 RTE_IPV6_HDR_TC_MASK) >>
773 RTE_IPV6_HDR_TC_SHIFT;
774 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
781 case RTE_FLOW_ITEM_TYPE_UDP:
782 udp_spec = item->spec;
783 udp_mask = item->mask;
785 if (udp_spec && udp_mask) {
786 /* Check UDP mask and update input set*/
787 if (udp_mask->hdr.dgram_len ||
788 udp_mask->hdr.dgram_cksum) {
789 rte_flow_error_set(error, EINVAL,
790 RTE_FLOW_ERROR_TYPE_ITEM,
797 if (udp_mask->hdr.src_port)
799 ICE_INSET_TUN_UDP_SRC_PORT;
800 if (udp_mask->hdr.dst_port)
802 ICE_INSET_TUN_UDP_DST_PORT;
804 if (udp_mask->hdr.src_port)
806 ICE_INSET_UDP_SRC_PORT;
807 if (udp_mask->hdr.dst_port)
809 ICE_INSET_UDP_DST_PORT;
811 if (*tun_type == ICE_SW_TUN_VXLAN &&
813 list[t].type = ICE_UDP_OF;
815 list[t].type = ICE_UDP_ILOS;
816 if (udp_mask->hdr.src_port) {
817 list[t].h_u.l4_hdr.src_port =
818 udp_spec->hdr.src_port;
819 list[t].m_u.l4_hdr.src_port =
820 udp_mask->hdr.src_port;
823 if (udp_mask->hdr.dst_port) {
824 list[t].h_u.l4_hdr.dst_port =
825 udp_spec->hdr.dst_port;
826 list[t].m_u.l4_hdr.dst_port =
827 udp_mask->hdr.dst_port;
834 case RTE_FLOW_ITEM_TYPE_TCP:
835 tcp_spec = item->spec;
836 tcp_mask = item->mask;
838 if (tcp_spec && tcp_mask) {
839 /* Check TCP mask and update input set */
840 if (tcp_mask->hdr.sent_seq ||
841 tcp_mask->hdr.recv_ack ||
842 tcp_mask->hdr.data_off ||
843 tcp_mask->hdr.tcp_flags ||
844 tcp_mask->hdr.rx_win ||
845 tcp_mask->hdr.cksum ||
846 tcp_mask->hdr.tcp_urp) {
847 rte_flow_error_set(error, EINVAL,
848 RTE_FLOW_ERROR_TYPE_ITEM,
855 if (tcp_mask->hdr.src_port)
857 ICE_INSET_TUN_TCP_SRC_PORT;
858 if (tcp_mask->hdr.dst_port)
860 ICE_INSET_TUN_TCP_DST_PORT;
862 if (tcp_mask->hdr.src_port)
864 ICE_INSET_TCP_SRC_PORT;
865 if (tcp_mask->hdr.dst_port)
867 ICE_INSET_TCP_DST_PORT;
869 list[t].type = ICE_TCP_IL;
870 if (tcp_mask->hdr.src_port) {
871 list[t].h_u.l4_hdr.src_port =
872 tcp_spec->hdr.src_port;
873 list[t].m_u.l4_hdr.src_port =
874 tcp_mask->hdr.src_port;
877 if (tcp_mask->hdr.dst_port) {
878 list[t].h_u.l4_hdr.dst_port =
879 tcp_spec->hdr.dst_port;
880 list[t].m_u.l4_hdr.dst_port =
881 tcp_mask->hdr.dst_port;
888 case RTE_FLOW_ITEM_TYPE_SCTP:
889 sctp_spec = item->spec;
890 sctp_mask = item->mask;
891 if (sctp_spec && sctp_mask) {
892 /* Check SCTP mask and update input set */
893 if (sctp_mask->hdr.cksum) {
894 rte_flow_error_set(error, EINVAL,
895 RTE_FLOW_ERROR_TYPE_ITEM,
897 "Invalid SCTP mask");
902 if (sctp_mask->hdr.src_port)
904 ICE_INSET_TUN_SCTP_SRC_PORT;
905 if (sctp_mask->hdr.dst_port)
907 ICE_INSET_TUN_SCTP_DST_PORT;
909 if (sctp_mask->hdr.src_port)
911 ICE_INSET_SCTP_SRC_PORT;
912 if (sctp_mask->hdr.dst_port)
914 ICE_INSET_SCTP_DST_PORT;
916 list[t].type = ICE_SCTP_IL;
917 if (sctp_mask->hdr.src_port) {
918 list[t].h_u.sctp_hdr.src_port =
919 sctp_spec->hdr.src_port;
920 list[t].m_u.sctp_hdr.src_port =
921 sctp_mask->hdr.src_port;
924 if (sctp_mask->hdr.dst_port) {
925 list[t].h_u.sctp_hdr.dst_port =
926 sctp_spec->hdr.dst_port;
927 list[t].m_u.sctp_hdr.dst_port =
928 sctp_mask->hdr.dst_port;
935 case RTE_FLOW_ITEM_TYPE_VXLAN:
936 vxlan_spec = item->spec;
937 vxlan_mask = item->mask;
938 /* Check if VXLAN item is used to describe protocol.
939 * If yes, both spec and mask should be NULL.
940 * If no, both spec and mask shouldn't be NULL.
942 if ((!vxlan_spec && vxlan_mask) ||
943 (vxlan_spec && !vxlan_mask)) {
944 rte_flow_error_set(error, EINVAL,
945 RTE_FLOW_ERROR_TYPE_ITEM,
947 "Invalid VXLAN item");
952 if (vxlan_spec && vxlan_mask) {
953 list[t].type = ICE_VXLAN;
954 if (vxlan_mask->vni[0] ||
955 vxlan_mask->vni[1] ||
956 vxlan_mask->vni[2]) {
957 list[t].h_u.tnl_hdr.vni =
958 (vxlan_spec->vni[2] << 16) |
959 (vxlan_spec->vni[1] << 8) |
961 list[t].m_u.tnl_hdr.vni =
962 (vxlan_mask->vni[2] << 16) |
963 (vxlan_mask->vni[1] << 8) |
966 ICE_INSET_TUN_VXLAN_VNI;
973 case RTE_FLOW_ITEM_TYPE_NVGRE:
974 nvgre_spec = item->spec;
975 nvgre_mask = item->mask;
976 /* Check if NVGRE item is used to describe protocol.
977 * If yes, both spec and mask should be NULL.
978 * If no, both spec and mask shouldn't be NULL.
980 if ((!nvgre_spec && nvgre_mask) ||
981 (nvgre_spec && !nvgre_mask)) {
982 rte_flow_error_set(error, EINVAL,
983 RTE_FLOW_ERROR_TYPE_ITEM,
985 "Invalid NVGRE item");
990 if (nvgre_spec && nvgre_mask) {
991 list[t].type = ICE_NVGRE;
992 if (nvgre_mask->tni[0] ||
993 nvgre_mask->tni[1] ||
994 nvgre_mask->tni[2]) {
995 list[t].h_u.nvgre_hdr.tni_flow =
996 (nvgre_spec->tni[2] << 16) |
997 (nvgre_spec->tni[1] << 8) |
999 list[t].m_u.nvgre_hdr.tni_flow =
1000 (nvgre_mask->tni[2] << 16) |
1001 (nvgre_mask->tni[1] << 8) |
1004 ICE_INSET_TUN_NVGRE_TNI;
1005 input_set_byte += 2;
1011 case RTE_FLOW_ITEM_TYPE_VLAN:
1012 vlan_spec = item->spec;
1013 vlan_mask = item->mask;
1014 /* Check if VLAN item is used to describe protocol.
1015 * If yes, both spec and mask should be NULL.
1016 * If no, both spec and mask shouldn't be NULL.
1018 if ((!vlan_spec && vlan_mask) ||
1019 (vlan_spec && !vlan_mask)) {
1020 rte_flow_error_set(error, EINVAL,
1021 RTE_FLOW_ERROR_TYPE_ITEM,
1023 "Invalid VLAN item");
1026 if (vlan_spec && vlan_mask) {
1027 list[t].type = ICE_VLAN_OFOS;
1028 if (vlan_mask->tci) {
1029 list[t].h_u.vlan_hdr.vlan =
1031 list[t].m_u.vlan_hdr.vlan =
1033 input_set |= ICE_INSET_VLAN_OUTER;
1034 input_set_byte += 2;
1036 if (vlan_mask->inner_type) {
1037 list[t].h_u.vlan_hdr.type =
1038 vlan_spec->inner_type;
1039 list[t].m_u.vlan_hdr.type =
1040 vlan_mask->inner_type;
1041 input_set |= ICE_INSET_ETHERTYPE;
1042 input_set_byte += 2;
1048 case RTE_FLOW_ITEM_TYPE_PPPOED:
1049 case RTE_FLOW_ITEM_TYPE_PPPOES:
1050 pppoe_spec = item->spec;
1051 pppoe_mask = item->mask;
1052 /* Check if PPPoE item is used to describe protocol.
1053 * If yes, both spec and mask should be NULL.
1054 * If no, both spec and mask shouldn't be NULL.
1056 if ((!pppoe_spec && pppoe_mask) ||
1057 (pppoe_spec && !pppoe_mask)) {
1058 rte_flow_error_set(error, EINVAL,
1059 RTE_FLOW_ERROR_TYPE_ITEM,
1061 "Invalid pppoe item");
1064 pppoe_patt_valid = 1;
1065 if (pppoe_spec && pppoe_mask) {
1066 /* Check pppoe mask and update input set */
1067 if (pppoe_mask->length ||
1069 pppoe_mask->version_type) {
1070 rte_flow_error_set(error, EINVAL,
1071 RTE_FLOW_ERROR_TYPE_ITEM,
1073 "Invalid pppoe mask");
1076 list[t].type = ICE_PPPOE;
1077 if (pppoe_mask->session_id) {
1078 list[t].h_u.pppoe_hdr.session_id =
1079 pppoe_spec->session_id;
1080 list[t].m_u.pppoe_hdr.session_id =
1081 pppoe_mask->session_id;
1082 input_set |= ICE_INSET_PPPOE_SESSION;
1083 input_set_byte += 2;
1086 pppoe_elem_valid = 1;
1090 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1091 pppoe_proto_spec = item->spec;
1092 pppoe_proto_mask = item->mask;
1093 /* Check if PPPoE optional proto_id item
1094 * is used to describe protocol.
1095 * If yes, both spec and mask should be NULL.
1096 * If no, both spec and mask shouldn't be NULL.
1098 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1099 (pppoe_proto_spec && !pppoe_proto_mask)) {
1100 rte_flow_error_set(error, EINVAL,
1101 RTE_FLOW_ERROR_TYPE_ITEM,
1103 "Invalid pppoe proto item");
1106 if (pppoe_proto_spec && pppoe_proto_mask) {
1107 if (pppoe_elem_valid)
1109 list[t].type = ICE_PPPOE;
1110 if (pppoe_proto_mask->proto_id) {
1111 list[t].h_u.pppoe_hdr.ppp_prot_id =
1112 pppoe_proto_spec->proto_id;
1113 list[t].m_u.pppoe_hdr.ppp_prot_id =
1114 pppoe_proto_mask->proto_id;
1115 input_set |= ICE_INSET_PPPOE_PROTO;
1116 input_set_byte += 2;
1117 pppoe_prot_valid = 1;
1119 if ((pppoe_proto_mask->proto_id &
1120 pppoe_proto_spec->proto_id) !=
1121 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1122 (pppoe_proto_mask->proto_id &
1123 pppoe_proto_spec->proto_id) !=
1124 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1125 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1127 *tun_type = ICE_SW_TUN_PPPOE;
1133 case RTE_FLOW_ITEM_TYPE_ESP:
1134 esp_spec = item->spec;
1135 esp_mask = item->mask;
1136 if ((esp_spec && !esp_mask) ||
1137 (!esp_spec && esp_mask)) {
1138 rte_flow_error_set(error, EINVAL,
1139 RTE_FLOW_ERROR_TYPE_ITEM,
1141 "Invalid esp item");
1144 /* Check esp mask and update input set */
1145 if (esp_mask && esp_mask->hdr.seq) {
1146 rte_flow_error_set(error, EINVAL,
1147 RTE_FLOW_ERROR_TYPE_ITEM,
1149 "Invalid esp mask");
1153 if (!esp_spec && !esp_mask && !input_set) {
1155 if (ipv6_valid && udp_valid)
1157 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1158 else if (ipv6_valid)
1159 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1160 else if (ipv4_valid)
1162 } else if (esp_spec && esp_mask &&
1165 list[t].type = ICE_NAT_T;
1167 list[t].type = ICE_ESP;
1168 list[t].h_u.esp_hdr.spi =
1170 list[t].m_u.esp_hdr.spi =
1172 input_set |= ICE_INSET_ESP_SPI;
1173 input_set_byte += 4;
1177 if (!profile_rule) {
1178 if (ipv6_valid && udp_valid)
1179 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1180 else if (ipv4_valid && udp_valid)
1181 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1182 else if (ipv6_valid)
1183 *tun_type = ICE_SW_TUN_IPV6_ESP;
1184 else if (ipv4_valid)
1185 *tun_type = ICE_SW_TUN_IPV4_ESP;
1189 case RTE_FLOW_ITEM_TYPE_AH:
1190 ah_spec = item->spec;
1191 ah_mask = item->mask;
1192 if ((ah_spec && !ah_mask) ||
1193 (!ah_spec && ah_mask)) {
1194 rte_flow_error_set(error, EINVAL,
1195 RTE_FLOW_ERROR_TYPE_ITEM,
1200 /* Check ah mask and update input set */
1202 (ah_mask->next_hdr ||
1203 ah_mask->payload_len ||
1205 ah_mask->reserved)) {
1206 rte_flow_error_set(error, EINVAL,
1207 RTE_FLOW_ERROR_TYPE_ITEM,
1213 if (!ah_spec && !ah_mask && !input_set) {
1215 if (ipv6_valid && udp_valid)
1217 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1218 else if (ipv6_valid)
1219 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1220 else if (ipv4_valid)
1222 } else if (ah_spec && ah_mask &&
1224 list[t].type = ICE_AH;
1225 list[t].h_u.ah_hdr.spi =
1227 list[t].m_u.ah_hdr.spi =
1229 input_set |= ICE_INSET_AH_SPI;
1230 input_set_byte += 4;
1234 if (!profile_rule) {
1237 else if (ipv6_valid)
1238 *tun_type = ICE_SW_TUN_IPV6_AH;
1239 else if (ipv4_valid)
1240 *tun_type = ICE_SW_TUN_IPV4_AH;
1244 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1245 l2tp_spec = item->spec;
1246 l2tp_mask = item->mask;
1247 if ((l2tp_spec && !l2tp_mask) ||
1248 (!l2tp_spec && l2tp_mask)) {
1249 rte_flow_error_set(error, EINVAL,
1250 RTE_FLOW_ERROR_TYPE_ITEM,
1252 "Invalid l2tp item");
1256 if (!l2tp_spec && !l2tp_mask && !input_set) {
1259 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1260 else if (ipv4_valid)
1262 } else if (l2tp_spec && l2tp_mask &&
1263 l2tp_mask->session_id){
1264 list[t].type = ICE_L2TPV3;
1265 list[t].h_u.l2tpv3_sess_hdr.session_id =
1266 l2tp_spec->session_id;
1267 list[t].m_u.l2tpv3_sess_hdr.session_id =
1268 l2tp_mask->session_id;
1269 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1270 input_set_byte += 4;
1274 if (!profile_rule) {
1277 ICE_SW_TUN_IPV6_L2TPV3;
1278 else if (ipv4_valid)
1280 ICE_SW_TUN_IPV4_L2TPV3;
1284 case RTE_FLOW_ITEM_TYPE_PFCP:
1285 pfcp_spec = item->spec;
1286 pfcp_mask = item->mask;
1287 /* Check if PFCP item is used to describe protocol.
1288 * If yes, both spec and mask should be NULL.
1289 * If no, both spec and mask shouldn't be NULL.
1291 if ((!pfcp_spec && pfcp_mask) ||
1292 (pfcp_spec && !pfcp_mask)) {
1293 rte_flow_error_set(error, EINVAL,
1294 RTE_FLOW_ERROR_TYPE_ITEM,
1296 "Invalid PFCP item");
1299 if (pfcp_spec && pfcp_mask) {
1300 /* Check pfcp mask and update input set */
1301 if (pfcp_mask->msg_type ||
1302 pfcp_mask->msg_len ||
1304 rte_flow_error_set(error, EINVAL,
1305 RTE_FLOW_ERROR_TYPE_ITEM,
1307 "Invalid pfcp mask");
1310 if (pfcp_mask->s_field &&
1311 pfcp_spec->s_field == 0x01 &&
1314 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1315 else if (pfcp_mask->s_field &&
1316 pfcp_spec->s_field == 0x01)
1318 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1319 else if (pfcp_mask->s_field &&
1320 !pfcp_spec->s_field &&
1323 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1324 else if (pfcp_mask->s_field &&
1325 !pfcp_spec->s_field)
1327 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1333 case RTE_FLOW_ITEM_TYPE_VOID:
1337 rte_flow_error_set(error, EINVAL,
1338 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1339 "Invalid pattern item.");
1344 if (pppoe_patt_valid && !pppoe_prot_valid) {
1345 if (ipv6_valid && udp_valid)
1346 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1347 else if (ipv6_valid && tcp_valid)
1348 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1349 else if (ipv4_valid && udp_valid)
1350 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1351 else if (ipv4_valid && tcp_valid)
1352 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1353 else if (ipv6_valid)
1354 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1355 else if (ipv4_valid)
1356 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1358 *tun_type = ICE_SW_TUN_PPPOE;
1361 if (*tun_type == ICE_NON_TUN) {
1363 *tun_type = ICE_SW_TUN_VXLAN;
1364 else if (nvgre_valid)
1365 *tun_type = ICE_SW_TUN_NVGRE;
1366 else if (ipv4_valid && tcp_valid)
1367 *tun_type = ICE_SW_IPV4_TCP;
1368 else if (ipv4_valid && udp_valid)
1369 *tun_type = ICE_SW_IPV4_UDP;
1370 else if (ipv6_valid && tcp_valid)
1371 *tun_type = ICE_SW_IPV6_TCP;
1372 else if (ipv6_valid && udp_valid)
1373 *tun_type = ICE_SW_IPV6_UDP;
1376 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1377 rte_flow_error_set(error, EINVAL,
1378 RTE_FLOW_ERROR_TYPE_ITEM,
1380 "too much input set");
/*
 * Parse the action list of a flow rule created through a DCF (Device
 * Config Function) port and fill the switch-action part of @rule_info.
 * Only the VF action is visible here: matched traffic is forwarded to
 * the VSI of the target VF.  Any other action type falls through to the
 * error branch below.
 * NOTE(review): several lines (returns, braces) are elided in this view;
 * comments describe only what the visible code establishes.
 */
1392 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1393 const struct rte_flow_action *actions,
1394 struct rte_flow_error *error,
1395 struct ice_adv_rule_info *rule_info)
1397 const struct rte_flow_action_vf *act_vf;
1398 const struct rte_flow_action *action;
1399 enum rte_flow_action_type action_type;
/* Walk the action array until the END terminator. */
1401 for (action = actions; action->type !=
1402 RTE_FLOW_ACTION_TYPE_END; action++) {
1403 action_type = action->type;
1404 switch (action_type) {
1405 case RTE_FLOW_ACTION_TYPE_VF:
/* Forward matched packets to a VF's VSI. */
1406 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1407 act_vf = action->conf;
/*
 * "original" selects the DCF's own function number as the
 * VSI handle; otherwise the user-supplied VF id is used.
 * TODO(review): confirm semantics of act_vf->original here.
 */
1408 if (act_vf->original)
1409 rule_info->sw_act.vsi_handle =
1410 ad->real_hw.avf.bus.func;
1412 rule_info->sw_act.vsi_handle = act_vf->id;
/* Unsupported action type: report and bail out. */
1415 rte_flow_error_set(error,
1416 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1418 "Invalid action type or queue number");
/* The rule matches on the Rx path of the chosen VSI. */
1423 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1424 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Fixed priority used for all switch rules in this driver. */
1426 rule_info->priority = 5;
/*
 * Parse the action list of a flow rule on a regular (non-DCF) PF port
 * and fill the switch-action part of @rule_info.  Supported actions:
 *   - RSS (used here as "queue group"): forward to a contiguous,
 *     power-of-two sized group of queues;
 *   - QUEUE: forward to a single Rx queue;
 *   - DROP: drop matched packets;
 *   - VOID: ignored.
 * Anything else falls through to the error branch at the bottom.
 * NOTE(review): returns/breaks/braces are elided in this view; comments
 * describe only what the visible code establishes.
 */
1432 ice_switch_parse_action(struct ice_pf *pf,
1433 const struct rte_flow_action *actions,
1434 struct rte_flow_error *error,
1435 struct ice_adv_rule_info *rule_info)
1437 struct ice_vsi *vsi = pf->main_vsi;
1438 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1439 const struct rte_flow_action_queue *act_q;
1440 const struct rte_flow_action_rss *act_qgrop;
1441 uint16_t base_queue, i;
1442 const struct rte_flow_action *action;
1443 enum rte_flow_action_type action_type;
/* Hardware accepts only these queue-group sizes (powers of two). */
1444 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1445 2, 4, 8, 16, 32, 64, 128};
/* Translate the VSI-relative queue index to an absolute HW queue id. */
1447 base_queue = pf->base_queue + vsi->base_queue;
1448 for (action = actions; action->type !=
1449 RTE_FLOW_ACTION_TYPE_END; action++) {
1450 action_type = action->type;
1451 switch (action_type) {
1452 case RTE_FLOW_ACTION_TYPE_RSS:
1453 act_qgrop = action->conf;
/* A "group" of one queue is not a valid queue group. */
1454 if (act_qgrop->queue_num <= 1)
1456 rule_info->sw_act.fltr_act =
/* First queue of the group, in absolute HW numbering. */
1458 rule_info->sw_act.fwd_id.q_id =
1459 base_queue + act_qgrop->queue[0];
/* Group size must be one of the supported power-of-two values. */
1460 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1461 if (act_qgrop->queue_num ==
1462 valid_qgrop_number[i])
1465 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured Rx queue range. */
1467 if ((act_qgrop->queue[0] +
1468 act_qgrop->queue_num) >
1469 dev->data->nb_rx_queues)
/* Queues in the group must be consecutive. */
1471 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1472 if (act_qgrop->queue[i + 1] !=
1473 act_qgrop->queue[i] + 1)
1475 rule_info->sw_act.qgrp_size =
1476 act_qgrop->queue_num;
1478 case RTE_FLOW_ACTION_TYPE_QUEUE:
1479 act_q = action->conf;
/* Reject queue indices outside the configured Rx queues. */
1480 if (act_q->index >= dev->data->nb_rx_queues)
1482 rule_info->sw_act.fltr_act =
1484 rule_info->sw_act.fwd_id.q_id =
1485 base_queue + act_q->index;
1488 case RTE_FLOW_ACTION_TYPE_DROP:
1489 rule_info->sw_act.fltr_act =
1493 case RTE_FLOW_ACTION_TYPE_VOID:
/* All actions apply to the PF's main VSI. */
1501 rule_info->sw_act.vsi_handle = vsi->idx;
1503 rule_info->sw_act.src = vsi->idx;
1504 rule_info->priority = 5;
/* Error path shared by the invalid cases above. */
1509 rte_flow_error_set(error,
1510 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1512 "Invalid action type or queue number");
/*
 * Validate the action list before parsing: count the meaningful actions
 * (VF / RSS / QUEUE / DROP; VOID is ignored) and require exactly one.
 * Unknown action types are rejected immediately.
 * NOTE(review): the counting statement and returns are elided in this
 * view; only the visible structure is documented.
 */
1517 ice_switch_check_action(const struct rte_flow_action *actions,
1518 struct rte_flow_error *error)
1520 const struct rte_flow_action *action;
1521 enum rte_flow_action_type action_type;
1522 uint16_t actions_num = 0;
1524 for (action = actions; action->type !=
1525 RTE_FLOW_ACTION_TYPE_END; action++) {
1526 action_type = action->type;
1527 switch (action_type) {
/* These four are the supported terminal actions. */
1528 case RTE_FLOW_ACTION_TYPE_VF:
1529 case RTE_FLOW_ACTION_TYPE_RSS:
1530 case RTE_FLOW_ACTION_TYPE_QUEUE:
1531 case RTE_FLOW_ACTION_TYPE_DROP:
/* VOID actions do not count toward the limit. */
1534 case RTE_FLOW_ACTION_TYPE_VOID:
1537 rte_flow_error_set(error,
1538 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1540 "Invalid action type");
/* Exactly one terminal action is allowed per rule. */
1545 if (actions_num != 1) {
1546 rte_flow_error_set(error,
1547 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1549 "Invalid action number");
/*
 * Return whether @tun_type is a "profile" rule type, i.e. a rule that
 * matches on a switch profile ID rather than on an input set.  Such
 * rules are allowed to have an empty input set (see the caller's
 * validation in ice_switch_parse_pattern_action).
 */
1557 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1560 case ICE_SW_TUN_PROFID_IPV6_ESP:
1561 case ICE_SW_TUN_PROFID_IPV6_AH:
1562 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1563 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1564 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1565 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1566 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1567 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/*
 * Top-level parser entry for the switch filter engine.  Validates the
 * pattern and actions, builds the lookup-element list and rule info,
 * and hands the result back through @meta for the create stage.
 *
 * Flow:
 *  1. Pre-scan the pattern; a fully-masked ETH type selects the
 *     tunnel-and-non-tunnel rule type.
 *  2. Allocate the lookup list (one extra slot per ETH item, which may
 *     expand to two lookups) and the sw_meta carrier.
 *  3. Match the pattern against the parser's supported-pattern array.
 *  4. Extract the input set; reject it unless it is non-empty or the
 *     rule is a profile rule (which matches by profile ID instead).
 *  5. Check and parse the actions (DCF vs. regular PF path).
 *  6. Publish list/lkups_num/rule_info via *meta.
 * On any failure the allocated list and sw_meta are freed.
 * NOTE(review): gotos, frees and returns are partially elided in this
 * view; comments describe only the visible statements.
 */
1577 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1578 struct ice_pattern_match_item *array,
1580 const struct rte_flow_item pattern[],
1581 const struct rte_flow_action actions[],
1583 struct rte_flow_error *error)
1585 struct ice_pf *pf = &ad->pf;
1586 uint64_t inputset = 0;
1588 struct sw_meta *sw_meta_ptr = NULL;
1589 struct ice_adv_rule_info rule_info;
1590 struct ice_adv_lkup_elem *list = NULL;
1591 uint16_t lkups_num = 0;
1592 const struct rte_flow_item *item = pattern;
1593 uint16_t item_num = 0;
1594 enum ice_sw_tunnel_type tun_type =
1596 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: count items and detect a fully-masked ether type. */
1598 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1600 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1601 const struct rte_flow_item_eth *eth_mask;
1603 eth_mask = item->mask;
/* Exact-match ethertype => rule applies to tunnel and non-tunnel. */
1606 if (eth_mask->type == UINT16_MAX)
1607 tun_type = ICE_SW_TUN_AND_NON_TUN;
1609 /* reserve one more memory slot for ETH which may
1610 * consume 2 lookup items.
1612 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Lookup-element array sized by the pre-scan above. */
1616 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1618 rte_flow_error_set(error, EINVAL,
1619 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1620 "No memory for PMD internal items");
/* Carrier handed to the create stage via *meta. */
1625 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1627 rte_flow_error_set(error, EINVAL,
1628 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1629 "No memory for sw_pattern_meta_ptr");
/* The pattern must be one of this parser's supported patterns. */
1633 pattern_match_item =
1634 ice_search_pattern_match_item(pattern, array, array_len, error);
1635 if (!pattern_match_item) {
1636 rte_flow_error_set(error, EINVAL,
1637 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1638 "Invalid input pattern");
/* Extract the input set, lookups and tunnel type from the pattern. */
1642 inputset = ice_switch_inset_get
1643 (pattern, error, list, &lkups_num, &tun_type);
/*
 * An empty input set is only legal for profile rules; any field set
 * outside the pattern's supported mask is rejected.
 */
1644 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1645 (inputset & ~pattern_match_item->input_set_mask)) {
1646 rte_flow_error_set(error, EINVAL,
1647 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1649 "Invalid input set");
1653 memset(&rule_info, 0, sizeof(rule_info));
1654 rule_info.tun_type = tun_type;
/* Exactly one terminal action must be present. */
1656 ret = ice_switch_check_action(actions, error);
1658 rte_flow_error_set(error, EINVAL,
1659 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1660 "Invalid input action number");
/* DCF ports parse actions differently from regular PF ports. */
1664 if (ad->hw.dcf_enabled)
1665 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1668 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1671 rte_flow_error_set(error, EINVAL,
1672 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1673 "Invalid input action");
/* Success: hand ownership of list/sw_meta_ptr to the caller. */
1678 *meta = sw_meta_ptr;
1679 ((struct sw_meta *)*meta)->list = list;
1680 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1681 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error paths: release whatever was allocated above. */
1684 rte_free(sw_meta_ptr);
1687 rte_free(pattern_match_item);
1693 rte_free(sw_meta_ptr);
1694 rte_free(pattern_match_item);
/*
 * Query callback for the switch filter engine.  The switch filter has
 * no counters, so a COUNT query always fails with EINVAL.
 */
1700 ice_switch_query(struct ice_adapter *ad __rte_unused,
1701 struct rte_flow *flow __rte_unused,
1702 struct rte_flow_query_count *count __rte_unused,
1703 struct rte_flow_error *error)
1705 rte_flow_error_set(error, EINVAL,
1706 RTE_FLOW_ERROR_TYPE_HANDLE,
1708 "count action not supported by switch filter");
/*
 * Redirect an existing switch rule to a new VSI (used e.g. when a VF is
 * reset and its VSI number changes).  Finds the filter entry matching
 * @flow in the recipe's filter list, duplicates its lookups, removes
 * the old HW rule, updates the VSI context with the new VSI number and
 * replays the rule.
 * NOTE(review): several statements (returns, the list-iteration tail,
 * the replay arguments) are elided in this view; comments cover only
 * the visible code.
 */
1714 ice_switch_redirect(struct ice_adapter *ad,
1715 struct rte_flow *flow,
1716 struct ice_flow_redirect *rd)
1718 struct ice_rule_query_data *rdata = flow->rule;
1719 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1720 struct ice_adv_lkup_elem *lkups_dp = NULL;
1721 struct LIST_HEAD_TYPE *list_head;
1722 struct ice_adv_rule_info rinfo;
1723 struct ice_hw *hw = &ad->hw;
1724 struct ice_switch_info *sw;
/* Only rules bound to the VSI being redirected are affected. */
1728 if (rdata->vsi_handle != rd->vsi_handle)
1731 sw = hw->switch_info;
/* No recipe created for this rule's recipe id => nothing to do. */
1732 if (!sw->recp_list[rdata->rid].recp_created)
/* Only VSI redirection is handled here. */
1735 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Locate this flow's filter entry in the recipe's rule list. */
1738 list_head = &sw->recp_list[rdata->rid].filt_rules;
1739 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1741 rinfo = list_itr->rule_info;
/* Match by rule id; accept either direct-VSI or VSI-list forwarding. */
1742 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1743 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1744 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1745 (rinfo.fltr_rule_id == rdata->rule_id &&
1746 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1747 lkups_cnt = list_itr->lkups_cnt;
/* Deep-copy the lookups before the old rule is removed. */
1748 lkups_dp = (struct ice_adv_lkup_elem *)
1749 ice_memdup(hw, list_itr->lkups,
1750 sizeof(*list_itr->lkups) *
1751 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1754 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* Collapse a VSI-list action back to a single-VSI forward. */
1758 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1759 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1760 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1769 /* Remove the old rule */
1770 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1773 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1779 /* Update VSI context */
1780 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1782 /* Replay the rule */
1783 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1786 PMD_DRV_LOG(ERR, "Failed to replay the rule");
/* Free the duplicated lookup array in all cases. */
1791 ice_free(hw, lkups_dp);
/*
 * Engine init hook: pick the distributor parser matching the active
 * DDP package (comms vs. OS default) and register either the
 * permission-stage parser (pipeline mode) or the distributor parser.
 * NOTE(review): the else/error branch for an unknown package type is
 * elided in this view.
 */
1796 ice_switch_init(struct ice_adapter *ad)
1799 struct ice_flow_parser *dist_parser;
1800 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1802 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1803 dist_parser = &ice_switch_dist_parser_comms;
1804 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1805 dist_parser = &ice_switch_dist_parser_os;
/* Pipeline mode uses the permission stage; otherwise distributor. */
1809 if (ad->devargs.pipe_mode_support)
1810 ret = ice_register_parser(perm_parser, ad);
1812 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine uninit hook: mirror of ice_switch_init() — unregister the
 * parser that was registered for the active package / mode.
 */
1817 ice_switch_uninit(struct ice_adapter *ad)
1819 struct ice_flow_parser *dist_parser;
1820 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1822 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1823 dist_parser = &ice_switch_dist_parser_comms;
1825 dist_parser = &ice_switch_dist_parser_os;
1827 if (ad->devargs.pipe_mode_support)
1828 ice_unregister_parser(perm_parser, ad);
1830 ice_unregister_parser(dist_parser, ad);
/* Switch filter engine descriptor registered with the generic flow
 * framework (see RTE_INIT below); wires the callbacks defined above.
 */
1834 ice_flow_engine ice_switch_engine = {
1835 .init = ice_switch_init,
1836 .uninit = ice_switch_uninit,
1837 .create = ice_switch_create,
1838 .destroy = ice_switch_destroy,
1839 .query_count = ice_switch_query,
1840 .redirect = ice_switch_redirect,
1841 .free = ice_switch_filter_rule_free,
1842 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser used with the OS-default DDP package. */
1846 ice_flow_parser ice_switch_dist_parser_os = {
1847 .engine = &ice_switch_engine,
1848 .array = ice_switch_pattern_dist_os,
1849 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1850 .parse_pattern_action = ice_switch_parse_pattern_action,
1851 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser used with the comms DDP package. */
1855 ice_flow_parser ice_switch_dist_parser_comms = {
1856 .engine = &ice_switch_engine,
1857 .array = ice_switch_pattern_dist_comms,
1858 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1859 .parse_pattern_action = ice_switch_parse_pattern_action,
1860 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser, selected when pipeline mode is enabled. */
1864 ice_flow_parser ice_switch_perm_parser = {
1865 .engine = &ice_switch_engine,
1866 .array = ice_switch_pattern_perm,
1867 .array_len = RTE_DIM(ice_switch_pattern_perm),
1868 .parse_pattern_action = ice_switch_parse_pattern_action,
1869 .stage = ICE_FLOW_STAGE_PERMISSION,
1872 RTE_INIT(ice_sw_engine_init)
1874 struct ice_flow_engine *engine = &ice_switch_engine;
1875 ice_register_flow_engine(engine);