/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Maximum number of queue-group types usable in a queue-group action. */
#define MAX_QGRP_NUM_TYPE 7
/* Upper bound on matched input-set bytes for one switch rule. */
#define MAX_INPUT_SET_BYTE 32
/* PPP protocol IDs carried in the PPPoE payload (network byte values). */
#define ICE_PPP_IPV4_PROTO 0x0021
#define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 protocol number for GRE, used to spot NVGRE-over-IPv4 patterns. */
#define ICE_IPV4_PROTO_NVGRE 0x002F
/*
 * Input-set bitmaps for each supported switch-filter pattern.
 * Each macro ORs together the ICE_INSET_* fields a pattern may match on.
 *
 * NOTE(review): the extracted file carried line-number artifacts (removed
 * here) and the continuation line of ICE_SW_INSET_MAC_VLAN was lost; it is
 * restored as ICE_INSET_VLAN_INNER to match upstream DPDK -- confirm
 * against the canonical ice_switch_filter.c.
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
	ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ ( \
	ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (distributor stage): outer tunnel fields plus inner IPv4 dst. */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel (permission stage): inner/tunnel fields only. */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns, optionally with an inner protocol / IP header. */
#define ICE_SW_INSET_MAC_PPPOE ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* IPsec / L2TPv3 / PFCP patterns layered on the plain IP input sets. */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
141 struct ice_adv_lkup_elem *list;
143 struct ice_adv_rule_info rule_info;
146 static struct ice_flow_parser ice_switch_dist_parser;
147 static struct ice_flow_parser ice_switch_perm_parser;
150 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
152 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
153 {pattern_ethertype_vlan,
154 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
155 {pattern_ethertype_qinq,
156 ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
158 ICE_INSET_NONE, ICE_INSET_NONE},
160 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
161 {pattern_eth_ipv4_udp,
162 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
163 {pattern_eth_ipv4_tcp,
164 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
166 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
167 {pattern_eth_ipv6_udp,
168 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
169 {pattern_eth_ipv6_tcp,
170 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
172 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
173 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
174 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
175 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
176 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
177 {pattern_eth_ipv4_nvgre_eth_ipv4,
178 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
179 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
180 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
181 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
182 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
184 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
185 {pattern_eth_vlan_pppoes,
186 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
187 {pattern_eth_pppoes_proto,
188 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
189 {pattern_eth_vlan_pppoes_proto,
190 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
191 {pattern_eth_pppoes_ipv4,
192 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
193 {pattern_eth_pppoes_ipv4_tcp,
194 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
195 {pattern_eth_pppoes_ipv4_udp,
196 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
197 {pattern_eth_pppoes_ipv6,
198 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
199 {pattern_eth_pppoes_ipv6_tcp,
200 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
201 {pattern_eth_pppoes_ipv6_udp,
202 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
203 {pattern_eth_vlan_pppoes_ipv4,
204 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
205 {pattern_eth_vlan_pppoes_ipv4_tcp,
206 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
207 {pattern_eth_vlan_pppoes_ipv4_udp,
208 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
209 {pattern_eth_vlan_pppoes_ipv6,
210 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
211 {pattern_eth_vlan_pppoes_ipv6_tcp,
212 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
213 {pattern_eth_vlan_pppoes_ipv6_udp,
214 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
215 {pattern_eth_ipv4_esp,
216 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_esp,
218 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
219 {pattern_eth_ipv6_esp,
220 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
221 {pattern_eth_ipv6_udp_esp,
222 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_ah,
224 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
225 {pattern_eth_ipv6_ah,
226 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
227 {pattern_eth_ipv6_udp_ah,
228 ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_ipv4_l2tp,
230 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
231 {pattern_eth_ipv6_l2tp,
232 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
233 {pattern_eth_ipv4_pfcp,
234 ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_ipv6_pfcp,
236 ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_qinq_ipv4,
238 ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
239 {pattern_eth_qinq_ipv6,
240 ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
241 {pattern_eth_qinq_pppoes,
242 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
243 {pattern_eth_qinq_pppoes_proto,
244 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
245 {pattern_eth_qinq_pppoes_ipv4,
246 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
247 {pattern_eth_qinq_pppoes_ipv6,
248 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
252 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
254 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
255 {pattern_ethertype_vlan,
256 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
257 {pattern_ethertype_qinq,
258 ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
260 ICE_INSET_NONE, ICE_INSET_NONE},
262 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
263 {pattern_eth_ipv4_udp,
264 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
265 {pattern_eth_ipv4_tcp,
266 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
268 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
269 {pattern_eth_ipv6_udp,
270 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
271 {pattern_eth_ipv6_tcp,
272 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
273 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
274 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
275 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
276 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
277 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
278 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
279 {pattern_eth_ipv4_nvgre_eth_ipv4,
280 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
281 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
282 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
283 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
284 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
286 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
287 {pattern_eth_vlan_pppoes,
288 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
289 {pattern_eth_pppoes_proto,
290 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
291 {pattern_eth_vlan_pppoes_proto,
292 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
293 {pattern_eth_pppoes_ipv4,
294 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
295 {pattern_eth_pppoes_ipv4_tcp,
296 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
297 {pattern_eth_pppoes_ipv4_udp,
298 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
299 {pattern_eth_pppoes_ipv6,
300 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
301 {pattern_eth_pppoes_ipv6_tcp,
302 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
303 {pattern_eth_pppoes_ipv6_udp,
304 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
305 {pattern_eth_vlan_pppoes_ipv4,
306 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
307 {pattern_eth_vlan_pppoes_ipv4_tcp,
308 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
309 {pattern_eth_vlan_pppoes_ipv4_udp,
310 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
311 {pattern_eth_vlan_pppoes_ipv6,
312 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
313 {pattern_eth_vlan_pppoes_ipv6_tcp,
314 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
315 {pattern_eth_vlan_pppoes_ipv6_udp,
316 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
317 {pattern_eth_ipv4_esp,
318 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
319 {pattern_eth_ipv4_udp_esp,
320 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
321 {pattern_eth_ipv6_esp,
322 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
323 {pattern_eth_ipv6_udp_esp,
324 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
325 {pattern_eth_ipv4_ah,
326 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
327 {pattern_eth_ipv6_ah,
328 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
329 {pattern_eth_ipv6_udp_ah,
330 ICE_INSET_NONE, ICE_INSET_NONE},
331 {pattern_eth_ipv4_l2tp,
332 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
333 {pattern_eth_ipv6_l2tp,
334 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
335 {pattern_eth_ipv4_pfcp,
336 ICE_INSET_NONE, ICE_INSET_NONE},
337 {pattern_eth_ipv6_pfcp,
338 ICE_INSET_NONE, ICE_INSET_NONE},
339 {pattern_eth_qinq_ipv4,
340 ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
341 {pattern_eth_qinq_ipv6,
342 ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
343 {pattern_eth_qinq_pppoes,
344 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
345 {pattern_eth_qinq_pppoes_proto,
346 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
347 {pattern_eth_qinq_pppoes_ipv4,
348 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
349 {pattern_eth_qinq_pppoes_ipv6,
350 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
354 ice_switch_create(struct ice_adapter *ad,
355 struct rte_flow *flow,
357 struct rte_flow_error *error)
360 struct ice_pf *pf = &ad->pf;
361 struct ice_hw *hw = ICE_PF_TO_HW(pf);
362 struct ice_rule_query_data rule_added = {0};
363 struct ice_rule_query_data *filter_ptr;
364 struct ice_adv_lkup_elem *list =
365 ((struct sw_meta *)meta)->list;
367 ((struct sw_meta *)meta)->lkups_num;
368 struct ice_adv_rule_info *rule_info =
369 &((struct sw_meta *)meta)->rule_info;
371 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
372 rte_flow_error_set(error, EINVAL,
373 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
374 "item number too large for rule");
378 rte_flow_error_set(error, EINVAL,
379 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
380 "lookup list should not be NULL");
383 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
385 filter_ptr = rte_zmalloc("ice_switch_filter",
386 sizeof(struct ice_rule_query_data), 0);
388 rte_flow_error_set(error, EINVAL,
389 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
390 "No memory for ice_switch_filter");
393 flow->rule = filter_ptr;
394 rte_memcpy(filter_ptr,
396 sizeof(struct ice_rule_query_data));
398 rte_flow_error_set(error, EINVAL,
399 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
400 "switch filter create flow fail");
416 ice_switch_destroy(struct ice_adapter *ad,
417 struct rte_flow *flow,
418 struct rte_flow_error *error)
420 struct ice_hw *hw = &ad->hw;
422 struct ice_rule_query_data *filter_ptr;
424 filter_ptr = (struct ice_rule_query_data *)
428 rte_flow_error_set(error, EINVAL,
429 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
431 " create by switch filter");
435 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
437 rte_flow_error_set(error, EINVAL,
438 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
439 "fail to destroy switch filter rule");
443 rte_free(filter_ptr);
448 ice_switch_filter_rule_free(struct rte_flow *flow)
450 rte_free(flow->rule);
454 ice_switch_inset_get(const struct rte_flow_item pattern[],
455 struct rte_flow_error *error,
456 struct ice_adv_lkup_elem *list,
458 enum ice_sw_tunnel_type *tun_type)
460 const struct rte_flow_item *item = pattern;
461 enum rte_flow_item_type item_type;
462 const struct rte_flow_item_eth *eth_spec, *eth_mask;
463 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
464 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
465 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
466 const struct rte_flow_item_udp *udp_spec, *udp_mask;
467 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
468 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
469 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
470 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
471 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
472 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
474 const struct rte_flow_item_esp *esp_spec, *esp_mask;
475 const struct rte_flow_item_ah *ah_spec, *ah_mask;
476 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
477 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
478 uint64_t input_set = ICE_INSET_NONE;
479 uint16_t input_set_byte = 0;
480 bool pppoe_elem_valid = 0;
481 bool pppoe_patt_valid = 0;
482 bool pppoe_prot_valid = 0;
483 bool inner_vlan_valid = 0;
484 bool outer_vlan_valid = 0;
485 bool tunnel_valid = 0;
486 bool profile_rule = 0;
487 bool nvgre_valid = 0;
488 bool vxlan_valid = 0;
495 for (item = pattern; item->type !=
496 RTE_FLOW_ITEM_TYPE_END; item++) {
498 rte_flow_error_set(error, EINVAL,
499 RTE_FLOW_ERROR_TYPE_ITEM,
501 "Not support range");
504 item_type = item->type;
507 case RTE_FLOW_ITEM_TYPE_ETH:
508 eth_spec = item->spec;
509 eth_mask = item->mask;
510 if (eth_spec && eth_mask) {
511 const uint8_t *a = eth_mask->src.addr_bytes;
512 const uint8_t *b = eth_mask->dst.addr_bytes;
513 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
514 if (a[j] && tunnel_valid) {
524 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
525 if (b[j] && tunnel_valid) {
536 input_set |= ICE_INSET_ETHERTYPE;
537 list[t].type = (tunnel_valid == 0) ?
538 ICE_MAC_OFOS : ICE_MAC_IL;
539 struct ice_ether_hdr *h;
540 struct ice_ether_hdr *m;
542 h = &list[t].h_u.eth_hdr;
543 m = &list[t].m_u.eth_hdr;
544 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
545 if (eth_mask->src.addr_bytes[j]) {
547 eth_spec->src.addr_bytes[j];
549 eth_mask->src.addr_bytes[j];
553 if (eth_mask->dst.addr_bytes[j]) {
555 eth_spec->dst.addr_bytes[j];
557 eth_mask->dst.addr_bytes[j];
564 if (eth_mask->type) {
565 list[t].type = ICE_ETYPE_OL;
566 list[t].h_u.ethertype.ethtype_id =
568 list[t].m_u.ethertype.ethtype_id =
576 case RTE_FLOW_ITEM_TYPE_IPV4:
577 ipv4_spec = item->spec;
578 ipv4_mask = item->mask;
580 if (ipv4_spec && ipv4_mask) {
581 /* Check IPv4 mask and update input set */
582 if (ipv4_mask->hdr.version_ihl ||
583 ipv4_mask->hdr.total_length ||
584 ipv4_mask->hdr.packet_id ||
585 ipv4_mask->hdr.hdr_checksum) {
586 rte_flow_error_set(error, EINVAL,
587 RTE_FLOW_ERROR_TYPE_ITEM,
589 "Invalid IPv4 mask.");
594 if (ipv4_mask->hdr.type_of_service)
596 ICE_INSET_TUN_IPV4_TOS;
597 if (ipv4_mask->hdr.src_addr)
599 ICE_INSET_TUN_IPV4_SRC;
600 if (ipv4_mask->hdr.dst_addr)
602 ICE_INSET_TUN_IPV4_DST;
603 if (ipv4_mask->hdr.time_to_live)
605 ICE_INSET_TUN_IPV4_TTL;
606 if (ipv4_mask->hdr.next_proto_id)
608 ICE_INSET_TUN_IPV4_PROTO;
610 if (ipv4_mask->hdr.src_addr)
611 input_set |= ICE_INSET_IPV4_SRC;
612 if (ipv4_mask->hdr.dst_addr)
613 input_set |= ICE_INSET_IPV4_DST;
614 if (ipv4_mask->hdr.time_to_live)
615 input_set |= ICE_INSET_IPV4_TTL;
616 if (ipv4_mask->hdr.next_proto_id)
618 ICE_INSET_IPV4_PROTO;
619 if (ipv4_mask->hdr.type_of_service)
623 list[t].type = (tunnel_valid == 0) ?
624 ICE_IPV4_OFOS : ICE_IPV4_IL;
625 if (ipv4_mask->hdr.src_addr) {
626 list[t].h_u.ipv4_hdr.src_addr =
627 ipv4_spec->hdr.src_addr;
628 list[t].m_u.ipv4_hdr.src_addr =
629 ipv4_mask->hdr.src_addr;
632 if (ipv4_mask->hdr.dst_addr) {
633 list[t].h_u.ipv4_hdr.dst_addr =
634 ipv4_spec->hdr.dst_addr;
635 list[t].m_u.ipv4_hdr.dst_addr =
636 ipv4_mask->hdr.dst_addr;
639 if (ipv4_mask->hdr.time_to_live) {
640 list[t].h_u.ipv4_hdr.time_to_live =
641 ipv4_spec->hdr.time_to_live;
642 list[t].m_u.ipv4_hdr.time_to_live =
643 ipv4_mask->hdr.time_to_live;
646 if (ipv4_mask->hdr.next_proto_id) {
647 list[t].h_u.ipv4_hdr.protocol =
648 ipv4_spec->hdr.next_proto_id;
649 list[t].m_u.ipv4_hdr.protocol =
650 ipv4_mask->hdr.next_proto_id;
653 if ((ipv4_spec->hdr.next_proto_id &
654 ipv4_mask->hdr.next_proto_id) ==
655 ICE_IPV4_PROTO_NVGRE)
656 *tun_type = ICE_SW_TUN_AND_NON_TUN;
657 if (ipv4_mask->hdr.type_of_service) {
658 list[t].h_u.ipv4_hdr.tos =
659 ipv4_spec->hdr.type_of_service;
660 list[t].m_u.ipv4_hdr.tos =
661 ipv4_mask->hdr.type_of_service;
668 case RTE_FLOW_ITEM_TYPE_IPV6:
669 ipv6_spec = item->spec;
670 ipv6_mask = item->mask;
672 if (ipv6_spec && ipv6_mask) {
673 if (ipv6_mask->hdr.payload_len) {
674 rte_flow_error_set(error, EINVAL,
675 RTE_FLOW_ERROR_TYPE_ITEM,
677 "Invalid IPv6 mask");
681 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
682 if (ipv6_mask->hdr.src_addr[j] &&
685 ICE_INSET_TUN_IPV6_SRC;
687 } else if (ipv6_mask->hdr.src_addr[j]) {
688 input_set |= ICE_INSET_IPV6_SRC;
692 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
693 if (ipv6_mask->hdr.dst_addr[j] &&
696 ICE_INSET_TUN_IPV6_DST;
698 } else if (ipv6_mask->hdr.dst_addr[j]) {
699 input_set |= ICE_INSET_IPV6_DST;
703 if (ipv6_mask->hdr.proto &&
706 ICE_INSET_TUN_IPV6_NEXT_HDR;
707 else if (ipv6_mask->hdr.proto)
709 ICE_INSET_IPV6_NEXT_HDR;
710 if (ipv6_mask->hdr.hop_limits &&
713 ICE_INSET_TUN_IPV6_HOP_LIMIT;
714 else if (ipv6_mask->hdr.hop_limits)
716 ICE_INSET_IPV6_HOP_LIMIT;
717 if ((ipv6_mask->hdr.vtc_flow &
719 (RTE_IPV6_HDR_TC_MASK)) &&
722 ICE_INSET_TUN_IPV6_TC;
723 else if (ipv6_mask->hdr.vtc_flow &
725 (RTE_IPV6_HDR_TC_MASK))
726 input_set |= ICE_INSET_IPV6_TC;
728 list[t].type = (tunnel_valid == 0) ?
729 ICE_IPV6_OFOS : ICE_IPV6_IL;
730 struct ice_ipv6_hdr *f;
731 struct ice_ipv6_hdr *s;
732 f = &list[t].h_u.ipv6_hdr;
733 s = &list[t].m_u.ipv6_hdr;
734 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
735 if (ipv6_mask->hdr.src_addr[j]) {
737 ipv6_spec->hdr.src_addr[j];
739 ipv6_mask->hdr.src_addr[j];
742 if (ipv6_mask->hdr.dst_addr[j]) {
744 ipv6_spec->hdr.dst_addr[j];
746 ipv6_mask->hdr.dst_addr[j];
750 if (ipv6_mask->hdr.proto) {
752 ipv6_spec->hdr.proto;
754 ipv6_mask->hdr.proto;
757 if (ipv6_mask->hdr.hop_limits) {
759 ipv6_spec->hdr.hop_limits;
761 ipv6_mask->hdr.hop_limits;
764 if (ipv6_mask->hdr.vtc_flow &
766 (RTE_IPV6_HDR_TC_MASK)) {
767 struct ice_le_ver_tc_flow vtf;
768 vtf.u.fld.version = 0;
769 vtf.u.fld.flow_label = 0;
770 vtf.u.fld.tc = (rte_be_to_cpu_32
771 (ipv6_spec->hdr.vtc_flow) &
772 RTE_IPV6_HDR_TC_MASK) >>
773 RTE_IPV6_HDR_TC_SHIFT;
774 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
775 vtf.u.fld.tc = (rte_be_to_cpu_32
776 (ipv6_mask->hdr.vtc_flow) &
777 RTE_IPV6_HDR_TC_MASK) >>
778 RTE_IPV6_HDR_TC_SHIFT;
779 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
786 case RTE_FLOW_ITEM_TYPE_UDP:
787 udp_spec = item->spec;
788 udp_mask = item->mask;
790 if (udp_spec && udp_mask) {
791 /* Check UDP mask and update input set*/
792 if (udp_mask->hdr.dgram_len ||
793 udp_mask->hdr.dgram_cksum) {
794 rte_flow_error_set(error, EINVAL,
795 RTE_FLOW_ERROR_TYPE_ITEM,
802 if (udp_mask->hdr.src_port)
804 ICE_INSET_TUN_UDP_SRC_PORT;
805 if (udp_mask->hdr.dst_port)
807 ICE_INSET_TUN_UDP_DST_PORT;
809 if (udp_mask->hdr.src_port)
811 ICE_INSET_UDP_SRC_PORT;
812 if (udp_mask->hdr.dst_port)
814 ICE_INSET_UDP_DST_PORT;
816 if (*tun_type == ICE_SW_TUN_VXLAN &&
818 list[t].type = ICE_UDP_OF;
820 list[t].type = ICE_UDP_ILOS;
821 if (udp_mask->hdr.src_port) {
822 list[t].h_u.l4_hdr.src_port =
823 udp_spec->hdr.src_port;
824 list[t].m_u.l4_hdr.src_port =
825 udp_mask->hdr.src_port;
828 if (udp_mask->hdr.dst_port) {
829 list[t].h_u.l4_hdr.dst_port =
830 udp_spec->hdr.dst_port;
831 list[t].m_u.l4_hdr.dst_port =
832 udp_mask->hdr.dst_port;
839 case RTE_FLOW_ITEM_TYPE_TCP:
840 tcp_spec = item->spec;
841 tcp_mask = item->mask;
843 if (tcp_spec && tcp_mask) {
844 /* Check TCP mask and update input set */
845 if (tcp_mask->hdr.sent_seq ||
846 tcp_mask->hdr.recv_ack ||
847 tcp_mask->hdr.data_off ||
848 tcp_mask->hdr.tcp_flags ||
849 tcp_mask->hdr.rx_win ||
850 tcp_mask->hdr.cksum ||
851 tcp_mask->hdr.tcp_urp) {
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
860 if (tcp_mask->hdr.src_port)
862 ICE_INSET_TUN_TCP_SRC_PORT;
863 if (tcp_mask->hdr.dst_port)
865 ICE_INSET_TUN_TCP_DST_PORT;
867 if (tcp_mask->hdr.src_port)
869 ICE_INSET_TCP_SRC_PORT;
870 if (tcp_mask->hdr.dst_port)
872 ICE_INSET_TCP_DST_PORT;
874 list[t].type = ICE_TCP_IL;
875 if (tcp_mask->hdr.src_port) {
876 list[t].h_u.l4_hdr.src_port =
877 tcp_spec->hdr.src_port;
878 list[t].m_u.l4_hdr.src_port =
879 tcp_mask->hdr.src_port;
882 if (tcp_mask->hdr.dst_port) {
883 list[t].h_u.l4_hdr.dst_port =
884 tcp_spec->hdr.dst_port;
885 list[t].m_u.l4_hdr.dst_port =
886 tcp_mask->hdr.dst_port;
893 case RTE_FLOW_ITEM_TYPE_SCTP:
894 sctp_spec = item->spec;
895 sctp_mask = item->mask;
896 if (sctp_spec && sctp_mask) {
897 /* Check SCTP mask and update input set */
898 if (sctp_mask->hdr.cksum) {
899 rte_flow_error_set(error, EINVAL,
900 RTE_FLOW_ERROR_TYPE_ITEM,
902 "Invalid SCTP mask");
907 if (sctp_mask->hdr.src_port)
909 ICE_INSET_TUN_SCTP_SRC_PORT;
910 if (sctp_mask->hdr.dst_port)
912 ICE_INSET_TUN_SCTP_DST_PORT;
914 if (sctp_mask->hdr.src_port)
916 ICE_INSET_SCTP_SRC_PORT;
917 if (sctp_mask->hdr.dst_port)
919 ICE_INSET_SCTP_DST_PORT;
921 list[t].type = ICE_SCTP_IL;
922 if (sctp_mask->hdr.src_port) {
923 list[t].h_u.sctp_hdr.src_port =
924 sctp_spec->hdr.src_port;
925 list[t].m_u.sctp_hdr.src_port =
926 sctp_mask->hdr.src_port;
929 if (sctp_mask->hdr.dst_port) {
930 list[t].h_u.sctp_hdr.dst_port =
931 sctp_spec->hdr.dst_port;
932 list[t].m_u.sctp_hdr.dst_port =
933 sctp_mask->hdr.dst_port;
940 case RTE_FLOW_ITEM_TYPE_VXLAN:
941 vxlan_spec = item->spec;
942 vxlan_mask = item->mask;
943 /* Check if VXLAN item is used to describe protocol.
944 * If yes, both spec and mask should be NULL.
945 * If no, both spec and mask shouldn't be NULL.
947 if ((!vxlan_spec && vxlan_mask) ||
948 (vxlan_spec && !vxlan_mask)) {
949 rte_flow_error_set(error, EINVAL,
950 RTE_FLOW_ERROR_TYPE_ITEM,
952 "Invalid VXLAN item");
957 if (vxlan_spec && vxlan_mask) {
958 list[t].type = ICE_VXLAN;
959 if (vxlan_mask->vni[0] ||
960 vxlan_mask->vni[1] ||
961 vxlan_mask->vni[2]) {
962 list[t].h_u.tnl_hdr.vni =
963 (vxlan_spec->vni[2] << 16) |
964 (vxlan_spec->vni[1] << 8) |
966 list[t].m_u.tnl_hdr.vni =
967 (vxlan_mask->vni[2] << 16) |
968 (vxlan_mask->vni[1] << 8) |
971 ICE_INSET_TUN_VXLAN_VNI;
978 case RTE_FLOW_ITEM_TYPE_NVGRE:
979 nvgre_spec = item->spec;
980 nvgre_mask = item->mask;
981 /* Check if NVGRE item is used to describe protocol.
982 * If yes, both spec and mask should be NULL.
983 * If no, both spec and mask shouldn't be NULL.
985 if ((!nvgre_spec && nvgre_mask) ||
986 (nvgre_spec && !nvgre_mask)) {
987 rte_flow_error_set(error, EINVAL,
988 RTE_FLOW_ERROR_TYPE_ITEM,
990 "Invalid NVGRE item");
995 if (nvgre_spec && nvgre_mask) {
996 list[t].type = ICE_NVGRE;
997 if (nvgre_mask->tni[0] ||
998 nvgre_mask->tni[1] ||
999 nvgre_mask->tni[2]) {
1000 list[t].h_u.nvgre_hdr.tni_flow =
1001 (nvgre_spec->tni[2] << 16) |
1002 (nvgre_spec->tni[1] << 8) |
1004 list[t].m_u.nvgre_hdr.tni_flow =
1005 (nvgre_mask->tni[2] << 16) |
1006 (nvgre_mask->tni[1] << 8) |
1009 ICE_INSET_TUN_NVGRE_TNI;
1010 input_set_byte += 2;
1016 case RTE_FLOW_ITEM_TYPE_VLAN:
1017 vlan_spec = item->spec;
1018 vlan_mask = item->mask;
1019 /* Check if VLAN item is used to describe protocol.
1020 * If yes, both spec and mask should be NULL.
1021 * If no, both spec and mask shouldn't be NULL.
1023 if ((!vlan_spec && vlan_mask) ||
1024 (vlan_spec && !vlan_mask)) {
1025 rte_flow_error_set(error, EINVAL,
1026 RTE_FLOW_ERROR_TYPE_ITEM,
1028 "Invalid VLAN item");
1032 if (!outer_vlan_valid &&
1033 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1034 *tun_type == ICE_NON_TUN_QINQ))
1035 outer_vlan_valid = 1;
1036 else if (!inner_vlan_valid &&
1037 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1038 *tun_type == ICE_NON_TUN_QINQ))
1039 inner_vlan_valid = 1;
1040 else if (!inner_vlan_valid)
1041 inner_vlan_valid = 1;
1043 if (vlan_spec && vlan_mask) {
1044 if (outer_vlan_valid && !inner_vlan_valid) {
1045 list[t].type = ICE_VLAN_EX;
1046 input_set |= ICE_INSET_VLAN_OUTER;
1047 } else if (inner_vlan_valid) {
1048 list[t].type = ICE_VLAN_OFOS;
1049 input_set |= ICE_INSET_VLAN_INNER;
1052 if (vlan_mask->tci) {
1053 list[t].h_u.vlan_hdr.vlan =
1055 list[t].m_u.vlan_hdr.vlan =
1057 input_set_byte += 2;
1059 if (vlan_mask->inner_type) {
1060 rte_flow_error_set(error, EINVAL,
1061 RTE_FLOW_ERROR_TYPE_ITEM,
1063 "Invalid VLAN input set.");
1070 case RTE_FLOW_ITEM_TYPE_PPPOED:
1071 case RTE_FLOW_ITEM_TYPE_PPPOES:
1072 pppoe_spec = item->spec;
1073 pppoe_mask = item->mask;
1074 /* Check if PPPoE item is used to describe protocol.
1075 * If yes, both spec and mask should be NULL.
1076 * If no, both spec and mask shouldn't be NULL.
1078 if ((!pppoe_spec && pppoe_mask) ||
1079 (pppoe_spec && !pppoe_mask)) {
1080 rte_flow_error_set(error, EINVAL,
1081 RTE_FLOW_ERROR_TYPE_ITEM,
1083 "Invalid pppoe item");
1086 pppoe_patt_valid = 1;
1087 if (pppoe_spec && pppoe_mask) {
1088 /* Check pppoe mask and update input set */
1089 if (pppoe_mask->length ||
1091 pppoe_mask->version_type) {
1092 rte_flow_error_set(error, EINVAL,
1093 RTE_FLOW_ERROR_TYPE_ITEM,
1095 "Invalid pppoe mask");
1098 list[t].type = ICE_PPPOE;
1099 if (pppoe_mask->session_id) {
1100 list[t].h_u.pppoe_hdr.session_id =
1101 pppoe_spec->session_id;
1102 list[t].m_u.pppoe_hdr.session_id =
1103 pppoe_mask->session_id;
1104 input_set |= ICE_INSET_PPPOE_SESSION;
1105 input_set_byte += 2;
1108 pppoe_elem_valid = 1;
1112 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1113 pppoe_proto_spec = item->spec;
1114 pppoe_proto_mask = item->mask;
1115 /* Check if PPPoE optional proto_id item
1116 * is used to describe protocol.
1117 * If yes, both spec and mask should be NULL.
1118 * If no, both spec and mask shouldn't be NULL.
1120 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1121 (pppoe_proto_spec && !pppoe_proto_mask)) {
1122 rte_flow_error_set(error, EINVAL,
1123 RTE_FLOW_ERROR_TYPE_ITEM,
1125 "Invalid pppoe proto item");
1128 if (pppoe_proto_spec && pppoe_proto_mask) {
1129 if (pppoe_elem_valid)
1131 list[t].type = ICE_PPPOE;
1132 if (pppoe_proto_mask->proto_id) {
1133 list[t].h_u.pppoe_hdr.ppp_prot_id =
1134 pppoe_proto_spec->proto_id;
1135 list[t].m_u.pppoe_hdr.ppp_prot_id =
1136 pppoe_proto_mask->proto_id;
1137 input_set |= ICE_INSET_PPPOE_PROTO;
1138 input_set_byte += 2;
1139 pppoe_prot_valid = 1;
1141 if ((pppoe_proto_mask->proto_id &
1142 pppoe_proto_spec->proto_id) !=
1143 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1144 (pppoe_proto_mask->proto_id &
1145 pppoe_proto_spec->proto_id) !=
1146 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1147 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1149 *tun_type = ICE_SW_TUN_PPPOE;
1155 case RTE_FLOW_ITEM_TYPE_ESP:
1156 esp_spec = item->spec;
1157 esp_mask = item->mask;
1158 if ((esp_spec && !esp_mask) ||
1159 (!esp_spec && esp_mask)) {
1160 rte_flow_error_set(error, EINVAL,
1161 RTE_FLOW_ERROR_TYPE_ITEM,
1163 "Invalid esp item");
1166 /* Check esp mask and update input set */
1167 if (esp_mask && esp_mask->hdr.seq) {
1168 rte_flow_error_set(error, EINVAL,
1169 RTE_FLOW_ERROR_TYPE_ITEM,
1171 "Invalid esp mask");
1175 if (!esp_spec && !esp_mask && !input_set) {
1177 if (ipv6_valid && udp_valid)
1179 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1180 else if (ipv6_valid)
1181 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1182 else if (ipv4_valid)
1184 } else if (esp_spec && esp_mask &&
1187 list[t].type = ICE_NAT_T;
1189 list[t].type = ICE_ESP;
1190 list[t].h_u.esp_hdr.spi =
1192 list[t].m_u.esp_hdr.spi =
1194 input_set |= ICE_INSET_ESP_SPI;
1195 input_set_byte += 4;
1199 if (!profile_rule) {
1200 if (ipv6_valid && udp_valid)
1201 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1202 else if (ipv4_valid && udp_valid)
1203 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1204 else if (ipv6_valid)
1205 *tun_type = ICE_SW_TUN_IPV6_ESP;
1206 else if (ipv4_valid)
1207 *tun_type = ICE_SW_TUN_IPV4_ESP;
1211 case RTE_FLOW_ITEM_TYPE_AH:
1212 ah_spec = item->spec;
1213 ah_mask = item->mask;
1214 if ((ah_spec && !ah_mask) ||
1215 (!ah_spec && ah_mask)) {
1216 rte_flow_error_set(error, EINVAL,
1217 RTE_FLOW_ERROR_TYPE_ITEM,
1222 /* Check ah mask and update input set */
1224 (ah_mask->next_hdr ||
1225 ah_mask->payload_len ||
1227 ah_mask->reserved)) {
1228 rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ITEM,
1235 if (!ah_spec && !ah_mask && !input_set) {
1237 if (ipv6_valid && udp_valid)
1239 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1240 else if (ipv6_valid)
1241 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1242 else if (ipv4_valid)
1244 } else if (ah_spec && ah_mask &&
1246 list[t].type = ICE_AH;
1247 list[t].h_u.ah_hdr.spi =
1249 list[t].m_u.ah_hdr.spi =
1251 input_set |= ICE_INSET_AH_SPI;
1252 input_set_byte += 4;
1256 if (!profile_rule) {
1259 else if (ipv6_valid)
1260 *tun_type = ICE_SW_TUN_IPV6_AH;
1261 else if (ipv4_valid)
1262 *tun_type = ICE_SW_TUN_IPV4_AH;
1266 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1267 l2tp_spec = item->spec;
1268 l2tp_mask = item->mask;
1269 if ((l2tp_spec && !l2tp_mask) ||
1270 (!l2tp_spec && l2tp_mask)) {
1271 rte_flow_error_set(error, EINVAL,
1272 RTE_FLOW_ERROR_TYPE_ITEM,
1274 "Invalid l2tp item");
1278 if (!l2tp_spec && !l2tp_mask && !input_set) {
1281 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1282 else if (ipv4_valid)
1284 } else if (l2tp_spec && l2tp_mask &&
1285 l2tp_mask->session_id){
1286 list[t].type = ICE_L2TPV3;
1287 list[t].h_u.l2tpv3_sess_hdr.session_id =
1288 l2tp_spec->session_id;
1289 list[t].m_u.l2tpv3_sess_hdr.session_id =
1290 l2tp_mask->session_id;
1291 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1292 input_set_byte += 4;
1296 if (!profile_rule) {
1299 ICE_SW_TUN_IPV6_L2TPV3;
1300 else if (ipv4_valid)
1302 ICE_SW_TUN_IPV4_L2TPV3;
1306 case RTE_FLOW_ITEM_TYPE_PFCP:
1307 pfcp_spec = item->spec;
1308 pfcp_mask = item->mask;
1309 /* Check if PFCP item is used to describe protocol.
1310 * If yes, both spec and mask should be NULL.
1311 * If no, both spec and mask shouldn't be NULL.
1313 if ((!pfcp_spec && pfcp_mask) ||
1314 (pfcp_spec && !pfcp_mask)) {
1315 rte_flow_error_set(error, EINVAL,
1316 RTE_FLOW_ERROR_TYPE_ITEM,
1318 "Invalid PFCP item");
1321 if (pfcp_spec && pfcp_mask) {
1322 /* Check pfcp mask and update input set */
1323 if (pfcp_mask->msg_type ||
1324 pfcp_mask->msg_len ||
1326 rte_flow_error_set(error, EINVAL,
1327 RTE_FLOW_ERROR_TYPE_ITEM,
1329 "Invalid pfcp mask");
1332 if (pfcp_mask->s_field &&
1333 pfcp_spec->s_field == 0x01 &&
1336 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1337 else if (pfcp_mask->s_field &&
1338 pfcp_spec->s_field == 0x01)
1340 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1341 else if (pfcp_mask->s_field &&
1342 !pfcp_spec->s_field &&
1345 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1346 else if (pfcp_mask->s_field &&
1347 !pfcp_spec->s_field)
1349 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1355 case RTE_FLOW_ITEM_TYPE_VOID:
1359 rte_flow_error_set(error, EINVAL,
1360 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1361 "Invalid pattern item.");
1366 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1367 inner_vlan_valid && outer_vlan_valid)
1368 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1369 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1370 inner_vlan_valid && outer_vlan_valid)
1371 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1372 else if (*tun_type == ICE_NON_TUN &&
1373 inner_vlan_valid && outer_vlan_valid)
1374 *tun_type = ICE_NON_TUN_QINQ;
1375 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1376 inner_vlan_valid && outer_vlan_valid)
1377 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1379 if (pppoe_patt_valid && !pppoe_prot_valid) {
1380 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1381 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1382 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1383 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1384 else if (inner_vlan_valid && outer_vlan_valid)
1385 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1386 else if (ipv6_valid && udp_valid)
1387 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1388 else if (ipv6_valid && tcp_valid)
1389 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1390 else if (ipv4_valid && udp_valid)
1391 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1392 else if (ipv4_valid && tcp_valid)
1393 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1394 else if (ipv6_valid)
1395 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1396 else if (ipv4_valid)
1397 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1399 *tun_type = ICE_SW_TUN_PPPOE;
1402 if (*tun_type == ICE_NON_TUN) {
1404 *tun_type = ICE_SW_TUN_VXLAN;
1405 else if (nvgre_valid)
1406 *tun_type = ICE_SW_TUN_NVGRE;
1407 else if (ipv4_valid && tcp_valid)
1408 *tun_type = ICE_SW_IPV4_TCP;
1409 else if (ipv4_valid && udp_valid)
1410 *tun_type = ICE_SW_IPV4_UDP;
1411 else if (ipv6_valid && tcp_valid)
1412 *tun_type = ICE_SW_IPV6_TCP;
1413 else if (ipv6_valid && udp_valid)
1414 *tun_type = ICE_SW_IPV6_UDP;
1417 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1418 rte_flow_error_set(error, EINVAL,
1419 RTE_FLOW_ERROR_TYPE_ITEM,
1421 "too much input set");
1433 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1434 const struct rte_flow_action *actions,
1435 struct rte_flow_error *error,
1436 struct ice_adv_rule_info *rule_info)
1438 const struct rte_flow_action_vf *act_vf;
1439 const struct rte_flow_action *action;
1440 enum rte_flow_action_type action_type;
1442 for (action = actions; action->type !=
1443 RTE_FLOW_ACTION_TYPE_END; action++) {
1444 action_type = action->type;
1445 switch (action_type) {
1446 case RTE_FLOW_ACTION_TYPE_VF:
1447 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1448 act_vf = action->conf;
1450 if (act_vf->id >= ad->real_hw.num_vfs &&
1451 !act_vf->original) {
1452 rte_flow_error_set(error,
1453 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1459 if (act_vf->original)
1460 rule_info->sw_act.vsi_handle =
1461 ad->real_hw.avf.bus.func;
1463 rule_info->sw_act.vsi_handle = act_vf->id;
1466 case RTE_FLOW_ACTION_TYPE_DROP:
1467 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1471 rte_flow_error_set(error,
1472 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1474 "Invalid action type");
1479 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1480 rule_info->sw_act.flag = ICE_FLTR_RX;
1482 rule_info->priority = 5;
1488 ice_switch_parse_action(struct ice_pf *pf,
1489 const struct rte_flow_action *actions,
1490 struct rte_flow_error *error,
1491 struct ice_adv_rule_info *rule_info)
1493 struct ice_vsi *vsi = pf->main_vsi;
1494 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1495 const struct rte_flow_action_queue *act_q;
1496 const struct rte_flow_action_rss *act_qgrop;
1497 uint16_t base_queue, i;
1498 const struct rte_flow_action *action;
1499 enum rte_flow_action_type action_type;
1500 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1501 2, 4, 8, 16, 32, 64, 128};
1503 base_queue = pf->base_queue + vsi->base_queue;
1504 for (action = actions; action->type !=
1505 RTE_FLOW_ACTION_TYPE_END; action++) {
1506 action_type = action->type;
1507 switch (action_type) {
1508 case RTE_FLOW_ACTION_TYPE_RSS:
1509 act_qgrop = action->conf;
1510 if (act_qgrop->queue_num <= 1)
1512 rule_info->sw_act.fltr_act =
1514 rule_info->sw_act.fwd_id.q_id =
1515 base_queue + act_qgrop->queue[0];
1516 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1517 if (act_qgrop->queue_num ==
1518 valid_qgrop_number[i])
1521 if (i == MAX_QGRP_NUM_TYPE)
1523 if ((act_qgrop->queue[0] +
1524 act_qgrop->queue_num) >
1525 dev->data->nb_rx_queues)
1527 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1528 if (act_qgrop->queue[i + 1] !=
1529 act_qgrop->queue[i] + 1)
1531 rule_info->sw_act.qgrp_size =
1532 act_qgrop->queue_num;
1534 case RTE_FLOW_ACTION_TYPE_QUEUE:
1535 act_q = action->conf;
1536 if (act_q->index >= dev->data->nb_rx_queues)
1538 rule_info->sw_act.fltr_act =
1540 rule_info->sw_act.fwd_id.q_id =
1541 base_queue + act_q->index;
1544 case RTE_FLOW_ACTION_TYPE_DROP:
1545 rule_info->sw_act.fltr_act =
1549 case RTE_FLOW_ACTION_TYPE_VOID:
1557 rule_info->sw_act.vsi_handle = vsi->idx;
1559 rule_info->sw_act.src = vsi->idx;
1560 rule_info->priority = 5;
1565 rte_flow_error_set(error,
1566 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1568 "Invalid action type or queue number");
1572 rte_flow_error_set(error,
1573 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1575 "Invalid queue region indexes");
1579 rte_flow_error_set(error,
1580 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1582 "Discontinuous queue region");
1587 ice_switch_check_action(const struct rte_flow_action *actions,
1588 struct rte_flow_error *error)
1590 const struct rte_flow_action *action;
1591 enum rte_flow_action_type action_type;
1592 uint16_t actions_num = 0;
1594 for (action = actions; action->type !=
1595 RTE_FLOW_ACTION_TYPE_END; action++) {
1596 action_type = action->type;
1597 switch (action_type) {
1598 case RTE_FLOW_ACTION_TYPE_VF:
1599 case RTE_FLOW_ACTION_TYPE_RSS:
1600 case RTE_FLOW_ACTION_TYPE_QUEUE:
1601 case RTE_FLOW_ACTION_TYPE_DROP:
1604 case RTE_FLOW_ACTION_TYPE_VOID:
1607 rte_flow_error_set(error,
1608 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1610 "Invalid action type");
1615 if (actions_num != 1) {
1616 rte_flow_error_set(error,
1617 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1619 "Invalid action number");
1627 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1630 case ICE_SW_TUN_PROFID_IPV6_ESP:
1631 case ICE_SW_TUN_PROFID_IPV6_AH:
1632 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1633 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1634 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1635 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1636 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1637 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1647 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1648 struct ice_pattern_match_item *array,
1650 const struct rte_flow_item pattern[],
1651 const struct rte_flow_action actions[],
1653 struct rte_flow_error *error)
1655 struct ice_pf *pf = &ad->pf;
1656 uint64_t inputset = 0;
1658 struct sw_meta *sw_meta_ptr = NULL;
1659 struct ice_adv_rule_info rule_info;
1660 struct ice_adv_lkup_elem *list = NULL;
1661 uint16_t lkups_num = 0;
1662 const struct rte_flow_item *item = pattern;
1663 uint16_t item_num = 0;
1664 uint16_t vlan_num = 0;
1665 enum ice_sw_tunnel_type tun_type =
1667 struct ice_pattern_match_item *pattern_match_item = NULL;
1669 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1671 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1672 const struct rte_flow_item_eth *eth_mask;
1674 eth_mask = item->mask;
1677 if (eth_mask->type == UINT16_MAX)
1678 tun_type = ICE_SW_TUN_AND_NON_TUN;
1681 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1684 /* reserve one more memory slot for ETH which may
1685 * consume 2 lookup items.
1687 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1691 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1692 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1693 else if (vlan_num == 2)
1694 tun_type = ICE_NON_TUN_QINQ;
1696 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1698 rte_flow_error_set(error, EINVAL,
1699 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1700 "No memory for PMD internal items");
1705 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1707 rte_flow_error_set(error, EINVAL,
1708 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1709 "No memory for sw_pattern_meta_ptr");
1713 pattern_match_item =
1714 ice_search_pattern_match_item(ad, pattern, array, array_len,
1716 if (!pattern_match_item) {
1717 rte_flow_error_set(error, EINVAL,
1718 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1719 "Invalid input pattern");
1723 inputset = ice_switch_inset_get
1724 (pattern, error, list, &lkups_num, &tun_type);
1725 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1726 (inputset & ~pattern_match_item->input_set_mask)) {
1727 rte_flow_error_set(error, EINVAL,
1728 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1730 "Invalid input set");
1734 memset(&rule_info, 0, sizeof(rule_info));
1735 rule_info.tun_type = tun_type;
1737 ret = ice_switch_check_action(actions, error);
1741 if (ad->hw.dcf_enabled)
1742 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1745 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1751 *meta = sw_meta_ptr;
1752 ((struct sw_meta *)*meta)->list = list;
1753 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1754 ((struct sw_meta *)*meta)->rule_info = rule_info;
1757 rte_free(sw_meta_ptr);
1760 rte_free(pattern_match_item);
1766 rte_free(sw_meta_ptr);
1767 rte_free(pattern_match_item);
1773 ice_switch_query(struct ice_adapter *ad __rte_unused,
1774 struct rte_flow *flow __rte_unused,
1775 struct rte_flow_query_count *count __rte_unused,
1776 struct rte_flow_error *error)
1778 rte_flow_error_set(error, EINVAL,
1779 RTE_FLOW_ERROR_TYPE_HANDLE,
1781 "count action not supported by switch filter");
1787 ice_switch_redirect(struct ice_adapter *ad,
1788 struct rte_flow *flow,
1789 struct ice_flow_redirect *rd)
1791 struct ice_rule_query_data *rdata = flow->rule;
1792 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1793 struct ice_adv_lkup_elem *lkups_dp = NULL;
1794 struct LIST_HEAD_TYPE *list_head;
1795 struct ice_adv_rule_info rinfo;
1796 struct ice_hw *hw = &ad->hw;
1797 struct ice_switch_info *sw;
1801 if (rdata->vsi_handle != rd->vsi_handle)
1804 sw = hw->switch_info;
1805 if (!sw->recp_list[rdata->rid].recp_created)
1808 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1811 list_head = &sw->recp_list[rdata->rid].filt_rules;
1812 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1814 rinfo = list_itr->rule_info;
1815 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1816 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1817 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1818 (rinfo.fltr_rule_id == rdata->rule_id &&
1819 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1820 lkups_cnt = list_itr->lkups_cnt;
1821 lkups_dp = (struct ice_adv_lkup_elem *)
1822 ice_memdup(hw, list_itr->lkups,
1823 sizeof(*list_itr->lkups) *
1824 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1827 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1831 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1832 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1833 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1842 /* Remove the old rule */
1843 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1846 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1852 /* Update VSI context */
1853 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1855 /* Replay the rule */
1856 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1859 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1864 ice_free(hw, lkups_dp);
1869 ice_switch_init(struct ice_adapter *ad)
1872 struct ice_flow_parser *dist_parser;
1873 struct ice_flow_parser *perm_parser;
1875 if (ad->devargs.pipe_mode_support) {
1876 perm_parser = &ice_switch_perm_parser;
1877 ret = ice_register_parser(perm_parser, ad);
1879 dist_parser = &ice_switch_dist_parser;
1880 ret = ice_register_parser(dist_parser, ad);
1886 ice_switch_uninit(struct ice_adapter *ad)
1888 struct ice_flow_parser *dist_parser;
1889 struct ice_flow_parser *perm_parser;
1891 if (ad->devargs.pipe_mode_support) {
1892 perm_parser = &ice_switch_perm_parser;
1893 ice_unregister_parser(perm_parser, ad);
1895 dist_parser = &ice_switch_dist_parser;
1896 ice_unregister_parser(dist_parser, ad);
1901 ice_flow_engine ice_switch_engine = {
1902 .init = ice_switch_init,
1903 .uninit = ice_switch_uninit,
1904 .create = ice_switch_create,
1905 .destroy = ice_switch_destroy,
1906 .query_count = ice_switch_query,
1907 .redirect = ice_switch_redirect,
1908 .free = ice_switch_filter_rule_free,
1909 .type = ICE_FLOW_ENGINE_SWITCH,
1913 ice_flow_parser ice_switch_dist_parser = {
1914 .engine = &ice_switch_engine,
1915 .array = ice_switch_pattern_dist_list,
1916 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1917 .parse_pattern_action = ice_switch_parse_pattern_action,
1918 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1922 ice_flow_parser ice_switch_perm_parser = {
1923 .engine = &ice_switch_engine,
1924 .array = ice_switch_pattern_perm_list,
1925 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1926 .parse_pattern_action = ice_switch_parse_pattern_action,
1927 .stage = ICE_FLOW_STAGE_PERMISSION,
1930 RTE_INIT(ice_sw_engine_init)
1932 struct ice_flow_engine *engine = &ice_switch_engine;
1933 ice_register_flow_engine(engine);