1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* NOTE(review): this chunk is a numbered listing — the leading integer on
 * every line is extraction residue (the original file's line number), not
 * part of the C source, and many original lines are missing from this view.
 */
/* Presumably the max number of queue-group types for RSS/queue actions —
 * TODO confirm against the action-parsing code (not visible in this chunk).
 */
29 #define MAX_QGRP_NUM_TYPE 7
/* Upper bound on total matched input-set bytes; compared against the
 * input_set_byte accumulator built up in ice_switch_inset_get().
 */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol IDs carried in the PPPoE payload (RFC 1661 assigned numbers):
 * 0x0021 = IPv4, 0x0057 = IPv6. Used when classifying pppoe proto_id to pick
 * ICE_SW_TUN_PPPOE vs ICE_SW_TUN_PPPOE_PAY.
 */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 next-protocol value 47 (GRE); an IPv4 item whose next_proto_id
 * matches this switches the tunnel type to ICE_SW_TUN_AND_NON_TUN.
 */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* ICE_SW_INSET_* — bitmasks (built from ICE_INSET_* field bits) describing
 * the set of header fields each supported pattern may match; each mask is
 * paired with a pattern in the dist/perm pattern-match tables further down.
 * NOTE(review): several macros below end in a '\' continuation whose next
 * line is missing from this extracted listing (e.g. MAC_VLAN, MAC_QINQ) —
 * code kept byte-identical; consult the original file for the full bodies.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_QINQ ( \
41 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59 ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (VXLAN/NVGRE) input sets — ICE_INSET_TUN_* bits refer to the inner
 * headers. "DIST" vs "PERM" presumably correspond to the two parser variants
 * (ice_switch_dist_parser / ice_switch_perm_parser) declared further down —
 * TODO confirm against the parser registration code (not in this chunk).
 */
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98 ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE and tunnel-over-IP (ESP/AH/L2TPv3/PFCP) input sets; the compound
 * masks simply OR the base MAC_IPV4/IPV6 sets with the protocol field bit.
 */
103 #define ICE_SW_INSET_MAC_PPPOE ( \
104 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
107 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109 ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135 ICE_SW_INSET_MAC_IPV4 | \
136 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138 ICE_SW_INSET_MAC_IPV6 | \
139 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* NOTE(review): fragment of 'struct sw_meta' — the struct header line and
 * the lkups_num member are missing from this extracted listing. Membership
 * is grounded by the '((struct sw_meta *)meta)->list / ->lkups_num /
 * ->rule_info' accesses in ice_switch_create() below: this struct is the
 * parse result handed from the pattern parser to rule creation.
 */
142 struct ice_adv_lkup_elem *list;
144 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two parser variants registered by this engine;
 * their definitions/registration are outside this chunk.
 */
147 static struct ice_flow_parser ice_switch_dist_parser;
148 static struct ice_flow_parser ice_switch_perm_parser;
/* Pattern table for the "dist" parser: each entry pairs a supported rte_flow
 * pattern with the ICE_SW_INSET_* mask of fields it may match (second column
 * is ICE_INSET_NONE throughout). NOTE(review): several entry-opening lines
 * (e.g. for pattern_ethertype, pattern_eth_ipv4, pattern_eth_ipv6,
 * pattern_eth_pppoes) and the array's storage-class/type line are missing
 * from this extracted listing; code kept byte-identical.
 */
151 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
153 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
154 {pattern_ethertype_vlan,
155 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
156 {pattern_ethertype_qinq,
157 ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
159 ICE_INSET_NONE, ICE_INSET_NONE},
161 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
162 {pattern_eth_ipv4_udp,
163 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
164 {pattern_eth_ipv4_tcp,
165 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
167 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
168 {pattern_eth_ipv6_udp,
169 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
170 {pattern_eth_ipv6_tcp,
171 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
172 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
173 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
174 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
175 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
176 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
177 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
178 {pattern_eth_ipv4_nvgre_eth_ipv4,
179 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
180 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
181 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
182 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
183 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
185 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
186 {pattern_eth_vlan_pppoes,
187 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
188 {pattern_eth_pppoes_proto,
189 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
190 {pattern_eth_vlan_pppoes_proto,
191 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
192 {pattern_eth_pppoes_ipv4,
193 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
194 {pattern_eth_pppoes_ipv4_tcp,
195 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
196 {pattern_eth_pppoes_ipv4_udp,
197 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
198 {pattern_eth_pppoes_ipv6,
199 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
200 {pattern_eth_pppoes_ipv6_tcp,
201 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
202 {pattern_eth_pppoes_ipv6_udp,
203 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
204 {pattern_eth_vlan_pppoes_ipv4,
205 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
206 {pattern_eth_vlan_pppoes_ipv4_tcp,
207 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
208 {pattern_eth_vlan_pppoes_ipv4_udp,
209 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
210 {pattern_eth_vlan_pppoes_ipv6,
211 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
212 {pattern_eth_vlan_pppoes_ipv6_tcp,
213 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
214 {pattern_eth_vlan_pppoes_ipv6_udp,
215 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
216 {pattern_eth_ipv4_esp,
217 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
218 {pattern_eth_ipv4_udp_esp,
219 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
220 {pattern_eth_ipv6_esp,
221 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
222 {pattern_eth_ipv6_udp_esp,
223 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
224 {pattern_eth_ipv4_ah,
225 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
226 {pattern_eth_ipv6_ah,
227 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
228 {pattern_eth_ipv6_udp_ah,
229 ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_ipv4_l2tp,
231 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
232 {pattern_eth_ipv6_l2tp,
233 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
234 {pattern_eth_ipv4_pfcp,
235 ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_ipv6_pfcp,
237 ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_qinq_ipv4,
239 ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
240 {pattern_eth_qinq_ipv6,
241 ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
242 {pattern_eth_qinq_pppoes,
243 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
244 {pattern_eth_qinq_pppoes_proto,
245 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
246 {pattern_eth_qinq_pppoes_ipv4,
247 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
248 {pattern_eth_qinq_pppoes_ipv6,
249 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
/* Pattern table for the "perm" parser. Identical pattern coverage to the
 * dist list above except the tunnel entries, which use the narrower
 * ICE_SW_INSET_PERM_TUNNEL_IPV4* masks (inner-header fields only, no TNI/VNI
 * or DMAC). NOTE(review): same extraction damage as the dist list — the
 * array's type line and some entry-opening lines are missing; code kept
 * byte-identical.
 */
253 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
255 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
256 {pattern_ethertype_vlan,
257 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
258 {pattern_ethertype_qinq,
259 ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
261 ICE_INSET_NONE, ICE_INSET_NONE},
263 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
264 {pattern_eth_ipv4_udp,
265 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
266 {pattern_eth_ipv4_tcp,
267 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
269 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
270 {pattern_eth_ipv6_udp,
271 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
272 {pattern_eth_ipv6_tcp,
273 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
274 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
275 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
276 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
277 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
278 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
279 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
280 {pattern_eth_ipv4_nvgre_eth_ipv4,
281 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
282 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
283 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
284 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
285 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
287 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
288 {pattern_eth_vlan_pppoes,
289 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
290 {pattern_eth_pppoes_proto,
291 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
292 {pattern_eth_vlan_pppoes_proto,
293 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
294 {pattern_eth_pppoes_ipv4,
295 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
296 {pattern_eth_pppoes_ipv4_tcp,
297 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
298 {pattern_eth_pppoes_ipv4_udp,
299 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
300 {pattern_eth_pppoes_ipv6,
301 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
302 {pattern_eth_pppoes_ipv6_tcp,
303 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
304 {pattern_eth_pppoes_ipv6_udp,
305 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
306 {pattern_eth_vlan_pppoes_ipv4,
307 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
308 {pattern_eth_vlan_pppoes_ipv4_tcp,
309 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
310 {pattern_eth_vlan_pppoes_ipv4_udp,
311 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
312 {pattern_eth_vlan_pppoes_ipv6,
313 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
314 {pattern_eth_vlan_pppoes_ipv6_tcp,
315 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
316 {pattern_eth_vlan_pppoes_ipv6_udp,
317 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
318 {pattern_eth_ipv4_esp,
319 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
320 {pattern_eth_ipv4_udp_esp,
321 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
322 {pattern_eth_ipv6_esp,
323 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
324 {pattern_eth_ipv6_udp_esp,
325 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
326 {pattern_eth_ipv4_ah,
327 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
328 {pattern_eth_ipv6_ah,
329 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
330 {pattern_eth_ipv6_udp_ah,
331 ICE_INSET_NONE, ICE_INSET_NONE},
332 {pattern_eth_ipv4_l2tp,
333 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
334 {pattern_eth_ipv6_l2tp,
335 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
336 {pattern_eth_ipv4_pfcp,
337 ICE_INSET_NONE, ICE_INSET_NONE},
338 {pattern_eth_ipv6_pfcp,
339 ICE_INSET_NONE, ICE_INSET_NONE},
340 {pattern_eth_qinq_ipv4,
341 ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
342 {pattern_eth_qinq_ipv6,
343 ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
344 {pattern_eth_qinq_pppoes,
345 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
346 {pattern_eth_qinq_pppoes_proto,
347 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
348 {pattern_eth_qinq_pppoes_ipv4,
349 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
350 {pattern_eth_qinq_pppoes_ipv6,
351 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
/* ice_switch_create - program a parsed switch rule into hardware.
 * Takes the parse result carried in 'meta' (a struct sw_meta: lookup list,
 * lookup count, rule info), adds the advanced rule via ice_add_adv_rule(),
 * and stores a heap copy of the returned rule handle in flow->rule so
 * ice_switch_destroy() can remove it later.
 * NOTE(review): this extracted listing is missing the function's return-type
 * line, the 'meta' parameter line, braces, goto/error labels and return
 * statements — code kept byte-identical to the fragment.
 */
355 ice_switch_create(struct ice_adapter *ad,
356 struct rte_flow *flow,
358 struct rte_flow_error *error)
361 struct ice_pf *pf = &ad->pf;
362 struct ice_hw *hw = ICE_PF_TO_HW(pf);
363 struct ice_rule_query_data rule_added = {0};
364 struct ice_rule_query_data *filter_ptr;
/* Unpack the parser output; 'meta' is declared on a missing line but the
 * casts below show it is a struct sw_meta pointer.
 */
365 struct ice_adv_lkup_elem *list =
366 ((struct sw_meta *)meta)->list;
368 ((struct sw_meta *)meta)->lkups_num;
369 struct ice_adv_rule_info *rule_info =
370 &((struct sw_meta *)meta)->rule_info;
/* HW cannot chain more lookup words than ICE_MAX_CHAIN_WORDS per rule. */
372 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
373 rte_flow_error_set(error, EINVAL,
374 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
375 "item number too large for rule");
/* A rule with no lookup list would match nothing meaningful — reject. */
379 rte_flow_error_set(error, EINVAL,
380 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
381 "lookup list should not be NULL");
384 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule handle (rule id etc.) so destroy can find it. */
386 filter_ptr = rte_zmalloc("ice_switch_filter",
387 sizeof(struct ice_rule_query_data), 0);
389 rte_flow_error_set(error, EINVAL,
390 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
391 "No memory for ice_switch_filter");
394 flow->rule = filter_ptr;
/* Source operand of this copy is on a missing line — presumably
 * &rule_added; TODO confirm against the original file.
 */
395 rte_memcpy(filter_ptr,
397 sizeof(struct ice_rule_query_data));
399 rte_flow_error_set(error, EINVAL,
400 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
401 "switch filter create flow fail");
/* ice_switch_destroy - remove a switch rule previously installed by
 * ice_switch_create() and free the saved rule handle.
 * NOTE(review): extracted listing is missing the return-type line, braces,
 * the flow->rule operand of the cast, and the return statements — code kept
 * byte-identical to the fragment.
 */
417 ice_switch_destroy(struct ice_adapter *ad,
418 struct rte_flow *flow,
419 struct rte_flow_error *error)
421 struct ice_hw *hw = &ad->hw;
423 struct ice_rule_query_data *filter_ptr;
/* Recover the rule handle stored in the flow by ice_switch_create();
 * the operand (presumably flow->rule) is on a missing line.
 */
425 filter_ptr = (struct ice_rule_query_data *)
/* Error path for a flow that carries no switch-filter handle. */
429 rte_flow_error_set(error, EINVAL,
430 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
432 " create by switch filter");
/* Ask firmware to remove the advanced rule identified by the handle. */
436 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
438 rte_flow_error_set(error, EINVAL,
439 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
440 "fail to destroy switch filter rule");
444 rte_free(filter_ptr);
/* ice_switch_filter_rule_free - release the per-flow rule handle allocated
 * by ice_switch_create() (rte_free(NULL) is a safe no-op).
 * NOTE(review): return-type line and braces are missing from this extracted
 * listing.
 */
449 ice_switch_filter_rule_free(struct rte_flow *flow)
451 rte_free(flow->rule);
455 ice_switch_inset_get(const struct rte_flow_item pattern[],
456 struct rte_flow_error *error,
457 struct ice_adv_lkup_elem *list,
459 enum ice_sw_tunnel_type *tun_type)
461 const struct rte_flow_item *item = pattern;
462 enum rte_flow_item_type item_type;
463 const struct rte_flow_item_eth *eth_spec, *eth_mask;
464 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
465 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
466 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
467 const struct rte_flow_item_udp *udp_spec, *udp_mask;
468 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
469 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
470 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
471 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
472 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
473 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
475 const struct rte_flow_item_esp *esp_spec, *esp_mask;
476 const struct rte_flow_item_ah *ah_spec, *ah_mask;
477 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
478 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
479 uint64_t input_set = ICE_INSET_NONE;
480 uint16_t input_set_byte = 0;
481 bool pppoe_elem_valid = 0;
482 bool pppoe_patt_valid = 0;
483 bool pppoe_prot_valid = 0;
484 bool inner_vlan_valid = 0;
485 bool outer_vlan_valid = 0;
486 bool tunnel_valid = 0;
487 bool profile_rule = 0;
488 bool nvgre_valid = 0;
489 bool vxlan_valid = 0;
496 for (item = pattern; item->type !=
497 RTE_FLOW_ITEM_TYPE_END; item++) {
499 rte_flow_error_set(error, EINVAL,
500 RTE_FLOW_ERROR_TYPE_ITEM,
502 "Not support range");
505 item_type = item->type;
508 case RTE_FLOW_ITEM_TYPE_ETH:
509 eth_spec = item->spec;
510 eth_mask = item->mask;
511 if (eth_spec && eth_mask) {
512 const uint8_t *a = eth_mask->src.addr_bytes;
513 const uint8_t *b = eth_mask->dst.addr_bytes;
514 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
515 if (a[j] && tunnel_valid) {
525 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
526 if (b[j] && tunnel_valid) {
537 input_set |= ICE_INSET_ETHERTYPE;
538 list[t].type = (tunnel_valid == 0) ?
539 ICE_MAC_OFOS : ICE_MAC_IL;
540 struct ice_ether_hdr *h;
541 struct ice_ether_hdr *m;
543 h = &list[t].h_u.eth_hdr;
544 m = &list[t].m_u.eth_hdr;
545 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
546 if (eth_mask->src.addr_bytes[j]) {
548 eth_spec->src.addr_bytes[j];
550 eth_mask->src.addr_bytes[j];
554 if (eth_mask->dst.addr_bytes[j]) {
556 eth_spec->dst.addr_bytes[j];
558 eth_mask->dst.addr_bytes[j];
565 if (eth_mask->type) {
566 list[t].type = ICE_ETYPE_OL;
567 list[t].h_u.ethertype.ethtype_id =
569 list[t].m_u.ethertype.ethtype_id =
577 case RTE_FLOW_ITEM_TYPE_IPV4:
578 ipv4_spec = item->spec;
579 ipv4_mask = item->mask;
581 if (ipv4_spec && ipv4_mask) {
582 /* Check IPv4 mask and update input set */
583 if (ipv4_mask->hdr.version_ihl ||
584 ipv4_mask->hdr.total_length ||
585 ipv4_mask->hdr.packet_id ||
586 ipv4_mask->hdr.hdr_checksum) {
587 rte_flow_error_set(error, EINVAL,
588 RTE_FLOW_ERROR_TYPE_ITEM,
590 "Invalid IPv4 mask.");
595 if (ipv4_mask->hdr.type_of_service)
597 ICE_INSET_TUN_IPV4_TOS;
598 if (ipv4_mask->hdr.src_addr)
600 ICE_INSET_TUN_IPV4_SRC;
601 if (ipv4_mask->hdr.dst_addr)
603 ICE_INSET_TUN_IPV4_DST;
604 if (ipv4_mask->hdr.time_to_live)
606 ICE_INSET_TUN_IPV4_TTL;
607 if (ipv4_mask->hdr.next_proto_id)
609 ICE_INSET_TUN_IPV4_PROTO;
611 if (ipv4_mask->hdr.src_addr)
612 input_set |= ICE_INSET_IPV4_SRC;
613 if (ipv4_mask->hdr.dst_addr)
614 input_set |= ICE_INSET_IPV4_DST;
615 if (ipv4_mask->hdr.time_to_live)
616 input_set |= ICE_INSET_IPV4_TTL;
617 if (ipv4_mask->hdr.next_proto_id)
619 ICE_INSET_IPV4_PROTO;
620 if (ipv4_mask->hdr.type_of_service)
624 list[t].type = (tunnel_valid == 0) ?
625 ICE_IPV4_OFOS : ICE_IPV4_IL;
626 if (ipv4_mask->hdr.src_addr) {
627 list[t].h_u.ipv4_hdr.src_addr =
628 ipv4_spec->hdr.src_addr;
629 list[t].m_u.ipv4_hdr.src_addr =
630 ipv4_mask->hdr.src_addr;
633 if (ipv4_mask->hdr.dst_addr) {
634 list[t].h_u.ipv4_hdr.dst_addr =
635 ipv4_spec->hdr.dst_addr;
636 list[t].m_u.ipv4_hdr.dst_addr =
637 ipv4_mask->hdr.dst_addr;
640 if (ipv4_mask->hdr.time_to_live) {
641 list[t].h_u.ipv4_hdr.time_to_live =
642 ipv4_spec->hdr.time_to_live;
643 list[t].m_u.ipv4_hdr.time_to_live =
644 ipv4_mask->hdr.time_to_live;
647 if (ipv4_mask->hdr.next_proto_id) {
648 list[t].h_u.ipv4_hdr.protocol =
649 ipv4_spec->hdr.next_proto_id;
650 list[t].m_u.ipv4_hdr.protocol =
651 ipv4_mask->hdr.next_proto_id;
654 if ((ipv4_spec->hdr.next_proto_id &
655 ipv4_mask->hdr.next_proto_id) ==
656 ICE_IPV4_PROTO_NVGRE)
657 *tun_type = ICE_SW_TUN_AND_NON_TUN;
658 if (ipv4_mask->hdr.type_of_service) {
659 list[t].h_u.ipv4_hdr.tos =
660 ipv4_spec->hdr.type_of_service;
661 list[t].m_u.ipv4_hdr.tos =
662 ipv4_mask->hdr.type_of_service;
669 case RTE_FLOW_ITEM_TYPE_IPV6:
670 ipv6_spec = item->spec;
671 ipv6_mask = item->mask;
673 if (ipv6_spec && ipv6_mask) {
674 if (ipv6_mask->hdr.payload_len) {
675 rte_flow_error_set(error, EINVAL,
676 RTE_FLOW_ERROR_TYPE_ITEM,
678 "Invalid IPv6 mask");
682 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
683 if (ipv6_mask->hdr.src_addr[j] &&
686 ICE_INSET_TUN_IPV6_SRC;
688 } else if (ipv6_mask->hdr.src_addr[j]) {
689 input_set |= ICE_INSET_IPV6_SRC;
693 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
694 if (ipv6_mask->hdr.dst_addr[j] &&
697 ICE_INSET_TUN_IPV6_DST;
699 } else if (ipv6_mask->hdr.dst_addr[j]) {
700 input_set |= ICE_INSET_IPV6_DST;
704 if (ipv6_mask->hdr.proto &&
707 ICE_INSET_TUN_IPV6_NEXT_HDR;
708 else if (ipv6_mask->hdr.proto)
710 ICE_INSET_IPV6_NEXT_HDR;
711 if (ipv6_mask->hdr.hop_limits &&
714 ICE_INSET_TUN_IPV6_HOP_LIMIT;
715 else if (ipv6_mask->hdr.hop_limits)
717 ICE_INSET_IPV6_HOP_LIMIT;
718 if ((ipv6_mask->hdr.vtc_flow &
720 (RTE_IPV6_HDR_TC_MASK)) &&
723 ICE_INSET_TUN_IPV6_TC;
724 else if (ipv6_mask->hdr.vtc_flow &
726 (RTE_IPV6_HDR_TC_MASK))
727 input_set |= ICE_INSET_IPV6_TC;
729 list[t].type = (tunnel_valid == 0) ?
730 ICE_IPV6_OFOS : ICE_IPV6_IL;
731 struct ice_ipv6_hdr *f;
732 struct ice_ipv6_hdr *s;
733 f = &list[t].h_u.ipv6_hdr;
734 s = &list[t].m_u.ipv6_hdr;
735 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
736 if (ipv6_mask->hdr.src_addr[j]) {
738 ipv6_spec->hdr.src_addr[j];
740 ipv6_mask->hdr.src_addr[j];
743 if (ipv6_mask->hdr.dst_addr[j]) {
745 ipv6_spec->hdr.dst_addr[j];
747 ipv6_mask->hdr.dst_addr[j];
751 if (ipv6_mask->hdr.proto) {
753 ipv6_spec->hdr.proto;
755 ipv6_mask->hdr.proto;
758 if (ipv6_mask->hdr.hop_limits) {
760 ipv6_spec->hdr.hop_limits;
762 ipv6_mask->hdr.hop_limits;
765 if (ipv6_mask->hdr.vtc_flow &
767 (RTE_IPV6_HDR_TC_MASK)) {
768 struct ice_le_ver_tc_flow vtf;
769 vtf.u.fld.version = 0;
770 vtf.u.fld.flow_label = 0;
771 vtf.u.fld.tc = (rte_be_to_cpu_32
772 (ipv6_spec->hdr.vtc_flow) &
773 RTE_IPV6_HDR_TC_MASK) >>
774 RTE_IPV6_HDR_TC_SHIFT;
775 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
776 vtf.u.fld.tc = (rte_be_to_cpu_32
777 (ipv6_mask->hdr.vtc_flow) &
778 RTE_IPV6_HDR_TC_MASK) >>
779 RTE_IPV6_HDR_TC_SHIFT;
780 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
787 case RTE_FLOW_ITEM_TYPE_UDP:
788 udp_spec = item->spec;
789 udp_mask = item->mask;
791 if (udp_spec && udp_mask) {
792 /* Check UDP mask and update input set*/
793 if (udp_mask->hdr.dgram_len ||
794 udp_mask->hdr.dgram_cksum) {
795 rte_flow_error_set(error, EINVAL,
796 RTE_FLOW_ERROR_TYPE_ITEM,
803 if (udp_mask->hdr.src_port)
805 ICE_INSET_TUN_UDP_SRC_PORT;
806 if (udp_mask->hdr.dst_port)
808 ICE_INSET_TUN_UDP_DST_PORT;
810 if (udp_mask->hdr.src_port)
812 ICE_INSET_UDP_SRC_PORT;
813 if (udp_mask->hdr.dst_port)
815 ICE_INSET_UDP_DST_PORT;
817 if (*tun_type == ICE_SW_TUN_VXLAN &&
819 list[t].type = ICE_UDP_OF;
821 list[t].type = ICE_UDP_ILOS;
822 if (udp_mask->hdr.src_port) {
823 list[t].h_u.l4_hdr.src_port =
824 udp_spec->hdr.src_port;
825 list[t].m_u.l4_hdr.src_port =
826 udp_mask->hdr.src_port;
829 if (udp_mask->hdr.dst_port) {
830 list[t].h_u.l4_hdr.dst_port =
831 udp_spec->hdr.dst_port;
832 list[t].m_u.l4_hdr.dst_port =
833 udp_mask->hdr.dst_port;
840 case RTE_FLOW_ITEM_TYPE_TCP:
841 tcp_spec = item->spec;
842 tcp_mask = item->mask;
844 if (tcp_spec && tcp_mask) {
845 /* Check TCP mask and update input set */
846 if (tcp_mask->hdr.sent_seq ||
847 tcp_mask->hdr.recv_ack ||
848 tcp_mask->hdr.data_off ||
849 tcp_mask->hdr.tcp_flags ||
850 tcp_mask->hdr.rx_win ||
851 tcp_mask->hdr.cksum ||
852 tcp_mask->hdr.tcp_urp) {
853 rte_flow_error_set(error, EINVAL,
854 RTE_FLOW_ERROR_TYPE_ITEM,
861 if (tcp_mask->hdr.src_port)
863 ICE_INSET_TUN_TCP_SRC_PORT;
864 if (tcp_mask->hdr.dst_port)
866 ICE_INSET_TUN_TCP_DST_PORT;
868 if (tcp_mask->hdr.src_port)
870 ICE_INSET_TCP_SRC_PORT;
871 if (tcp_mask->hdr.dst_port)
873 ICE_INSET_TCP_DST_PORT;
875 list[t].type = ICE_TCP_IL;
876 if (tcp_mask->hdr.src_port) {
877 list[t].h_u.l4_hdr.src_port =
878 tcp_spec->hdr.src_port;
879 list[t].m_u.l4_hdr.src_port =
880 tcp_mask->hdr.src_port;
883 if (tcp_mask->hdr.dst_port) {
884 list[t].h_u.l4_hdr.dst_port =
885 tcp_spec->hdr.dst_port;
886 list[t].m_u.l4_hdr.dst_port =
887 tcp_mask->hdr.dst_port;
894 case RTE_FLOW_ITEM_TYPE_SCTP:
895 sctp_spec = item->spec;
896 sctp_mask = item->mask;
897 if (sctp_spec && sctp_mask) {
898 /* Check SCTP mask and update input set */
899 if (sctp_mask->hdr.cksum) {
900 rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ITEM,
903 "Invalid SCTP mask");
908 if (sctp_mask->hdr.src_port)
910 ICE_INSET_TUN_SCTP_SRC_PORT;
911 if (sctp_mask->hdr.dst_port)
913 ICE_INSET_TUN_SCTP_DST_PORT;
915 if (sctp_mask->hdr.src_port)
917 ICE_INSET_SCTP_SRC_PORT;
918 if (sctp_mask->hdr.dst_port)
920 ICE_INSET_SCTP_DST_PORT;
922 list[t].type = ICE_SCTP_IL;
923 if (sctp_mask->hdr.src_port) {
924 list[t].h_u.sctp_hdr.src_port =
925 sctp_spec->hdr.src_port;
926 list[t].m_u.sctp_hdr.src_port =
927 sctp_mask->hdr.src_port;
930 if (sctp_mask->hdr.dst_port) {
931 list[t].h_u.sctp_hdr.dst_port =
932 sctp_spec->hdr.dst_port;
933 list[t].m_u.sctp_hdr.dst_port =
934 sctp_mask->hdr.dst_port;
941 case RTE_FLOW_ITEM_TYPE_VXLAN:
942 vxlan_spec = item->spec;
943 vxlan_mask = item->mask;
944 /* Check if VXLAN item is used to describe protocol.
945 * If yes, both spec and mask should be NULL.
946 * If no, both spec and mask shouldn't be NULL.
948 if ((!vxlan_spec && vxlan_mask) ||
949 (vxlan_spec && !vxlan_mask)) {
950 rte_flow_error_set(error, EINVAL,
951 RTE_FLOW_ERROR_TYPE_ITEM,
953 "Invalid VXLAN item");
958 if (vxlan_spec && vxlan_mask) {
959 list[t].type = ICE_VXLAN;
960 if (vxlan_mask->vni[0] ||
961 vxlan_mask->vni[1] ||
962 vxlan_mask->vni[2]) {
963 list[t].h_u.tnl_hdr.vni =
964 (vxlan_spec->vni[2] << 16) |
965 (vxlan_spec->vni[1] << 8) |
967 list[t].m_u.tnl_hdr.vni =
968 (vxlan_mask->vni[2] << 16) |
969 (vxlan_mask->vni[1] << 8) |
972 ICE_INSET_TUN_VXLAN_VNI;
979 case RTE_FLOW_ITEM_TYPE_NVGRE:
980 nvgre_spec = item->spec;
981 nvgre_mask = item->mask;
982 /* Check if NVGRE item is used to describe protocol.
983 * If yes, both spec and mask should be NULL.
984 * If no, both spec and mask shouldn't be NULL.
986 if ((!nvgre_spec && nvgre_mask) ||
987 (nvgre_spec && !nvgre_mask)) {
988 rte_flow_error_set(error, EINVAL,
989 RTE_FLOW_ERROR_TYPE_ITEM,
991 "Invalid NVGRE item");
996 if (nvgre_spec && nvgre_mask) {
997 list[t].type = ICE_NVGRE;
998 if (nvgre_mask->tni[0] ||
999 nvgre_mask->tni[1] ||
1000 nvgre_mask->tni[2]) {
1001 list[t].h_u.nvgre_hdr.tni_flow =
1002 (nvgre_spec->tni[2] << 16) |
1003 (nvgre_spec->tni[1] << 8) |
1005 list[t].m_u.nvgre_hdr.tni_flow =
1006 (nvgre_mask->tni[2] << 16) |
1007 (nvgre_mask->tni[1] << 8) |
1010 ICE_INSET_TUN_NVGRE_TNI;
1011 input_set_byte += 2;
1017 case RTE_FLOW_ITEM_TYPE_VLAN:
1018 vlan_spec = item->spec;
1019 vlan_mask = item->mask;
1020 /* Check if VLAN item is used to describe protocol.
1021 * If yes, both spec and mask should be NULL.
1022 * If no, both spec and mask shouldn't be NULL.
1024 if ((!vlan_spec && vlan_mask) ||
1025 (vlan_spec && !vlan_mask)) {
1026 rte_flow_error_set(error, EINVAL,
1027 RTE_FLOW_ERROR_TYPE_ITEM,
1029 "Invalid VLAN item");
1033 if (!outer_vlan_valid &&
1034 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1035 *tun_type == ICE_NON_TUN_QINQ))
1036 outer_vlan_valid = 1;
1037 else if (!inner_vlan_valid &&
1038 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1039 *tun_type == ICE_NON_TUN_QINQ))
1040 inner_vlan_valid = 1;
1041 else if (!inner_vlan_valid)
1042 inner_vlan_valid = 1;
1044 if (vlan_spec && vlan_mask) {
1045 if (outer_vlan_valid && !inner_vlan_valid) {
1046 list[t].type = ICE_VLAN_EX;
1047 input_set |= ICE_INSET_VLAN_OUTER;
1048 } else if (inner_vlan_valid) {
1049 list[t].type = ICE_VLAN_OFOS;
1050 input_set |= ICE_INSET_VLAN_INNER;
1053 if (vlan_mask->tci) {
1054 list[t].h_u.vlan_hdr.vlan =
1056 list[t].m_u.vlan_hdr.vlan =
1058 input_set_byte += 2;
1060 if (vlan_mask->inner_type) {
1061 rte_flow_error_set(error, EINVAL,
1062 RTE_FLOW_ERROR_TYPE_ITEM,
1064 "Invalid VLAN input set.");
1071 case RTE_FLOW_ITEM_TYPE_PPPOED:
1072 case RTE_FLOW_ITEM_TYPE_PPPOES:
1073 pppoe_spec = item->spec;
1074 pppoe_mask = item->mask;
1075 /* Check if PPPoE item is used to describe protocol.
1076 * If yes, both spec and mask should be NULL.
1077 * If no, both spec and mask shouldn't be NULL.
1079 if ((!pppoe_spec && pppoe_mask) ||
1080 (pppoe_spec && !pppoe_mask)) {
1081 rte_flow_error_set(error, EINVAL,
1082 RTE_FLOW_ERROR_TYPE_ITEM,
1084 "Invalid pppoe item");
1087 pppoe_patt_valid = 1;
1088 if (pppoe_spec && pppoe_mask) {
1089 /* Check pppoe mask and update input set */
1090 if (pppoe_mask->length ||
1092 pppoe_mask->version_type) {
1093 rte_flow_error_set(error, EINVAL,
1094 RTE_FLOW_ERROR_TYPE_ITEM,
1096 "Invalid pppoe mask");
1099 list[t].type = ICE_PPPOE;
1100 if (pppoe_mask->session_id) {
1101 list[t].h_u.pppoe_hdr.session_id =
1102 pppoe_spec->session_id;
1103 list[t].m_u.pppoe_hdr.session_id =
1104 pppoe_mask->session_id;
1105 input_set |= ICE_INSET_PPPOE_SESSION;
1106 input_set_byte += 2;
1109 pppoe_elem_valid = 1;
1113 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1114 pppoe_proto_spec = item->spec;
1115 pppoe_proto_mask = item->mask;
1116 /* Check if PPPoE optional proto_id item
1117 * is used to describe protocol.
1118 * If yes, both spec and mask should be NULL.
1119 * If no, both spec and mask shouldn't be NULL.
1121 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1122 (pppoe_proto_spec && !pppoe_proto_mask)) {
1123 rte_flow_error_set(error, EINVAL,
1124 RTE_FLOW_ERROR_TYPE_ITEM,
1126 "Invalid pppoe proto item");
1129 if (pppoe_proto_spec && pppoe_proto_mask) {
1130 if (pppoe_elem_valid)
1132 list[t].type = ICE_PPPOE;
1133 if (pppoe_proto_mask->proto_id) {
1134 list[t].h_u.pppoe_hdr.ppp_prot_id =
1135 pppoe_proto_spec->proto_id;
1136 list[t].m_u.pppoe_hdr.ppp_prot_id =
1137 pppoe_proto_mask->proto_id;
1138 input_set |= ICE_INSET_PPPOE_PROTO;
1139 input_set_byte += 2;
1140 pppoe_prot_valid = 1;
1142 if ((pppoe_proto_mask->proto_id &
1143 pppoe_proto_spec->proto_id) !=
1144 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1145 (pppoe_proto_mask->proto_id &
1146 pppoe_proto_spec->proto_id) !=
1147 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1148 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1150 *tun_type = ICE_SW_TUN_PPPOE;
1156 case RTE_FLOW_ITEM_TYPE_ESP:
1157 esp_spec = item->spec;
1158 esp_mask = item->mask;
1159 if ((esp_spec && !esp_mask) ||
1160 (!esp_spec && esp_mask)) {
1161 rte_flow_error_set(error, EINVAL,
1162 RTE_FLOW_ERROR_TYPE_ITEM,
1164 "Invalid esp item");
1167 /* Check esp mask and update input set */
1168 if (esp_mask && esp_mask->hdr.seq) {
1169 rte_flow_error_set(error, EINVAL,
1170 RTE_FLOW_ERROR_TYPE_ITEM,
1172 "Invalid esp mask");
1176 if (!esp_spec && !esp_mask && !input_set) {
1178 if (ipv6_valid && udp_valid)
1180 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1181 else if (ipv6_valid)
1182 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1183 else if (ipv4_valid)
1185 } else if (esp_spec && esp_mask &&
1188 list[t].type = ICE_NAT_T;
1190 list[t].type = ICE_ESP;
1191 list[t].h_u.esp_hdr.spi =
1193 list[t].m_u.esp_hdr.spi =
1195 input_set |= ICE_INSET_ESP_SPI;
1196 input_set_byte += 4;
1200 if (!profile_rule) {
1201 if (ipv6_valid && udp_valid)
1202 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1203 else if (ipv4_valid && udp_valid)
1204 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1205 else if (ipv6_valid)
1206 *tun_type = ICE_SW_TUN_IPV6_ESP;
1207 else if (ipv4_valid)
1208 *tun_type = ICE_SW_TUN_IPV4_ESP;
1212 case RTE_FLOW_ITEM_TYPE_AH:
1213 ah_spec = item->spec;
1214 ah_mask = item->mask;
1215 if ((ah_spec && !ah_mask) ||
1216 (!ah_spec && ah_mask)) {
1217 rte_flow_error_set(error, EINVAL,
1218 RTE_FLOW_ERROR_TYPE_ITEM,
1223 /* Check ah mask and update input set */
1225 (ah_mask->next_hdr ||
1226 ah_mask->payload_len ||
1228 ah_mask->reserved)) {
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_ITEM,
1236 if (!ah_spec && !ah_mask && !input_set) {
1238 if (ipv6_valid && udp_valid)
1240 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1241 else if (ipv6_valid)
1242 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1243 else if (ipv4_valid)
1245 } else if (ah_spec && ah_mask &&
1247 list[t].type = ICE_AH;
1248 list[t].h_u.ah_hdr.spi =
1250 list[t].m_u.ah_hdr.spi =
1252 input_set |= ICE_INSET_AH_SPI;
1253 input_set_byte += 4;
1257 if (!profile_rule) {
1260 else if (ipv6_valid)
1261 *tun_type = ICE_SW_TUN_IPV6_AH;
1262 else if (ipv4_valid)
1263 *tun_type = ICE_SW_TUN_IPV4_AH;
1267 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1268 l2tp_spec = item->spec;
1269 l2tp_mask = item->mask;
1270 if ((l2tp_spec && !l2tp_mask) ||
1271 (!l2tp_spec && l2tp_mask)) {
1272 rte_flow_error_set(error, EINVAL,
1273 RTE_FLOW_ERROR_TYPE_ITEM,
1275 "Invalid l2tp item");
1279 if (!l2tp_spec && !l2tp_mask && !input_set) {
1282 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1283 else if (ipv4_valid)
1285 } else if (l2tp_spec && l2tp_mask &&
1286 l2tp_mask->session_id){
1287 list[t].type = ICE_L2TPV3;
1288 list[t].h_u.l2tpv3_sess_hdr.session_id =
1289 l2tp_spec->session_id;
1290 list[t].m_u.l2tpv3_sess_hdr.session_id =
1291 l2tp_mask->session_id;
1292 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1293 input_set_byte += 4;
1297 if (!profile_rule) {
1300 ICE_SW_TUN_IPV6_L2TPV3;
1301 else if (ipv4_valid)
1303 ICE_SW_TUN_IPV4_L2TPV3;
1307 case RTE_FLOW_ITEM_TYPE_PFCP:
1308 pfcp_spec = item->spec;
1309 pfcp_mask = item->mask;
1310 /* Check if PFCP item is used to describe protocol.
1311 * If yes, both spec and mask should be NULL.
1312 * If no, both spec and mask shouldn't be NULL.
1314 if ((!pfcp_spec && pfcp_mask) ||
1315 (pfcp_spec && !pfcp_mask)) {
1316 rte_flow_error_set(error, EINVAL,
1317 RTE_FLOW_ERROR_TYPE_ITEM,
1319 "Invalid PFCP item");
1322 if (pfcp_spec && pfcp_mask) {
1323 /* Check pfcp mask and update input set */
1324 if (pfcp_mask->msg_type ||
1325 pfcp_mask->msg_len ||
1327 rte_flow_error_set(error, EINVAL,
1328 RTE_FLOW_ERROR_TYPE_ITEM,
1330 "Invalid pfcp mask");
1333 if (pfcp_mask->s_field &&
1334 pfcp_spec->s_field == 0x01 &&
1337 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1338 else if (pfcp_mask->s_field &&
1339 pfcp_spec->s_field == 0x01)
1341 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1342 else if (pfcp_mask->s_field &&
1343 !pfcp_spec->s_field &&
1346 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1347 else if (pfcp_mask->s_field &&
1348 !pfcp_spec->s_field)
1350 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1356 case RTE_FLOW_ITEM_TYPE_VOID:
1360 rte_flow_error_set(error, EINVAL,
1361 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1362 "Invalid pattern item.");
1367 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1368 inner_vlan_valid && outer_vlan_valid)
1369 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1370 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1371 inner_vlan_valid && outer_vlan_valid)
1372 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1373 else if (*tun_type == ICE_NON_TUN &&
1374 inner_vlan_valid && outer_vlan_valid)
1375 *tun_type = ICE_NON_TUN_QINQ;
1376 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1377 inner_vlan_valid && outer_vlan_valid)
1378 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1380 if (pppoe_patt_valid && !pppoe_prot_valid) {
1381 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1382 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1383 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1384 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1385 else if (inner_vlan_valid && outer_vlan_valid)
1386 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1387 else if (ipv6_valid && udp_valid)
1388 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1389 else if (ipv6_valid && tcp_valid)
1390 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1391 else if (ipv4_valid && udp_valid)
1392 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1393 else if (ipv4_valid && tcp_valid)
1394 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1395 else if (ipv6_valid)
1396 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1397 else if (ipv4_valid)
1398 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1400 *tun_type = ICE_SW_TUN_PPPOE;
1403 if (*tun_type == ICE_NON_TUN) {
1405 *tun_type = ICE_SW_TUN_VXLAN;
1406 else if (nvgre_valid)
1407 *tun_type = ICE_SW_TUN_NVGRE;
1408 else if (ipv4_valid && tcp_valid)
1409 *tun_type = ICE_SW_IPV4_TCP;
1410 else if (ipv4_valid && udp_valid)
1411 *tun_type = ICE_SW_IPV4_UDP;
1412 else if (ipv6_valid && tcp_valid)
1413 *tun_type = ICE_SW_IPV6_TCP;
1414 else if (ipv6_valid && udp_valid)
1415 *tun_type = ICE_SW_IPV6_UDP;
1418 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1419 rte_flow_error_set(error, EINVAL,
1420 RTE_FLOW_ERROR_TYPE_ITEM,
1422 "too much input set");
/* Parse flow actions when running as a DCF (Device Control Function):
 * only VF-forward and DROP actions are accepted, and the result is
 * written into @rule_info (filter action, target VSI handle, source,
 * RX direction flag, priority).
 *
 * NOTE(review): this listing has elided lines (non-contiguous embedded
 * line numbers) — opening brace, some breaks/returns and error strings
 * are missing from view; comments below describe only the visible code.
 */
1434 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1435 const struct rte_flow_action *actions,
1436 struct rte_flow_error *error,
1437 struct ice_adv_rule_info *rule_info)
1439 const struct rte_flow_action_vf *act_vf;
1440 const struct rte_flow_action *action;
1441 enum rte_flow_action_type action_type;
/* Walk the action list until the END terminator. */
1443 for (action = actions; action->type !=
1444 RTE_FLOW_ACTION_TYPE_END; action++) {
1445 action_type = action->type;
1446 switch (action_type) {
1447 case RTE_FLOW_ACTION_TYPE_VF:
/* Forward-to-VSI; the concrete VSI handle is chosen below. */
1448 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1449 act_vf = action->conf;
/* Reject an out-of-range VF id unless "original" is set
 * (original means: target the DCF's own function instead).
 */
1451 if (act_vf->id >= ad->real_hw.num_vfs &&
1452 !act_vf->original) {
1453 rte_flow_error_set(error,
1454 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* original==1: use the AVF bus function number as the VSI
 * handle; otherwise the user-supplied VF id is the handle.
 */
1460 if (act_vf->original)
1461 rule_info->sw_act.vsi_handle =
1462 ad->real_hw.avf.bus.func;
1464 rule_info->sw_act.vsi_handle = act_vf->id;
1467 case RTE_FLOW_ACTION_TYPE_DROP:
1468 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
/* Any other action type is rejected. */
1472 rte_flow_error_set(error,
1473 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1475 "Invalid action type");
/* Source VSI mirrors the chosen target handle; rule matches RX. */
1480 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1481 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Fixed priority 5 for switch rules (matches ice_switch_parse_action). */
1483 rule_info->priority = 5;
/* Parse flow actions in regular (PF) mode: RSS-as-queue-group, QUEUE,
 * DROP and VOID are accepted; fills @rule_info with the filter action,
 * destination queue / queue-group, VSI handle, source and priority.
 *
 * NOTE(review): listing has elided lines (non-contiguous embedded line
 * numbers); goto targets and some enum values are not visible here.
 */
1489 ice_switch_parse_action(struct ice_pf *pf,
1490 const struct rte_flow_action *actions,
1491 struct rte_flow_error *error,
1492 struct ice_adv_rule_info *rule_info)
1494 struct ice_vsi *vsi = pf->main_vsi;
1495 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1496 const struct rte_flow_action_queue *act_q;
1497 const struct rte_flow_action_rss *act_qgrop;
1498 uint16_t base_queue, i;
1499 const struct rte_flow_action *action;
1500 enum rte_flow_action_type action_type;
/* Hardware only supports power-of-two queue-group sizes, 2..128. */
1501 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1502 2, 4, 8, 16, 32, 64, 128};
/* Queue ids in the rule are absolute: PF base + VSI base + local index. */
1504 base_queue = pf->base_queue + vsi->base_queue;
1505 for (action = actions; action->type !=
1506 RTE_FLOW_ACTION_TYPE_END; action++) {
1507 action_type = action->type;
1508 switch (action_type) {
/* RSS action here expresses "forward to a queue group". */
1509 case RTE_FLOW_ACTION_TYPE_RSS:
1510 act_qgrop = action->conf;
/* A group of one queue is not a group — rejected. */
1511 if (act_qgrop->queue_num <= 1)
1513 rule_info->sw_act.fltr_act =
/* Group is anchored at its first queue. */
1515 rule_info->sw_act.fwd_id.q_id =
1516 base_queue + act_qgrop->queue[0];
/* Group size must be one of the valid power-of-two sizes. */
1517 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1518 if (act_qgrop->queue_num ==
1519 valid_qgrop_number[i])
1522 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured Rx queue range. */
1524 if ((act_qgrop->queue[0] +
1525 act_qgrop->queue_num) >
1526 dev->data->nb_rx_queues)
/* Queues in the group must be consecutive. */
1528 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1529 if (act_qgrop->queue[i + 1] !=
1530 act_qgrop->queue[i] + 1)
1532 rule_info->sw_act.qgrp_size =
1533 act_qgrop->queue_num;
1535 case RTE_FLOW_ACTION_TYPE_QUEUE:
1536 act_q = action->conf;
/* Single-queue target must be an existing Rx queue. */
1537 if (act_q->index >= dev->data->nb_rx_queues)
1539 rule_info->sw_act.fltr_act =
1541 rule_info->sw_act.fwd_id.q_id =
1542 base_queue + act_q->index;
1545 case RTE_FLOW_ACTION_TYPE_DROP:
1546 rule_info->sw_act.fltr_act =
1550 case RTE_FLOW_ACTION_TYPE_VOID:
/* Rule is attached to, and matches traffic of, the main VSI. */
1558 rule_info->sw_act.vsi_handle = vsi->idx;
1560 rule_info->sw_act.src = vsi->idx;
1561 rule_info->priority = 5;
/* Error exits (targets of the gotos elided above). */
1566 rte_flow_error_set(error,
1567 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1569 "Invalid action type or queue number");
1573 rte_flow_error_set(error,
1574 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1576 "Invalid queue region indexes");
1580 rte_flow_error_set(error,
1581 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1583 "Discontinuous queue region");
/* Validate the action list before parsing: only VF/RSS/QUEUE/DROP are
 * supported action types, and exactly one such action may be present
 * (VOID actions are ignored for the count).
 *
 * NOTE(review): listing has elided lines; the increment of actions_num
 * and the returns are not visible here.
 */
1588 ice_switch_check_action(const struct rte_flow_action *actions,
1589 struct rte_flow_error *error)
1591 const struct rte_flow_action *action;
1592 enum rte_flow_action_type action_type;
1593 uint16_t actions_num = 0;
1595 for (action = actions; action->type !=
1596 RTE_FLOW_ACTION_TYPE_END; action++) {
1597 action_type = action->type;
1598 switch (action_type) {
/* Supported terminal actions — counted. */
1599 case RTE_FLOW_ACTION_TYPE_VF:
1600 case RTE_FLOW_ACTION_TYPE_RSS:
1601 case RTE_FLOW_ACTION_TYPE_QUEUE:
1602 case RTE_FLOW_ACTION_TYPE_DROP:
/* VOID is a no-op placeholder and does not count. */
1605 case RTE_FLOW_ACTION_TYPE_VOID:
1608 rte_flow_error_set(error,
1609 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1611 "Invalid action type");
/* Exactly one real action is required. */
1616 if (actions_num != 1) {
1617 rte_flow_error_set(error,
1618 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1620 "Invalid action number");
/* Return whether @tun_type denotes a "profile" rule (ESP/AH/L2TPv3/
 * NAT-T/PFCP profile ids), for which an empty input set is legal in
 * ice_switch_parse_pattern_action().
 *
 * NOTE(review): the switch keyword, default case and returns are elided
 * from this listing.
 */
1628 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1631 case ICE_SW_TUN_PROFID_IPV6_ESP:
1632 case ICE_SW_TUN_PROFID_IPV6_AH:
1633 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1634 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1635 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1636 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1637 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1638 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* Top-level parse entry for the switch engine: size and allocate the
 * lookup list from the pattern, detect QinQ / tunnel-and-non-tunnel
 * cases, resolve the pattern against @array, build the input set,
 * validate it against the matched pattern's mask, parse the actions
 * (DCF or PF path), and hand everything back through *meta.
 *
 * NOTE(review): listing has elided lines (return statements, some error
 * gotos, tun_type initializer value, meta parameter declaration).
 */
1648 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1649 struct ice_pattern_match_item *array,
1651 const struct rte_flow_item pattern[],
1652 const struct rte_flow_action actions[],
1654 struct rte_flow_error *error)
1656 struct ice_pf *pf = &ad->pf;
1657 uint64_t inputset = 0;
1659 struct sw_meta *sw_meta_ptr = NULL;
1660 struct ice_adv_rule_info rule_info;
1661 struct ice_adv_lkup_elem *list = NULL;
1662 uint16_t lkups_num = 0;
1663 const struct rte_flow_item *item = pattern;
1664 uint16_t item_num = 0;
1665 uint16_t vlan_num = 0;
1666 enum ice_sw_tunnel_type tun_type =
1668 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass over the pattern: count items, count VLANs, and detect
 * a fully-masked ether type (match both tunnel and non-tunnel).
 */
1670 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1672 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1673 const struct rte_flow_item_eth *eth_mask;
1675 eth_mask = item->mask;
1678 if (eth_mask->type == UINT16_MAX)
1679 tun_type = ICE_SW_TUN_AND_NON_TUN;
1682 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1685 /* reserve one more memory slot for ETH which may
1686 * consume 2 lookup items.
1688 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN items means QinQ; refine the tunnel type accordingly. */
1692 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1693 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1694 else if (vlan_num == 2)
1695 tun_type = ICE_NON_TUN_QINQ;
/* Lookup list sized by the (padded) item count; zeroed. */
1697 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1699 rte_flow_error_set(error, EINVAL,
1700 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1701 "No memory for PMD internal items");
1706 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1708 rte_flow_error_set(error, EINVAL,
1709 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1710 "No memory for sw_pattern_meta_ptr");
/* Match the pattern against the parser's supported-pattern table. */
1714 pattern_match_item =
1715 ice_search_pattern_match_item(ad, pattern, array, array_len,
1717 if (!pattern_match_item) {
1718 rte_flow_error_set(error, EINVAL,
1719 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1720 "Invalid input pattern");
/* Build the lookup list and input set; may further refine tun_type. */
1724 inputset = ice_switch_inset_get
1725 (pattern, error, list, &lkups_num, &tun_type);
/* Empty input set is only legal for profile rules; extra bits beyond
 * the matched pattern's mask are never legal.
 */
1726 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1727 (inputset & ~pattern_match_item->input_set_mask)) {
1728 rte_flow_error_set(error, EINVAL,
1729 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1731 "Invalid input set");
1735 memset(&rule_info, 0, sizeof(rule_info));
1736 rule_info.tun_type = tun_type;
1738 ret = ice_switch_check_action(actions, error);
/* DCF mode uses its own action parser (VF/DROP only). */
1742 if (ad->hw.dcf_enabled)
1743 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1746 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
/* Success: hand ownership of list/lkups/rule_info to the caller. */
1752 *meta = sw_meta_ptr;
1753 ((struct sw_meta *)*meta)->list = list;
1754 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1755 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error paths free whatever was allocated before the failure. */
1758 rte_free(sw_meta_ptr);
1761 rte_free(pattern_match_item);
1767 rte_free(sw_meta_ptr);
1768 rte_free(pattern_match_item);
/* Flow query (count) callback: the switch filter engine has no packet
 * counters, so this always reports the COUNT action as unsupported.
 */
1774 ice_switch_query(struct ice_adapter *ad __rte_unused,
1775 struct rte_flow *flow __rte_unused,
1776 struct rte_flow_query_count *count __rte_unused,
1777 struct rte_flow_error *error)
1779 rte_flow_error_set(error, EINVAL,
1780 RTE_FLOW_ERROR_TYPE_HANDLE,
1782 "count action not supported by switch filter");
/* Redirect an installed switch rule to a new VSI: find the matching
 * advanced filter entry for this flow, duplicate its lookups, remove
 * the old hardware rule, update the VSI context to the new VSI number,
 * and replay the rule with the saved lookups.
 *
 * NOTE(review): listing has elided lines (returns, some declarations,
 * parts of the remove/replay call argument lists).
 */
1788 ice_switch_redirect(struct ice_adapter *ad,
1789 struct rte_flow *flow,
1790 struct ice_flow_redirect *rd)
1792 struct ice_rule_query_data *rdata = flow->rule;
1793 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1794 struct ice_adv_lkup_elem *lkups_dp = NULL;
1795 struct LIST_HEAD_TYPE *list_head;
1796 struct ice_adv_rule_info rinfo;
1797 struct ice_hw *hw = &ad->hw;
1798 struct ice_switch_info *sw;
/* Only rules attached to the VSI being redirected are affected. */
1802 if (rdata->vsi_handle != rd->vsi_handle)
1805 sw = hw->switch_info;
/* Nothing to do if this rule's recipe was never created. */
1806 if (!sw->recp_list[rdata->rid].recp_created)
/* Only VSI-type redirects are handled here. */
1809 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Scan the recipe's filter-rule list for this flow's rule id. */
1812 list_head = &sw->recp_list[rdata->rid].filt_rules;
1813 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1815 rinfo = list_itr->rule_info;
/* Match by rule id, for either a direct FWD_TO_VSI to this VSI
 * or a FWD_TO_VSI_LIST rule.
 */
1816 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1817 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1818 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1819 (rinfo.fltr_rule_id == rdata->rule_id &&
1820 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1821 lkups_cnt = list_itr->lkups_cnt;
/* Deep-copy the lookups: the list entry is freed when the
 * old rule is removed below.
 */
1822 lkups_dp = (struct ice_adv_lkup_elem *)
1823 ice_memdup(hw, list_itr->lkups,
1824 sizeof(*list_itr->lkups) *
1825 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1828 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* Collapse a VSI-list rule back to a single-VSI forward. */
1832 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1833 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1834 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1843 /* Remove the old rule */
1844 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1847 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1853 /* Update VSI context */
1854 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1856 /* Replay the rule */
1857 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1860 PMD_DRV_LOG(ERR, "Failed to replay the rule");
/* Free the duplicated lookups on all exit paths reaching here. */
1865 ice_free(hw, lkups_dp);
/* Engine init hook: register the permission-stage parser when
 * pipe-mode is requested via devargs, otherwise the distributor-stage
 * parser. (Braces/else/return elided from this listing.)
 */
1870 ice_switch_init(struct ice_adapter *ad)
1873 struct ice_flow_parser *dist_parser;
1874 struct ice_flow_parser *perm_parser;
1876 if (ad->devargs.pipe_mode_support) {
1877 perm_parser = &ice_switch_perm_parser;
1878 ret = ice_register_parser(perm_parser, ad);
1880 dist_parser = &ice_switch_dist_parser;
1881 ret = ice_register_parser(dist_parser, ad);
/* Engine uninit hook: unregister whichever parser ice_switch_init()
 * registered, chosen by the same pipe-mode devarg.
 */
1887 ice_switch_uninit(struct ice_adapter *ad)
1889 struct ice_flow_parser *dist_parser;
1890 struct ice_flow_parser *perm_parser;
1892 if (ad->devargs.pipe_mode_support) {
1893 perm_parser = &ice_switch_perm_parser;
1894 ice_unregister_parser(perm_parser, ad);
1896 dist_parser = &ice_switch_dist_parser;
1897 ice_unregister_parser(dist_parser, ad);
/* Switch flow engine descriptor: wires the callbacks above into the
 * generic ice flow framework. (The storage-class/struct keyword line
 * preceding this initializer is elided from the listing.)
 */
1902 ice_flow_engine ice_switch_engine = {
1903 .init = ice_switch_init,
1904 .uninit = ice_switch_uninit,
1905 .create = ice_switch_create,
1906 .destroy = ice_switch_destroy,
1907 .query_count = ice_switch_query,
1908 .redirect = ice_switch_redirect,
1909 .free = ice_switch_filter_rule_free,
1910 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser: used when pipe-mode is NOT enabled;
 * patterns come from ice_switch_pattern_dist_list.
 */
1914 ice_flow_parser ice_switch_dist_parser = {
1915 .engine = &ice_switch_engine,
1916 .array = ice_switch_pattern_dist_list,
1917 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1918 .parse_pattern_action = ice_switch_parse_pattern_action,
1919 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser: used when pipe-mode IS enabled;
 * patterns come from ice_switch_pattern_perm_list.
 */
1923 ice_flow_parser ice_switch_perm_parser = {
1924 .engine = &ice_switch_engine,
1925 .array = ice_switch_pattern_perm_list,
1926 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1927 .parse_pattern_action = ice_switch_parse_pattern_action,
1928 .stage = ICE_FLOW_STAGE_PERMISSION,
1931 RTE_INIT(ice_sw_engine_init)
1933 struct ice_flow_engine *engine = &ice_switch_engine;
1934 ice_register_flow_engine(engine);