1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* NOTE(review): this file is a lossy extraction of the ice switch filter
 * source — many original lines are missing and a stray line-number token
 * is fused onto the start of each surviving line.
 */
/* Presumably the number of queue-group sizes tried when building a
 * queue-group action — TODO confirm, no use visible in this extraction. */
29 #define MAX_QGRP_NUM_TYPE 7
/* Presumed upper bound on matched bytes per switch rule; compared against
 * the input_set_byte accumulator built in ice_switch_inset_get(). */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP payload protocol IDs carried in a PPPoE session: used in
 * ice_switch_inset_get() to pick PPPoE vs. PPPoE_PAY tunnel type. */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 next-protocol value signalling GRE/NVGRE encapsulation; checked
 * against ipv4 next_proto_id in ice_switch_inset_get(). */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Per-pattern input-set bitmaps: each ICE_SW_INSET_* ORs together the
 * ICE_INSET_* field bits a switch rule of that flow pattern is allowed
 * to match on. These are the "input_set_mask" entries of the
 * ice_pattern_match_item tables below. NOTE(review): some continuation
 * lines are missing from this extraction (e.g. original line 39 inside
 * ICE_SW_INSET_MAC_VLAN).
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_IPV4 ( \
41 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
42 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
43 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6 ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
54 ICE_INSET_IPV6_NEXT_HDR)
55 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
56 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
59 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
60 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
61 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
62 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (VXLAN/NVGRE) input sets used by the distributor-stage tables:
 * match on inner ("TUN_") fields plus outer IPv4 destination. */
63 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
64 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
65 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
66 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
67 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
80 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
82 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
84 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel input sets used by the permission-stage tables: inner IPv4
 * fields only, no outer match. */
85 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
86 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
87 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
91 ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
95 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets; the *_PROTO variant additionally matches the PPP
 * payload protocol id. */
96 #define ICE_SW_INSET_MAC_PPPOE ( \
97 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
98 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
99 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
100 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
101 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
102 ICE_INSET_PPPOE_PROTO)
103 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
104 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
105 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
106 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
107 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
108 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
110 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
112 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
114 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* ESP / AH / L2TPv3 / PFCP input sets: base L3 set plus the protocol's
 * identifying field (SPI, session id, S-flag+SEID). */
115 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
116 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
117 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
118 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
119 #define ICE_SW_INSET_MAC_IPV4_AH ( \
120 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
121 #define ICE_SW_INSET_MAC_IPV6_AH ( \
122 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
123 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
124 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
125 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
126 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
127 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
128 ICE_SW_INSET_MAC_IPV4 | \
129 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
130 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
131 ICE_SW_INSET_MAC_IPV6 | \
132 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* Surviving members of struct sw_meta — the blob handed from the parse
 * step to ice_switch_create(), which reads ->list, ->lkups_num and
 * ->rule_info from it. NOTE(review): the "struct sw_meta {" opener, the
 * lkups_num member and the closing "};" are missing from this extraction.
 */
135 struct ice_adv_lkup_elem *list;
137 struct ice_adv_rule_info rule_info;
/* Forward declarations of the four parser variants: distributor vs.
 * permission stage ("dist"/"perm"), each for the OS-default and comms
 * DDP package flavors. Presumably registered/unregistered elsewhere in
 * this file — not visible in this extraction.
 */
140 static struct ice_flow_parser ice_switch_dist_parser_os;
141 static struct ice_flow_parser ice_switch_dist_parser_comms;
142 static struct ice_flow_parser ice_switch_perm_parser_os;
143 static struct ice_flow_parser ice_switch_perm_parser_comms;
/* Supported {pattern, input-set mask, meta} table for the distributor
 * stage with the OS-default DDP package: L2/L3/L4 plus VXLAN and NVGRE
 * tunnels, no PPPoE/ESP/AH/L2TP/PFCP. NOTE(review): the storage-class
 * line ("static struct"), several first-of-pair pattern lines and the
 * terminating "};" are missing from this extraction.
 */
146 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
148 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
149 {pattern_ethertype_vlan,
150 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
152 ICE_INSET_NONE, ICE_INSET_NONE},
154 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
155 {pattern_eth_ipv4_udp,
156 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
157 {pattern_eth_ipv4_tcp,
158 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
160 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
161 {pattern_eth_ipv6_udp,
162 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
163 {pattern_eth_ipv6_tcp,
164 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
165 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
166 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
167 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
168 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
169 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
170 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_nvgre_eth_ipv4,
172 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
173 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
174 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
175 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
176 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Supported pattern table for the distributor stage with the comms DDP
 * package: the OS-default set extended with PPPoE (plain/VLAN/proto and
 * over-IPv4/IPv6 L4), ESP, AH, L2TPv3 and PFCP patterns. Entries whose
 * input-set is ICE_INSET_NONE accept the pattern but allow no field
 * match. NOTE(review): "static struct" line, some first-of-pair lines
 * and the closing "};" are missing from this extraction.
 */
180 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
182 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
183 {pattern_ethertype_vlan,
184 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
186 ICE_INSET_NONE, ICE_INSET_NONE},
188 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
189 {pattern_eth_ipv4_udp,
190 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
191 {pattern_eth_ipv4_tcp,
192 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
194 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
195 {pattern_eth_ipv6_udp,
196 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
197 {pattern_eth_ipv6_tcp,
198 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
199 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
200 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
201 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
202 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
203 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
204 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
205 {pattern_eth_ipv4_nvgre_eth_ipv4,
206 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
207 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
208 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
209 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
210 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
212 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
213 {pattern_eth_vlan_pppoes,
214 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
215 {pattern_eth_pppoes_proto,
216 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
217 {pattern_eth_vlan_pppoes_proto,
218 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
219 {pattern_eth_pppoes_ipv4,
220 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
221 {pattern_eth_pppoes_ipv4_tcp,
222 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
223 {pattern_eth_pppoes_ipv4_udp,
224 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
225 {pattern_eth_pppoes_ipv6,
226 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
227 {pattern_eth_pppoes_ipv6_tcp,
228 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
229 {pattern_eth_pppoes_ipv6_udp,
230 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
231 {pattern_eth_vlan_pppoes_ipv4,
232 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
233 {pattern_eth_vlan_pppoes_ipv4_tcp,
234 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
235 {pattern_eth_vlan_pppoes_ipv4_udp,
236 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
237 {pattern_eth_vlan_pppoes_ipv6,
238 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
239 {pattern_eth_vlan_pppoes_ipv6_tcp,
240 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
241 {pattern_eth_vlan_pppoes_ipv6_udp,
242 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
243 {pattern_eth_ipv4_esp,
244 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
245 {pattern_eth_ipv4_udp_esp,
246 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
247 {pattern_eth_ipv6_esp,
248 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
249 {pattern_eth_ipv6_udp_esp,
250 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
251 {pattern_eth_ipv4_ah,
252 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
253 {pattern_eth_ipv6_ah,
254 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
255 {pattern_eth_ipv6_udp_ah,
256 ICE_INSET_NONE, ICE_INSET_NONE},
257 {pattern_eth_ipv4_l2tp,
258 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
259 {pattern_eth_ipv6_l2tp,
260 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
261 {pattern_eth_ipv4_pfcp,
262 ICE_INSET_NONE, ICE_INSET_NONE},
263 {pattern_eth_ipv6_pfcp,
264 ICE_INSET_NONE, ICE_INSET_NONE},
/* Supported pattern table for the permission stage with the OS-default
 * DDP package: same L2/L3/L4 set as dist_os, but tunnel patterns use
 * the inner-only PERM_TUNNEL input sets. NOTE(review): "static struct"
 * line, some first-of-pair lines and the closing "};" are missing from
 * this extraction.
 */
268 ice_pattern_match_item ice_switch_pattern_perm_os[] = {
270 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
271 {pattern_ethertype_vlan,
272 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
274 ICE_INSET_NONE, ICE_INSET_NONE},
276 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
277 {pattern_eth_ipv4_udp,
278 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
279 {pattern_eth_ipv4_tcp,
280 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
282 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
283 {pattern_eth_ipv6_udp,
284 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
285 {pattern_eth_ipv6_tcp,
286 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
287 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
288 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
289 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
290 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
291 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
292 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
293 {pattern_eth_ipv4_nvgre_eth_ipv4,
294 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
295 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
296 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
297 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
298 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Supported pattern table for the permission stage with the comms DDP
 * package: perm_os plus the PPPoE / ESP / AH / L2TPv3 / PFCP patterns.
 * NOTE(review): "static struct" line, some first-of-pair lines and the
 * closing "};" are missing from this extraction.
 */
302 ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
304 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
305 {pattern_ethertype_vlan,
306 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
308 ICE_INSET_NONE, ICE_INSET_NONE},
310 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
311 {pattern_eth_ipv4_udp,
312 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
313 {pattern_eth_ipv4_tcp,
314 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
316 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
317 {pattern_eth_ipv6_udp,
318 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
319 {pattern_eth_ipv6_tcp,
320 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
321 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
322 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
323 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
324 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
325 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
326 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
327 {pattern_eth_ipv4_nvgre_eth_ipv4,
328 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
329 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
330 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
331 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
332 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
334 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
335 {pattern_eth_vlan_pppoes,
336 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
337 {pattern_eth_pppoes_proto,
338 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
339 {pattern_eth_vlan_pppoes_proto,
340 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
341 {pattern_eth_pppoes_ipv4,
342 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
343 {pattern_eth_pppoes_ipv4_tcp,
344 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
345 {pattern_eth_pppoes_ipv4_udp,
346 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
347 {pattern_eth_pppoes_ipv6,
348 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
349 {pattern_eth_pppoes_ipv6_tcp,
350 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
351 {pattern_eth_pppoes_ipv6_udp,
352 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
353 {pattern_eth_vlan_pppoes_ipv4,
354 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
355 {pattern_eth_vlan_pppoes_ipv4_tcp,
356 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
357 {pattern_eth_vlan_pppoes_ipv4_udp,
358 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
359 {pattern_eth_vlan_pppoes_ipv6,
360 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
361 {pattern_eth_vlan_pppoes_ipv6_tcp,
362 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
363 {pattern_eth_vlan_pppoes_ipv6_udp,
364 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
365 {pattern_eth_ipv4_esp,
366 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
367 {pattern_eth_ipv4_udp_esp,
368 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
369 {pattern_eth_ipv6_esp,
370 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
371 {pattern_eth_ipv6_udp_esp,
372 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
373 {pattern_eth_ipv4_ah,
374 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
375 {pattern_eth_ipv6_ah,
376 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
377 {pattern_eth_ipv6_udp_ah,
378 ICE_INSET_NONE, ICE_INSET_NONE},
379 {pattern_eth_ipv4_l2tp,
380 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
381 {pattern_eth_ipv6_l2tp,
382 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
383 {pattern_eth_ipv4_pfcp,
384 ICE_INSET_NONE, ICE_INSET_NONE},
385 {pattern_eth_ipv6_pfcp,
386 ICE_INSET_NONE, ICE_INSET_NONE},
/* Program a parsed switch rule into HW and attach the returned rule id
 * to the rte_flow. Reads the lookup list, lookup count and rule info
 * out of the opaque sw_meta blob produced by the parse step.
 * NOTE(review): this extraction is missing the 'void *meta' parameter,
 * the 'int ret' / lkups_cnt declarations, the goto/return error paths
 * and the closing brace — do not treat the visible lines as complete.
 */
390 ice_switch_create(struct ice_adapter *ad,
391 struct rte_flow *flow,
393 struct rte_flow_error *error)
396 struct ice_pf *pf = &ad->pf;
397 struct ice_hw *hw = ICE_PF_TO_HW(pf);
398 struct ice_rule_query_data rule_added = {0};
399 struct ice_rule_query_data *filter_ptr;
/* Unpack the meta blob built by the parser (see struct sw_meta). */
400 struct ice_adv_lkup_elem *list =
401 ((struct sw_meta *)meta)->list;
403 ((struct sw_meta *)meta)->lkups_num;
404 struct ice_adv_rule_info *rule_info =
405 &((struct sw_meta *)meta)->rule_info;
/* HW limits a rule to ICE_MAX_CHAIN_WORDS chained lookup words. */
407 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
408 rte_flow_error_set(error, EINVAL,
409 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
410 "item number too large for rule");
414 rte_flow_error_set(error, EINVAL,
415 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
416 "lookup list should not be NULL");
/* Install the advanced rule; rule_added receives the HW rule id used
 * later by ice_switch_destroy(). */
419 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
421 filter_ptr = rte_zmalloc("ice_switch_filter",
422 sizeof(struct ice_rule_query_data), 0);
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
426 "No memory for ice_switch_filter");
/* Keep a private copy of the rule query data on the flow so the rule
 * can be removed/queried after this call returns. */
429 flow->rule = filter_ptr;
430 rte_memcpy(filter_ptr,
432 sizeof(struct ice_rule_query_data));
434 rte_flow_error_set(error, EINVAL,
435 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
436 "switch filter create flow fail");
/* Remove a switch rule from HW using the rule query data saved on the
 * rte_flow by ice_switch_create(), then free that per-flow copy.
 * NOTE(review): this extraction is missing the 'int ret' declaration,
 * the NULL-rule check branch, the return statements and the closing
 * brace — the visible lines are not the complete function.
 */
452 ice_switch_destroy(struct ice_adapter *ad,
453 struct rte_flow *flow,
454 struct rte_flow_error *error)
456 struct ice_hw *hw = &ad->hw;
458 struct ice_rule_query_data *filter_ptr;
460 filter_ptr = (struct ice_rule_query_data *)
464 rte_flow_error_set(error, EINVAL,
465 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
467 " create by switch filter");
/* Delete the HW rule by the id captured at create time. */
471 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
473 rte_flow_error_set(error, EINVAL,
474 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
475 "fail to destroy switch filter rule");
/* Release the query-data copy allocated in ice_switch_create(). */
479 rte_free(filter_ptr);
/* Flow-engine hook: free the per-flow rule query data allocated by
 * ice_switch_create() (flow->rule). NOTE(review): the return-type /
 * storage-class line and the surrounding braces are missing from this
 * extraction.
 */
484 ice_switch_filter_rule_free(struct rte_flow *flow)
486 rte_free(flow->rule);
490 ice_switch_inset_get(const struct rte_flow_item pattern[],
491 struct rte_flow_error *error,
492 struct ice_adv_lkup_elem *list,
494 enum ice_sw_tunnel_type *tun_type)
496 const struct rte_flow_item *item = pattern;
497 enum rte_flow_item_type item_type;
498 const struct rte_flow_item_eth *eth_spec, *eth_mask;
499 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
500 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
501 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
502 const struct rte_flow_item_udp *udp_spec, *udp_mask;
503 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
504 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
505 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
506 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
507 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
508 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
510 const struct rte_flow_item_esp *esp_spec, *esp_mask;
511 const struct rte_flow_item_ah *ah_spec, *ah_mask;
512 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
513 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
514 uint64_t input_set = ICE_INSET_NONE;
515 uint16_t input_set_byte = 0;
516 bool pppoe_elem_valid = 0;
517 bool pppoe_patt_valid = 0;
518 bool pppoe_prot_valid = 0;
519 bool tunnel_valid = 0;
520 bool profile_rule = 0;
521 bool nvgre_valid = 0;
522 bool vxlan_valid = 0;
529 for (item = pattern; item->type !=
530 RTE_FLOW_ITEM_TYPE_END; item++) {
532 rte_flow_error_set(error, EINVAL,
533 RTE_FLOW_ERROR_TYPE_ITEM,
535 "Not support range");
538 item_type = item->type;
541 case RTE_FLOW_ITEM_TYPE_ETH:
542 eth_spec = item->spec;
543 eth_mask = item->mask;
544 if (eth_spec && eth_mask) {
545 const uint8_t *a = eth_mask->src.addr_bytes;
546 const uint8_t *b = eth_mask->dst.addr_bytes;
547 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
548 if (a[j] && tunnel_valid) {
558 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
559 if (b[j] && tunnel_valid) {
570 input_set |= ICE_INSET_ETHERTYPE;
571 list[t].type = (tunnel_valid == 0) ?
572 ICE_MAC_OFOS : ICE_MAC_IL;
573 struct ice_ether_hdr *h;
574 struct ice_ether_hdr *m;
576 h = &list[t].h_u.eth_hdr;
577 m = &list[t].m_u.eth_hdr;
578 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
579 if (eth_mask->src.addr_bytes[j]) {
581 eth_spec->src.addr_bytes[j];
583 eth_mask->src.addr_bytes[j];
587 if (eth_mask->dst.addr_bytes[j]) {
589 eth_spec->dst.addr_bytes[j];
591 eth_mask->dst.addr_bytes[j];
598 if (eth_mask->type) {
599 list[t].type = ICE_ETYPE_OL;
600 list[t].h_u.ethertype.ethtype_id =
602 list[t].m_u.ethertype.ethtype_id =
610 case RTE_FLOW_ITEM_TYPE_IPV4:
611 ipv4_spec = item->spec;
612 ipv4_mask = item->mask;
614 if (ipv4_spec && ipv4_mask) {
615 /* Check IPv4 mask and update input set */
616 if (ipv4_mask->hdr.version_ihl ||
617 ipv4_mask->hdr.total_length ||
618 ipv4_mask->hdr.packet_id ||
619 ipv4_mask->hdr.hdr_checksum) {
620 rte_flow_error_set(error, EINVAL,
621 RTE_FLOW_ERROR_TYPE_ITEM,
623 "Invalid IPv4 mask.");
628 if (ipv4_mask->hdr.type_of_service)
630 ICE_INSET_TUN_IPV4_TOS;
631 if (ipv4_mask->hdr.src_addr)
633 ICE_INSET_TUN_IPV4_SRC;
634 if (ipv4_mask->hdr.dst_addr)
636 ICE_INSET_TUN_IPV4_DST;
637 if (ipv4_mask->hdr.time_to_live)
639 ICE_INSET_TUN_IPV4_TTL;
640 if (ipv4_mask->hdr.next_proto_id)
642 ICE_INSET_TUN_IPV4_PROTO;
644 if (ipv4_mask->hdr.src_addr)
645 input_set |= ICE_INSET_IPV4_SRC;
646 if (ipv4_mask->hdr.dst_addr)
647 input_set |= ICE_INSET_IPV4_DST;
648 if (ipv4_mask->hdr.time_to_live)
649 input_set |= ICE_INSET_IPV4_TTL;
650 if (ipv4_mask->hdr.next_proto_id)
652 ICE_INSET_IPV4_PROTO;
653 if (ipv4_mask->hdr.type_of_service)
657 list[t].type = (tunnel_valid == 0) ?
658 ICE_IPV4_OFOS : ICE_IPV4_IL;
659 if (ipv4_mask->hdr.src_addr) {
660 list[t].h_u.ipv4_hdr.src_addr =
661 ipv4_spec->hdr.src_addr;
662 list[t].m_u.ipv4_hdr.src_addr =
663 ipv4_mask->hdr.src_addr;
666 if (ipv4_mask->hdr.dst_addr) {
667 list[t].h_u.ipv4_hdr.dst_addr =
668 ipv4_spec->hdr.dst_addr;
669 list[t].m_u.ipv4_hdr.dst_addr =
670 ipv4_mask->hdr.dst_addr;
673 if (ipv4_mask->hdr.time_to_live) {
674 list[t].h_u.ipv4_hdr.time_to_live =
675 ipv4_spec->hdr.time_to_live;
676 list[t].m_u.ipv4_hdr.time_to_live =
677 ipv4_mask->hdr.time_to_live;
680 if (ipv4_mask->hdr.next_proto_id) {
681 list[t].h_u.ipv4_hdr.protocol =
682 ipv4_spec->hdr.next_proto_id;
683 list[t].m_u.ipv4_hdr.protocol =
684 ipv4_mask->hdr.next_proto_id;
687 if ((ipv4_spec->hdr.next_proto_id &
688 ipv4_mask->hdr.next_proto_id) ==
689 ICE_IPV4_PROTO_NVGRE)
690 *tun_type = ICE_SW_TUN_AND_NON_TUN;
691 if (ipv4_mask->hdr.type_of_service) {
692 list[t].h_u.ipv4_hdr.tos =
693 ipv4_spec->hdr.type_of_service;
694 list[t].m_u.ipv4_hdr.tos =
695 ipv4_mask->hdr.type_of_service;
702 case RTE_FLOW_ITEM_TYPE_IPV6:
703 ipv6_spec = item->spec;
704 ipv6_mask = item->mask;
706 if (ipv6_spec && ipv6_mask) {
707 if (ipv6_mask->hdr.payload_len) {
708 rte_flow_error_set(error, EINVAL,
709 RTE_FLOW_ERROR_TYPE_ITEM,
711 "Invalid IPv6 mask");
715 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
716 if (ipv6_mask->hdr.src_addr[j] &&
719 ICE_INSET_TUN_IPV6_SRC;
721 } else if (ipv6_mask->hdr.src_addr[j]) {
722 input_set |= ICE_INSET_IPV6_SRC;
726 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
727 if (ipv6_mask->hdr.dst_addr[j] &&
730 ICE_INSET_TUN_IPV6_DST;
732 } else if (ipv6_mask->hdr.dst_addr[j]) {
733 input_set |= ICE_INSET_IPV6_DST;
737 if (ipv6_mask->hdr.proto &&
740 ICE_INSET_TUN_IPV6_NEXT_HDR;
741 else if (ipv6_mask->hdr.proto)
743 ICE_INSET_IPV6_NEXT_HDR;
744 if (ipv6_mask->hdr.hop_limits &&
747 ICE_INSET_TUN_IPV6_HOP_LIMIT;
748 else if (ipv6_mask->hdr.hop_limits)
750 ICE_INSET_IPV6_HOP_LIMIT;
751 if ((ipv6_mask->hdr.vtc_flow &
753 (RTE_IPV6_HDR_TC_MASK)) &&
756 ICE_INSET_TUN_IPV6_TC;
757 else if (ipv6_mask->hdr.vtc_flow &
759 (RTE_IPV6_HDR_TC_MASK))
760 input_set |= ICE_INSET_IPV6_TC;
762 list[t].type = (tunnel_valid == 0) ?
763 ICE_IPV6_OFOS : ICE_IPV6_IL;
764 struct ice_ipv6_hdr *f;
765 struct ice_ipv6_hdr *s;
766 f = &list[t].h_u.ipv6_hdr;
767 s = &list[t].m_u.ipv6_hdr;
768 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
769 if (ipv6_mask->hdr.src_addr[j]) {
771 ipv6_spec->hdr.src_addr[j];
773 ipv6_mask->hdr.src_addr[j];
776 if (ipv6_mask->hdr.dst_addr[j]) {
778 ipv6_spec->hdr.dst_addr[j];
780 ipv6_mask->hdr.dst_addr[j];
784 if (ipv6_mask->hdr.proto) {
786 ipv6_spec->hdr.proto;
788 ipv6_mask->hdr.proto;
791 if (ipv6_mask->hdr.hop_limits) {
793 ipv6_spec->hdr.hop_limits;
795 ipv6_mask->hdr.hop_limits;
798 if (ipv6_mask->hdr.vtc_flow &
800 (RTE_IPV6_HDR_TC_MASK)) {
801 struct ice_le_ver_tc_flow vtf;
802 vtf.u.fld.version = 0;
803 vtf.u.fld.flow_label = 0;
804 vtf.u.fld.tc = (rte_be_to_cpu_32
805 (ipv6_spec->hdr.vtc_flow) &
806 RTE_IPV6_HDR_TC_MASK) >>
807 RTE_IPV6_HDR_TC_SHIFT;
808 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
809 vtf.u.fld.tc = (rte_be_to_cpu_32
810 (ipv6_mask->hdr.vtc_flow) &
811 RTE_IPV6_HDR_TC_MASK) >>
812 RTE_IPV6_HDR_TC_SHIFT;
813 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
820 case RTE_FLOW_ITEM_TYPE_UDP:
821 udp_spec = item->spec;
822 udp_mask = item->mask;
824 if (udp_spec && udp_mask) {
825 /* Check UDP mask and update input set*/
826 if (udp_mask->hdr.dgram_len ||
827 udp_mask->hdr.dgram_cksum) {
828 rte_flow_error_set(error, EINVAL,
829 RTE_FLOW_ERROR_TYPE_ITEM,
836 if (udp_mask->hdr.src_port)
838 ICE_INSET_TUN_UDP_SRC_PORT;
839 if (udp_mask->hdr.dst_port)
841 ICE_INSET_TUN_UDP_DST_PORT;
843 if (udp_mask->hdr.src_port)
845 ICE_INSET_UDP_SRC_PORT;
846 if (udp_mask->hdr.dst_port)
848 ICE_INSET_UDP_DST_PORT;
850 if (*tun_type == ICE_SW_TUN_VXLAN &&
852 list[t].type = ICE_UDP_OF;
854 list[t].type = ICE_UDP_ILOS;
855 if (udp_mask->hdr.src_port) {
856 list[t].h_u.l4_hdr.src_port =
857 udp_spec->hdr.src_port;
858 list[t].m_u.l4_hdr.src_port =
859 udp_mask->hdr.src_port;
862 if (udp_mask->hdr.dst_port) {
863 list[t].h_u.l4_hdr.dst_port =
864 udp_spec->hdr.dst_port;
865 list[t].m_u.l4_hdr.dst_port =
866 udp_mask->hdr.dst_port;
873 case RTE_FLOW_ITEM_TYPE_TCP:
874 tcp_spec = item->spec;
875 tcp_mask = item->mask;
877 if (tcp_spec && tcp_mask) {
878 /* Check TCP mask and update input set */
879 if (tcp_mask->hdr.sent_seq ||
880 tcp_mask->hdr.recv_ack ||
881 tcp_mask->hdr.data_off ||
882 tcp_mask->hdr.tcp_flags ||
883 tcp_mask->hdr.rx_win ||
884 tcp_mask->hdr.cksum ||
885 tcp_mask->hdr.tcp_urp) {
886 rte_flow_error_set(error, EINVAL,
887 RTE_FLOW_ERROR_TYPE_ITEM,
894 if (tcp_mask->hdr.src_port)
896 ICE_INSET_TUN_TCP_SRC_PORT;
897 if (tcp_mask->hdr.dst_port)
899 ICE_INSET_TUN_TCP_DST_PORT;
901 if (tcp_mask->hdr.src_port)
903 ICE_INSET_TCP_SRC_PORT;
904 if (tcp_mask->hdr.dst_port)
906 ICE_INSET_TCP_DST_PORT;
908 list[t].type = ICE_TCP_IL;
909 if (tcp_mask->hdr.src_port) {
910 list[t].h_u.l4_hdr.src_port =
911 tcp_spec->hdr.src_port;
912 list[t].m_u.l4_hdr.src_port =
913 tcp_mask->hdr.src_port;
916 if (tcp_mask->hdr.dst_port) {
917 list[t].h_u.l4_hdr.dst_port =
918 tcp_spec->hdr.dst_port;
919 list[t].m_u.l4_hdr.dst_port =
920 tcp_mask->hdr.dst_port;
927 case RTE_FLOW_ITEM_TYPE_SCTP:
928 sctp_spec = item->spec;
929 sctp_mask = item->mask;
930 if (sctp_spec && sctp_mask) {
931 /* Check SCTP mask and update input set */
932 if (sctp_mask->hdr.cksum) {
933 rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ITEM,
936 "Invalid SCTP mask");
941 if (sctp_mask->hdr.src_port)
943 ICE_INSET_TUN_SCTP_SRC_PORT;
944 if (sctp_mask->hdr.dst_port)
946 ICE_INSET_TUN_SCTP_DST_PORT;
948 if (sctp_mask->hdr.src_port)
950 ICE_INSET_SCTP_SRC_PORT;
951 if (sctp_mask->hdr.dst_port)
953 ICE_INSET_SCTP_DST_PORT;
955 list[t].type = ICE_SCTP_IL;
956 if (sctp_mask->hdr.src_port) {
957 list[t].h_u.sctp_hdr.src_port =
958 sctp_spec->hdr.src_port;
959 list[t].m_u.sctp_hdr.src_port =
960 sctp_mask->hdr.src_port;
963 if (sctp_mask->hdr.dst_port) {
964 list[t].h_u.sctp_hdr.dst_port =
965 sctp_spec->hdr.dst_port;
966 list[t].m_u.sctp_hdr.dst_port =
967 sctp_mask->hdr.dst_port;
974 case RTE_FLOW_ITEM_TYPE_VXLAN:
975 vxlan_spec = item->spec;
976 vxlan_mask = item->mask;
977 /* Check if VXLAN item is used to describe protocol.
978 * If yes, both spec and mask should be NULL.
979 * If no, both spec and mask shouldn't be NULL.
981 if ((!vxlan_spec && vxlan_mask) ||
982 (vxlan_spec && !vxlan_mask)) {
983 rte_flow_error_set(error, EINVAL,
984 RTE_FLOW_ERROR_TYPE_ITEM,
986 "Invalid VXLAN item");
991 if (vxlan_spec && vxlan_mask) {
992 list[t].type = ICE_VXLAN;
993 if (vxlan_mask->vni[0] ||
994 vxlan_mask->vni[1] ||
995 vxlan_mask->vni[2]) {
996 list[t].h_u.tnl_hdr.vni =
997 (vxlan_spec->vni[2] << 16) |
998 (vxlan_spec->vni[1] << 8) |
1000 list[t].m_u.tnl_hdr.vni =
1001 (vxlan_mask->vni[2] << 16) |
1002 (vxlan_mask->vni[1] << 8) |
1005 ICE_INSET_TUN_VXLAN_VNI;
1006 input_set_byte += 2;
1012 case RTE_FLOW_ITEM_TYPE_NVGRE:
1013 nvgre_spec = item->spec;
1014 nvgre_mask = item->mask;
1015 /* Check if NVGRE item is used to describe protocol.
1016 * If yes, both spec and mask should be NULL.
1017 * If no, both spec and mask shouldn't be NULL.
1019 if ((!nvgre_spec && nvgre_mask) ||
1020 (nvgre_spec && !nvgre_mask)) {
1021 rte_flow_error_set(error, EINVAL,
1022 RTE_FLOW_ERROR_TYPE_ITEM,
1024 "Invalid NVGRE item");
1029 if (nvgre_spec && nvgre_mask) {
1030 list[t].type = ICE_NVGRE;
1031 if (nvgre_mask->tni[0] ||
1032 nvgre_mask->tni[1] ||
1033 nvgre_mask->tni[2]) {
1034 list[t].h_u.nvgre_hdr.tni_flow =
1035 (nvgre_spec->tni[2] << 16) |
1036 (nvgre_spec->tni[1] << 8) |
1038 list[t].m_u.nvgre_hdr.tni_flow =
1039 (nvgre_mask->tni[2] << 16) |
1040 (nvgre_mask->tni[1] << 8) |
1043 ICE_INSET_TUN_NVGRE_TNI;
1044 input_set_byte += 2;
1050 case RTE_FLOW_ITEM_TYPE_VLAN:
1051 vlan_spec = item->spec;
1052 vlan_mask = item->mask;
1053 /* Check if VLAN item is used to describe protocol.
1054 * If yes, both spec and mask should be NULL.
1055 * If no, both spec and mask shouldn't be NULL.
1057 if ((!vlan_spec && vlan_mask) ||
1058 (vlan_spec && !vlan_mask)) {
1059 rte_flow_error_set(error, EINVAL,
1060 RTE_FLOW_ERROR_TYPE_ITEM,
1062 "Invalid VLAN item");
1065 if (vlan_spec && vlan_mask) {
1066 list[t].type = ICE_VLAN_OFOS;
1067 if (vlan_mask->tci) {
1068 list[t].h_u.vlan_hdr.vlan =
1070 list[t].m_u.vlan_hdr.vlan =
1072 input_set |= ICE_INSET_VLAN_OUTER;
1073 input_set_byte += 2;
1075 if (vlan_mask->inner_type) {
1076 list[t].h_u.vlan_hdr.type =
1077 vlan_spec->inner_type;
1078 list[t].m_u.vlan_hdr.type =
1079 vlan_mask->inner_type;
1080 input_set |= ICE_INSET_ETHERTYPE;
1081 input_set_byte += 2;
1087 case RTE_FLOW_ITEM_TYPE_PPPOED:
1088 case RTE_FLOW_ITEM_TYPE_PPPOES:
1089 pppoe_spec = item->spec;
1090 pppoe_mask = item->mask;
1091 /* Check if PPPoE item is used to describe protocol.
1092 * If yes, both spec and mask should be NULL.
1093 * If no, both spec and mask shouldn't be NULL.
1095 if ((!pppoe_spec && pppoe_mask) ||
1096 (pppoe_spec && !pppoe_mask)) {
1097 rte_flow_error_set(error, EINVAL,
1098 RTE_FLOW_ERROR_TYPE_ITEM,
1100 "Invalid pppoe item");
1103 pppoe_patt_valid = 1;
1104 if (pppoe_spec && pppoe_mask) {
1105 /* Check pppoe mask and update input set */
1106 if (pppoe_mask->length ||
1108 pppoe_mask->version_type) {
1109 rte_flow_error_set(error, EINVAL,
1110 RTE_FLOW_ERROR_TYPE_ITEM,
1112 "Invalid pppoe mask");
1115 list[t].type = ICE_PPPOE;
1116 if (pppoe_mask->session_id) {
1117 list[t].h_u.pppoe_hdr.session_id =
1118 pppoe_spec->session_id;
1119 list[t].m_u.pppoe_hdr.session_id =
1120 pppoe_mask->session_id;
1121 input_set |= ICE_INSET_PPPOE_SESSION;
1122 input_set_byte += 2;
1125 pppoe_elem_valid = 1;
1129 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1130 pppoe_proto_spec = item->spec;
1131 pppoe_proto_mask = item->mask;
1132 /* Check if PPPoE optional proto_id item
1133 * is used to describe protocol.
1134 * If yes, both spec and mask should be NULL.
1135 * If no, both spec and mask shouldn't be NULL.
1137 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1138 (pppoe_proto_spec && !pppoe_proto_mask)) {
1139 rte_flow_error_set(error, EINVAL,
1140 RTE_FLOW_ERROR_TYPE_ITEM,
1142 "Invalid pppoe proto item");
1145 if (pppoe_proto_spec && pppoe_proto_mask) {
1146 if (pppoe_elem_valid)
1148 list[t].type = ICE_PPPOE;
1149 if (pppoe_proto_mask->proto_id) {
1150 list[t].h_u.pppoe_hdr.ppp_prot_id =
1151 pppoe_proto_spec->proto_id;
1152 list[t].m_u.pppoe_hdr.ppp_prot_id =
1153 pppoe_proto_mask->proto_id;
1154 input_set |= ICE_INSET_PPPOE_PROTO;
1155 input_set_byte += 2;
1156 pppoe_prot_valid = 1;
1158 if ((pppoe_proto_mask->proto_id &
1159 pppoe_proto_spec->proto_id) !=
1160 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1161 (pppoe_proto_mask->proto_id &
1162 pppoe_proto_spec->proto_id) !=
1163 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1164 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1166 *tun_type = ICE_SW_TUN_PPPOE;
1172 case RTE_FLOW_ITEM_TYPE_ESP:
1173 esp_spec = item->spec;
1174 esp_mask = item->mask;
1175 if ((esp_spec && !esp_mask) ||
1176 (!esp_spec && esp_mask)) {
1177 rte_flow_error_set(error, EINVAL,
1178 RTE_FLOW_ERROR_TYPE_ITEM,
1180 "Invalid esp item");
1183 /* Check esp mask and update input set */
1184 if (esp_mask && esp_mask->hdr.seq) {
1185 rte_flow_error_set(error, EINVAL,
1186 RTE_FLOW_ERROR_TYPE_ITEM,
1188 "Invalid esp mask");
1192 if (!esp_spec && !esp_mask && !input_set) {
1194 if (ipv6_valid && udp_valid)
1196 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1197 else if (ipv6_valid)
1198 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1199 else if (ipv4_valid)
1201 } else if (esp_spec && esp_mask &&
1204 list[t].type = ICE_NAT_T;
1206 list[t].type = ICE_ESP;
1207 list[t].h_u.esp_hdr.spi =
1209 list[t].m_u.esp_hdr.spi =
1211 input_set |= ICE_INSET_ESP_SPI;
1212 input_set_byte += 4;
1216 if (!profile_rule) {
1217 if (ipv6_valid && udp_valid)
1218 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1219 else if (ipv4_valid && udp_valid)
1220 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1221 else if (ipv6_valid)
1222 *tun_type = ICE_SW_TUN_IPV6_ESP;
1223 else if (ipv4_valid)
1224 *tun_type = ICE_SW_TUN_IPV4_ESP;
1228 case RTE_FLOW_ITEM_TYPE_AH:
1229 ah_spec = item->spec;
1230 ah_mask = item->mask;
1231 if ((ah_spec && !ah_mask) ||
1232 (!ah_spec && ah_mask)) {
1233 rte_flow_error_set(error, EINVAL,
1234 RTE_FLOW_ERROR_TYPE_ITEM,
1239 /* Check ah mask and update input set */
1241 (ah_mask->next_hdr ||
1242 ah_mask->payload_len ||
1244 ah_mask->reserved)) {
1245 rte_flow_error_set(error, EINVAL,
1246 RTE_FLOW_ERROR_TYPE_ITEM,
1252 if (!ah_spec && !ah_mask && !input_set) {
1254 if (ipv6_valid && udp_valid)
1256 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1257 else if (ipv6_valid)
1258 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1259 else if (ipv4_valid)
1261 } else if (ah_spec && ah_mask &&
1263 list[t].type = ICE_AH;
1264 list[t].h_u.ah_hdr.spi =
1266 list[t].m_u.ah_hdr.spi =
1268 input_set |= ICE_INSET_AH_SPI;
1269 input_set_byte += 4;
1273 if (!profile_rule) {
1276 else if (ipv6_valid)
1277 *tun_type = ICE_SW_TUN_IPV6_AH;
1278 else if (ipv4_valid)
1279 *tun_type = ICE_SW_TUN_IPV4_AH;
1283 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1284 l2tp_spec = item->spec;
1285 l2tp_mask = item->mask;
1286 if ((l2tp_spec && !l2tp_mask) ||
1287 (!l2tp_spec && l2tp_mask)) {
1288 rte_flow_error_set(error, EINVAL,
1289 RTE_FLOW_ERROR_TYPE_ITEM,
1291 "Invalid l2tp item");
1295 if (!l2tp_spec && !l2tp_mask && !input_set) {
1298 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1299 else if (ipv4_valid)
1301 } else if (l2tp_spec && l2tp_mask &&
1302 l2tp_mask->session_id){
1303 list[t].type = ICE_L2TPV3;
1304 list[t].h_u.l2tpv3_sess_hdr.session_id =
1305 l2tp_spec->session_id;
1306 list[t].m_u.l2tpv3_sess_hdr.session_id =
1307 l2tp_mask->session_id;
1308 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1309 input_set_byte += 4;
1313 if (!profile_rule) {
1316 ICE_SW_TUN_IPV6_L2TPV3;
1317 else if (ipv4_valid)
1319 ICE_SW_TUN_IPV4_L2TPV3;
1323 case RTE_FLOW_ITEM_TYPE_PFCP:
1324 pfcp_spec = item->spec;
1325 pfcp_mask = item->mask;
1326 /* Check if PFCP item is used to describe protocol.
1327 * If yes, both spec and mask should be NULL.
1328 * If no, both spec and mask shouldn't be NULL.
1330 if ((!pfcp_spec && pfcp_mask) ||
1331 (pfcp_spec && !pfcp_mask)) {
1332 rte_flow_error_set(error, EINVAL,
1333 RTE_FLOW_ERROR_TYPE_ITEM,
1335 "Invalid PFCP item");
1338 if (pfcp_spec && pfcp_mask) {
1339 /* Check pfcp mask and update input set */
1340 if (pfcp_mask->msg_type ||
1341 pfcp_mask->msg_len ||
1343 rte_flow_error_set(error, EINVAL,
1344 RTE_FLOW_ERROR_TYPE_ITEM,
1346 "Invalid pfcp mask");
1349 if (pfcp_mask->s_field &&
1350 pfcp_spec->s_field == 0x01 &&
1353 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1354 else if (pfcp_mask->s_field &&
1355 pfcp_spec->s_field == 0x01)
1357 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1358 else if (pfcp_mask->s_field &&
1359 !pfcp_spec->s_field &&
1362 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1363 else if (pfcp_mask->s_field &&
1364 !pfcp_spec->s_field)
1366 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1372 case RTE_FLOW_ITEM_TYPE_VOID:
1376 rte_flow_error_set(error, EINVAL,
1377 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1378 "Invalid pattern item.");
1383 if (pppoe_patt_valid && !pppoe_prot_valid) {
1384 if (ipv6_valid && udp_valid)
1385 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1386 else if (ipv6_valid && tcp_valid)
1387 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1388 else if (ipv4_valid && udp_valid)
1389 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1390 else if (ipv4_valid && tcp_valid)
1391 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1392 else if (ipv6_valid)
1393 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1394 else if (ipv4_valid)
1395 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1397 *tun_type = ICE_SW_TUN_PPPOE;
1400 if (*tun_type == ICE_NON_TUN) {
1402 *tun_type = ICE_SW_TUN_VXLAN;
1403 else if (nvgre_valid)
1404 *tun_type = ICE_SW_TUN_NVGRE;
1405 else if (ipv4_valid && tcp_valid)
1406 *tun_type = ICE_SW_IPV4_TCP;
1407 else if (ipv4_valid && udp_valid)
1408 *tun_type = ICE_SW_IPV4_UDP;
1409 else if (ipv6_valid && tcp_valid)
1410 *tun_type = ICE_SW_IPV6_TCP;
1411 else if (ipv6_valid && udp_valid)
1412 *tun_type = ICE_SW_IPV6_UDP;
1415 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1416 rte_flow_error_set(error, EINVAL,
1417 RTE_FLOW_ERROR_TYPE_ITEM,
1419 "too much input set");
1431 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1432 const struct rte_flow_action *actions,
1433 struct rte_flow_error *error,
1434 struct ice_adv_rule_info *rule_info)
1436 const struct rte_flow_action_vf *act_vf;
1437 const struct rte_flow_action *action;
1438 enum rte_flow_action_type action_type;
1440 for (action = actions; action->type !=
1441 RTE_FLOW_ACTION_TYPE_END; action++) {
1442 action_type = action->type;
1443 switch (action_type) {
1444 case RTE_FLOW_ACTION_TYPE_VF:
1445 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1446 act_vf = action->conf;
1448 if (act_vf->id >= ad->real_hw.num_vfs &&
1449 !act_vf->original) {
1450 rte_flow_error_set(error,
1451 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1457 if (act_vf->original)
1458 rule_info->sw_act.vsi_handle =
1459 ad->real_hw.avf.bus.func;
1461 rule_info->sw_act.vsi_handle = act_vf->id;
1464 case RTE_FLOW_ACTION_TYPE_DROP:
1465 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1469 rte_flow_error_set(error,
1470 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1472 "Invalid action type");
1477 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1478 rule_info->sw_act.flag = ICE_FLTR_RX;
1480 rule_info->priority = 5;
1486 ice_switch_parse_action(struct ice_pf *pf,
1487 const struct rte_flow_action *actions,
1488 struct rte_flow_error *error,
1489 struct ice_adv_rule_info *rule_info)
1491 struct ice_vsi *vsi = pf->main_vsi;
1492 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1493 const struct rte_flow_action_queue *act_q;
1494 const struct rte_flow_action_rss *act_qgrop;
1495 uint16_t base_queue, i;
1496 const struct rte_flow_action *action;
1497 enum rte_flow_action_type action_type;
1498 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1499 2, 4, 8, 16, 32, 64, 128};
1501 base_queue = pf->base_queue + vsi->base_queue;
1502 for (action = actions; action->type !=
1503 RTE_FLOW_ACTION_TYPE_END; action++) {
1504 action_type = action->type;
1505 switch (action_type) {
1506 case RTE_FLOW_ACTION_TYPE_RSS:
1507 act_qgrop = action->conf;
1508 if (act_qgrop->queue_num <= 1)
1510 rule_info->sw_act.fltr_act =
1512 rule_info->sw_act.fwd_id.q_id =
1513 base_queue + act_qgrop->queue[0];
1514 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1515 if (act_qgrop->queue_num ==
1516 valid_qgrop_number[i])
1519 if (i == MAX_QGRP_NUM_TYPE)
1521 if ((act_qgrop->queue[0] +
1522 act_qgrop->queue_num) >
1523 dev->data->nb_rx_queues)
1525 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1526 if (act_qgrop->queue[i + 1] !=
1527 act_qgrop->queue[i] + 1)
1529 rule_info->sw_act.qgrp_size =
1530 act_qgrop->queue_num;
1532 case RTE_FLOW_ACTION_TYPE_QUEUE:
1533 act_q = action->conf;
1534 if (act_q->index >= dev->data->nb_rx_queues)
1536 rule_info->sw_act.fltr_act =
1538 rule_info->sw_act.fwd_id.q_id =
1539 base_queue + act_q->index;
1542 case RTE_FLOW_ACTION_TYPE_DROP:
1543 rule_info->sw_act.fltr_act =
1547 case RTE_FLOW_ACTION_TYPE_VOID:
1555 rule_info->sw_act.vsi_handle = vsi->idx;
1557 rule_info->sw_act.src = vsi->idx;
1558 rule_info->priority = 5;
1563 rte_flow_error_set(error,
1564 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1566 "Invalid action type or queue number");
1570 rte_flow_error_set(error,
1571 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1573 "Invalid queue region indexes");
1577 rte_flow_error_set(error,
1578 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1580 "Discontinuous queue region");
1585 ice_switch_check_action(const struct rte_flow_action *actions,
1586 struct rte_flow_error *error)
1588 const struct rte_flow_action *action;
1589 enum rte_flow_action_type action_type;
1590 uint16_t actions_num = 0;
1592 for (action = actions; action->type !=
1593 RTE_FLOW_ACTION_TYPE_END; action++) {
1594 action_type = action->type;
1595 switch (action_type) {
1596 case RTE_FLOW_ACTION_TYPE_VF:
1597 case RTE_FLOW_ACTION_TYPE_RSS:
1598 case RTE_FLOW_ACTION_TYPE_QUEUE:
1599 case RTE_FLOW_ACTION_TYPE_DROP:
1602 case RTE_FLOW_ACTION_TYPE_VOID:
1605 rte_flow_error_set(error,
1606 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1608 "Invalid action type");
1613 if (actions_num != 1) {
1614 rte_flow_error_set(error,
1615 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1617 "Invalid action number");
1625 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1628 case ICE_SW_TUN_PROFID_IPV6_ESP:
1629 case ICE_SW_TUN_PROFID_IPV6_AH:
1630 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1631 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1632 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1633 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1634 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1635 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1645 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1646 struct ice_pattern_match_item *array,
1648 const struct rte_flow_item pattern[],
1649 const struct rte_flow_action actions[],
1651 struct rte_flow_error *error)
1653 struct ice_pf *pf = &ad->pf;
1654 uint64_t inputset = 0;
1656 struct sw_meta *sw_meta_ptr = NULL;
1657 struct ice_adv_rule_info rule_info;
1658 struct ice_adv_lkup_elem *list = NULL;
1659 uint16_t lkups_num = 0;
1660 const struct rte_flow_item *item = pattern;
1661 uint16_t item_num = 0;
1662 enum ice_sw_tunnel_type tun_type =
1664 struct ice_pattern_match_item *pattern_match_item = NULL;
1666 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1668 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1669 const struct rte_flow_item_eth *eth_mask;
1671 eth_mask = item->mask;
1674 if (eth_mask->type == UINT16_MAX)
1675 tun_type = ICE_SW_TUN_AND_NON_TUN;
1677 /* reserve one more memory slot for ETH which may
1678 * consume 2 lookup items.
1680 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1684 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1686 rte_flow_error_set(error, EINVAL,
1687 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1688 "No memory for PMD internal items");
1693 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1695 rte_flow_error_set(error, EINVAL,
1696 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1697 "No memory for sw_pattern_meta_ptr");
1701 pattern_match_item =
1702 ice_search_pattern_match_item(pattern, array, array_len, error);
1703 if (!pattern_match_item) {
1704 rte_flow_error_set(error, EINVAL,
1705 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1706 "Invalid input pattern");
1710 inputset = ice_switch_inset_get
1711 (pattern, error, list, &lkups_num, &tun_type);
1712 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1713 (inputset & ~pattern_match_item->input_set_mask)) {
1714 rte_flow_error_set(error, EINVAL,
1715 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1717 "Invalid input set");
1721 memset(&rule_info, 0, sizeof(rule_info));
1722 rule_info.tun_type = tun_type;
1724 ret = ice_switch_check_action(actions, error);
1728 if (ad->hw.dcf_enabled)
1729 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1732 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1738 *meta = sw_meta_ptr;
1739 ((struct sw_meta *)*meta)->list = list;
1740 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1741 ((struct sw_meta *)*meta)->rule_info = rule_info;
1744 rte_free(sw_meta_ptr);
1747 rte_free(pattern_match_item);
1753 rte_free(sw_meta_ptr);
1754 rte_free(pattern_match_item);
1760 ice_switch_query(struct ice_adapter *ad __rte_unused,
1761 struct rte_flow *flow __rte_unused,
1762 struct rte_flow_query_count *count __rte_unused,
1763 struct rte_flow_error *error)
1765 rte_flow_error_set(error, EINVAL,
1766 RTE_FLOW_ERROR_TYPE_HANDLE,
1768 "count action not supported by switch filter");
1774 ice_switch_redirect(struct ice_adapter *ad,
1775 struct rte_flow *flow,
1776 struct ice_flow_redirect *rd)
1778 struct ice_rule_query_data *rdata = flow->rule;
1779 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1780 struct ice_adv_lkup_elem *lkups_dp = NULL;
1781 struct LIST_HEAD_TYPE *list_head;
1782 struct ice_adv_rule_info rinfo;
1783 struct ice_hw *hw = &ad->hw;
1784 struct ice_switch_info *sw;
1788 if (rdata->vsi_handle != rd->vsi_handle)
1791 sw = hw->switch_info;
1792 if (!sw->recp_list[rdata->rid].recp_created)
1795 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1798 list_head = &sw->recp_list[rdata->rid].filt_rules;
1799 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1801 rinfo = list_itr->rule_info;
1802 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1803 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1804 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1805 (rinfo.fltr_rule_id == rdata->rule_id &&
1806 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1807 lkups_cnt = list_itr->lkups_cnt;
1808 lkups_dp = (struct ice_adv_lkup_elem *)
1809 ice_memdup(hw, list_itr->lkups,
1810 sizeof(*list_itr->lkups) *
1811 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1814 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1818 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1819 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1820 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1829 /* Remove the old rule */
1830 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1833 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1839 /* Update VSI context */
1840 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1842 /* Replay the rule */
1843 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1846 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1851 ice_free(hw, lkups_dp);
1856 ice_switch_init(struct ice_adapter *ad)
1859 struct ice_flow_parser *dist_parser;
1860 struct ice_flow_parser *perm_parser;
1862 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1863 dist_parser = &ice_switch_dist_parser_comms;
1864 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1865 dist_parser = &ice_switch_dist_parser_os;
1869 if (ad->devargs.pipe_mode_support) {
1870 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1871 perm_parser = &ice_switch_perm_parser_comms;
1873 perm_parser = &ice_switch_perm_parser_os;
1875 ret = ice_register_parser(perm_parser, ad);
1877 ret = ice_register_parser(dist_parser, ad);
1883 ice_switch_uninit(struct ice_adapter *ad)
1885 struct ice_flow_parser *dist_parser;
1886 struct ice_flow_parser *perm_parser;
1888 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1889 dist_parser = &ice_switch_dist_parser_comms;
1890 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1891 dist_parser = &ice_switch_dist_parser_os;
1895 if (ad->devargs.pipe_mode_support) {
1896 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1897 perm_parser = &ice_switch_perm_parser_comms;
1899 perm_parser = &ice_switch_perm_parser_os;
1901 ice_unregister_parser(perm_parser, ad);
1903 ice_unregister_parser(dist_parser, ad);
1908 ice_flow_engine ice_switch_engine = {
1909 .init = ice_switch_init,
1910 .uninit = ice_switch_uninit,
1911 .create = ice_switch_create,
1912 .destroy = ice_switch_destroy,
1913 .query_count = ice_switch_query,
1914 .redirect = ice_switch_redirect,
1915 .free = ice_switch_filter_rule_free,
1916 .type = ICE_FLOW_ENGINE_SWITCH,
1920 ice_flow_parser ice_switch_dist_parser_os = {
1921 .engine = &ice_switch_engine,
1922 .array = ice_switch_pattern_dist_os,
1923 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1924 .parse_pattern_action = ice_switch_parse_pattern_action,
1925 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1929 ice_flow_parser ice_switch_dist_parser_comms = {
1930 .engine = &ice_switch_engine,
1931 .array = ice_switch_pattern_dist_comms,
1932 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1933 .parse_pattern_action = ice_switch_parse_pattern_action,
1934 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1938 ice_flow_parser ice_switch_perm_parser_os = {
1939 .engine = &ice_switch_engine,
1940 .array = ice_switch_pattern_perm_os,
1941 .array_len = RTE_DIM(ice_switch_pattern_perm_os),
1942 .parse_pattern_action = ice_switch_parse_pattern_action,
1943 .stage = ICE_FLOW_STAGE_PERMISSION,
1947 ice_flow_parser ice_switch_perm_parser_comms = {
1948 .engine = &ice_switch_engine,
1949 .array = ice_switch_pattern_perm_comms,
1950 .array_len = RTE_DIM(ice_switch_pattern_perm_comms),
1951 .parse_pattern_action = ice_switch_parse_pattern_action,
1952 .stage = ICE_FLOW_STAGE_PERMISSION,
1955 RTE_INIT(ice_sw_engine_init)
1957 struct ice_flow_engine *engine = &ice_switch_engine;
1958 ice_register_flow_engine(engine);