1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Maximum number of queue-group types; used by the queue-group action
 * handling (not visible in this chunk) — TODO confirm against full file.
 */
29 #define MAX_QGRP_NUM_TYPE 7
/* PPP protocol field values carried in a PPPoE session payload:
 * 0x0021 = IPv4, 0x0057 = IPv6 (standard PPP protocol numbers).
 */
30 #define ICE_PPP_IPV4_PROTO 0x0021
31 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 protocol number 47 (0x2F) = GRE; used to recognize NVGRE packets. */
32 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Input-set bit masks: each ICE_SW_INSET_* value is the OR of the packet
 * fields that a given flow pattern is allowed to match on.  These masks are
 * paired with patterns in the ice_pattern_match_item tables below.
 */
34 #define ICE_SW_INSET_ETHER ( \
35 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
/* NOTE(review): the last continuation line of ICE_SW_INSET_MAC_VLAN (the
 * VLAN bits, original line 38) is elided in this chunk — confirm against
 * the full file.
 */
36 #define ICE_SW_INSET_MAC_VLAN ( \
37 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
39 #define ICE_SW_INSET_MAC_IPV4 ( \
40 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
41 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
42 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
48 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
49 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
50 #define ICE_SW_INSET_MAC_IPV6 ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
53 ICE_INSET_IPV6_NEXT_HDR)
54 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
58 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
59 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
60 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
61 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Distributor-mode tunnel insets: ICE_INSET_TUN_* bits presumably select
 * tunnel/inner fields, combined with the tunnel ID (TNI/VNI) and the plain
 * IPv4 destination — verify inner/outer semantics against ice_generic_flow.h.
 */
62 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
63 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
64 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
65 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
66 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Permission(perm)-mode tunnel insets: tunnel fields only, no outer
 * IPv4 dst or tunnel-ID bits (contrast with the DIST_* masks above).
 */
84 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
92 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
93 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
94 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE insets; the *_PROTO variant additionally matches the PPP protocol
 * id carried after the PPPoE session header.
 */
95 #define ICE_SW_INSET_MAC_PPPOE ( \
96 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
97 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
98 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
99 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
100 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
101 ICE_INSET_PPPOE_PROTO)
102 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
103 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
104 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
105 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
106 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
107 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
108 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
109 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
111 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
113 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Security / tunneling protocol insets: base L3 inset plus the protocol's
 * own key field (ESP/AH SPI, L2TPv3 session id, PFCP S-flag + SEID).
 */
114 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
115 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
116 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
117 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
118 #define ICE_SW_INSET_MAC_IPV4_AH ( \
119 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
120 #define ICE_SW_INSET_MAC_IPV6_AH ( \
121 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
122 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
123 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
124 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
125 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
126 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
127 ICE_SW_INSET_MAC_IPV4 | \
128 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
129 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
130 ICE_SW_INSET_MAC_IPV6 | \
131 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* NOTE(review): these two lines are fields of the per-flow parse result
 * struct (sw_meta); the struct's opening/closing lines and its lookup-count
 * field (original lines 133, 135, 137) are elided in this chunk.
 * `list` is the array of hardware lookup elements; `rule_info` carries the
 * rule attributes passed to ice_add_adv_rule().
 */
134 struct ice_adv_lkup_elem *list;
136 struct ice_adv_rule_info rule_info;
/* Forward declarations of the three rte_flow parsers registered by this
 * file: distributor mode with the OS-default DDP package, distributor mode
 * with the comms package, and permission mode.
 */
139 static struct ice_flow_parser ice_switch_dist_parser_os;
140 static struct ice_flow_parser ice_switch_dist_parser_comms;
141 static struct ice_flow_parser ice_switch_perm_parser;
/* Pattern table for distributor mode with the comms DDP package: each entry
 * pairs a supported rte_flow pattern with the input-set mask of fields it
 * may match (third column is the unsupported/any mask, always NONE here).
 * NOTE(review): the array's storage-class line and a few pattern-name lines
 * (e.g. original lines 143, 145, 149, 155, 173) are elided in this chunk.
 */
144 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
146 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
147 {pattern_ethertype_vlan,
148 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
150 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
151 {pattern_eth_ipv4_udp,
152 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
153 {pattern_eth_ipv4_tcp,
154 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
156 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
157 {pattern_eth_ipv6_udp,
158 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
159 {pattern_eth_ipv6_tcp,
160 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* VXLAN / NVGRE tunneled patterns use the DIST_* insets defined above. */
161 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
162 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
163 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
164 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
165 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
166 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
167 {pattern_eth_ipv4_nvgre_eth_ipv4,
168 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
169 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
170 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
172 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
174 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
175 {pattern_eth_vlan_pppoes,
176 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
177 {pattern_eth_pppoes_proto,
178 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
179 {pattern_eth_vlan_pppoes_proto,
180 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
181 {pattern_eth_pppoes_ipv4,
182 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
183 {pattern_eth_pppoes_ipv4_tcp,
184 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
185 {pattern_eth_pppoes_ipv4_udp,
186 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
187 {pattern_eth_pppoes_ipv6,
188 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
189 {pattern_eth_pppoes_ipv6_tcp,
190 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
191 {pattern_eth_pppoes_ipv6_udp,
192 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
193 {pattern_eth_vlan_pppoes_ipv4,
194 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
195 {pattern_eth_vlan_pppoes_ipv4_tcp,
196 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
197 {pattern_eth_vlan_pppoes_ipv4_udp,
198 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
199 {pattern_eth_vlan_pppoes_ipv6,
200 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
201 {pattern_eth_vlan_pppoes_ipv6_tcp,
202 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
203 {pattern_eth_vlan_pppoes_ipv6_udp,
204 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
/* ESP/AH/L2TPv3/PFCP entries; the ICE_INSET_NONE input-set rows are
 * profile-based rules whose matching is decided by tunnel-profile id
 * rather than explicit fields.
 */
205 {pattern_eth_ipv4_esp,
206 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
207 {pattern_eth_ipv4_udp_esp,
208 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
209 {pattern_eth_ipv6_esp,
210 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
211 {pattern_eth_ipv6_udp_esp,
212 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_ah,
214 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
215 {pattern_eth_ipv6_ah,
216 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
217 {pattern_eth_ipv6_udp_ah,
218 ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_ipv4_l2tp,
220 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
221 {pattern_eth_ipv6_l2tp,
222 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_pfcp,
224 ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_ipv6_pfcp,
226 ICE_INSET_NONE, ICE_INSET_NONE},
/* Pattern table for distributor mode with the OS-default DDP package: a
 * reduced set (no PPPoE/ESP/AH/L2TP/PFCP) compared with the comms table.
 * NOTE(review): the storage-class line, a few pattern-name lines and the
 * closing brace (e.g. original lines 229, 231, 235, 237, 243, 261) are
 * elided in this chunk; original line 236 is the second half of an entry
 * whose pattern name is not visible.
 */
230 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
232 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
233 {pattern_ethertype_vlan,
234 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
236 ICE_INSET_NONE, ICE_INSET_NONE},
238 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
239 {pattern_eth_ipv4_udp,
240 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
241 {pattern_eth_ipv4_tcp,
242 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
244 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
245 {pattern_eth_ipv6_udp,
246 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
247 {pattern_eth_ipv6_tcp,
248 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* Tunneled patterns reuse the distributor-mode tunnel insets. */
249 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
250 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
251 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
252 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
253 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
254 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
255 {pattern_eth_ipv4_nvgre_eth_ipv4,
256 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
257 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
258 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
259 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
260 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Pattern table for permission mode: same non-tunnel patterns as the comms
 * table, but tunneled patterns use the PERM_TUNNEL_* insets (tunnel fields
 * only, no outer IPv4 dst / tunnel-ID bits).
 * NOTE(review): the storage-class line, some pattern-name lines and the
 * closing brace (e.g. original lines 263, 265, 269, 275, 293) are elided
 * in this chunk.
 */
264 ice_pattern_match_item ice_switch_pattern_perm[] = {
266 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
267 {pattern_ethertype_vlan,
268 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
270 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
271 {pattern_eth_ipv4_udp,
272 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
273 {pattern_eth_ipv4_tcp,
274 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
276 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
277 {pattern_eth_ipv6_udp,
278 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
279 {pattern_eth_ipv6_tcp,
280 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* Tunneled patterns: permission mode matches only tunnel fields. */
281 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
282 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
283 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
284 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
285 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
286 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
287 {pattern_eth_ipv4_nvgre_eth_ipv4,
288 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
289 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
290 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
291 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
292 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
294 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
295 {pattern_eth_vlan_pppoes,
296 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
297 {pattern_eth_pppoes_proto,
298 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
299 {pattern_eth_vlan_pppoes_proto,
300 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
301 {pattern_eth_pppoes_ipv4,
302 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
303 {pattern_eth_pppoes_ipv4_tcp,
304 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
305 {pattern_eth_pppoes_ipv4_udp,
306 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
307 {pattern_eth_pppoes_ipv6,
308 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
309 {pattern_eth_pppoes_ipv6_tcp,
310 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
311 {pattern_eth_pppoes_ipv6_udp,
312 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
313 {pattern_eth_vlan_pppoes_ipv4,
314 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
315 {pattern_eth_vlan_pppoes_ipv4_tcp,
316 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
317 {pattern_eth_vlan_pppoes_ipv4_udp,
318 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
319 {pattern_eth_vlan_pppoes_ipv6,
320 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
321 {pattern_eth_vlan_pppoes_ipv6_tcp,
322 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
323 {pattern_eth_vlan_pppoes_ipv6_udp,
324 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
/* ESP/AH/L2TPv3/PFCP entries, as in the comms table; ICE_INSET_NONE rows
 * are matched via tunnel-profile id rather than explicit fields.
 */
325 {pattern_eth_ipv4_esp,
326 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
327 {pattern_eth_ipv4_udp_esp,
328 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
329 {pattern_eth_ipv6_esp,
330 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
331 {pattern_eth_ipv6_udp_esp,
332 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
333 {pattern_eth_ipv4_ah,
334 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
335 {pattern_eth_ipv6_ah,
336 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
337 {pattern_eth_ipv6_udp_ah,
338 ICE_INSET_NONE, ICE_INSET_NONE},
339 {pattern_eth_ipv4_l2tp,
340 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
341 {pattern_eth_ipv6_l2tp,
342 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
343 {pattern_eth_ipv4_pfcp,
344 ICE_INSET_NONE, ICE_INSET_NONE},
345 {pattern_eth_ipv6_pfcp,
346 ICE_INSET_NONE, ICE_INSET_NONE},
/* Program one switch filter rule into hardware.
 *
 * The parse stage stored a struct sw_meta (lookup-element list, lookup
 * count and rule info) in `meta`; this function validates it, calls
 * ice_add_adv_rule() to install the rule, and saves a heap copy of the
 * returned rule id in flow->rule for later destroy/query.
 *
 * NOTE(review): this chunk elides several lines of the function (the
 * return type / `meta` parameter, `ret` declaration, goto targets,
 * returns and closing braces — original lines 352, 354-355, 362, 366,
 * 371-373, 377-378, 380, 383, 387-388, 391, 393, 397-408) — consult the
 * full file before modifying control flow here.
 */
350 ice_switch_create(struct ice_adapter *ad,
351 struct rte_flow *flow,
353 struct rte_flow_error *error)
356 struct ice_pf *pf = &ad->pf;
357 struct ice_hw *hw = ICE_PF_TO_HW(pf);
358 struct ice_rule_query_data rule_added = {0};
359 struct ice_rule_query_data *filter_ptr;
360 struct ice_adv_lkup_elem *list =
361 ((struct sw_meta *)meta)->list;
363 ((struct sw_meta *)meta)->lkups_num;
364 struct ice_adv_rule_info *rule_info =
365 &((struct sw_meta *)meta)->rule_info;
/* A rule can carry at most ICE_MAX_CHAIN_WORDS lookup words. */
367 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
370 "item number too large for rule");
374 rte_flow_error_set(error, EINVAL,
375 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
376 "lookup list should not be NULL");
/* Install the rule; rule_added receives the hardware rule id. */
379 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
381 filter_ptr = rte_zmalloc("ice_switch_filter",
382 sizeof(struct ice_rule_query_data), 0);
384 rte_flow_error_set(error, EINVAL,
385 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
386 "No memory for ice_switch_filter");
/* flow->rule takes ownership of the copied rule id; freed in
 * ice_switch_filter_rule_free() / ice_switch_destroy().
 */
389 flow->rule = filter_ptr;
390 rte_memcpy(filter_ptr,
392 sizeof(struct ice_rule_query_data));
394 rte_flow_error_set(error, EINVAL,
395 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
396 "switch filter create flow fail");
/* Remove a switch filter rule previously installed by ice_switch_create.
 *
 * flow->rule holds the saved ice_rule_query_data; it identifies the
 * hardware rule to ice_rem_adv_rule_by_id() and is freed once the rule
 * is removed.
 *
 * NOTE(review): this chunk elides the return type, `ret` declaration,
 * the flow->rule dereference tail, returns and braces (original lines
 * 411, 415, 417, 419, 421-423, 426, 428-430, 432, 436-441) — check the
 * full file before changing error handling.
 */
412 ice_switch_destroy(struct ice_adapter *ad,
413 struct rte_flow *flow,
414 struct rte_flow_error *error)
416 struct ice_hw *hw = &ad->hw;
418 struct ice_rule_query_data *filter_ptr;
420 filter_ptr = (struct ice_rule_query_data *)
/* No saved rule id: this flow was not created by the switch filter. */
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
427 " create by switch filter");
431 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
433 rte_flow_error_set(error, EINVAL,
434 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
435 "fail to destroy switch filter rule");
/* Release the saved rule id after the hardware rule is gone. */
439 rte_free(filter_ptr);
/* Free the per-flow rule data (the ice_rule_query_data allocated by
 * ice_switch_create).  rte_free(NULL) is a no-op, so a flow with no rule
 * attached is safe.  NOTE(review): the return-type line and braces
 * (original lines 443, 445, 447) are elided in this chunk.
 */
444 ice_switch_filter_rule_free(struct rte_flow *flow)
446 rte_free(flow->rule);
450 ice_switch_inset_get(const struct rte_flow_item pattern[],
451 struct rte_flow_error *error,
452 struct ice_adv_lkup_elem *list,
454 enum ice_sw_tunnel_type *tun_type)
456 const struct rte_flow_item *item = pattern;
457 enum rte_flow_item_type item_type;
458 const struct rte_flow_item_eth *eth_spec, *eth_mask;
459 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
460 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
461 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
462 const struct rte_flow_item_udp *udp_spec, *udp_mask;
463 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
464 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
465 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
466 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
467 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
468 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
470 const struct rte_flow_item_esp *esp_spec, *esp_mask;
471 const struct rte_flow_item_ah *ah_spec, *ah_mask;
472 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
473 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
474 uint64_t input_set = ICE_INSET_NONE;
475 bool pppoe_elem_valid = 0;
476 bool pppoe_patt_valid = 0;
477 bool pppoe_prot_valid = 0;
478 bool profile_rule = 0;
479 bool tunnel_valid = 0;
480 bool ipv6_valiad = 0;
481 bool ipv4_valiad = 0;
486 for (item = pattern; item->type !=
487 RTE_FLOW_ITEM_TYPE_END; item++) {
489 rte_flow_error_set(error, EINVAL,
490 RTE_FLOW_ERROR_TYPE_ITEM,
492 "Not support range");
495 item_type = item->type;
498 case RTE_FLOW_ITEM_TYPE_ETH:
499 eth_spec = item->spec;
500 eth_mask = item->mask;
501 if (eth_spec && eth_mask) {
502 const uint8_t *a = eth_mask->src.addr_bytes;
503 const uint8_t *b = eth_mask->dst.addr_bytes;
504 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
505 if (a[j] && tunnel_valid) {
515 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
516 if (b[j] && tunnel_valid) {
527 input_set |= ICE_INSET_ETHERTYPE;
528 list[t].type = (tunnel_valid == 0) ?
529 ICE_MAC_OFOS : ICE_MAC_IL;
530 struct ice_ether_hdr *h;
531 struct ice_ether_hdr *m;
533 h = &list[t].h_u.eth_hdr;
534 m = &list[t].m_u.eth_hdr;
535 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
536 if (eth_mask->src.addr_bytes[j]) {
538 eth_spec->src.addr_bytes[j];
540 eth_mask->src.addr_bytes[j];
543 if (eth_mask->dst.addr_bytes[j]) {
545 eth_spec->dst.addr_bytes[j];
547 eth_mask->dst.addr_bytes[j];
553 if (eth_mask->type) {
554 list[t].type = ICE_ETYPE_OL;
555 list[t].h_u.ethertype.ethtype_id =
557 list[t].m_u.ethertype.ethtype_id =
564 case RTE_FLOW_ITEM_TYPE_IPV4:
565 ipv4_spec = item->spec;
566 ipv4_mask = item->mask;
568 if (ipv4_spec && ipv4_mask) {
569 /* Check IPv4 mask and update input set */
570 if (ipv4_mask->hdr.version_ihl ||
571 ipv4_mask->hdr.total_length ||
572 ipv4_mask->hdr.packet_id ||
573 ipv4_mask->hdr.hdr_checksum) {
574 rte_flow_error_set(error, EINVAL,
575 RTE_FLOW_ERROR_TYPE_ITEM,
577 "Invalid IPv4 mask.");
582 if (ipv4_mask->hdr.type_of_service)
584 ICE_INSET_TUN_IPV4_TOS;
585 if (ipv4_mask->hdr.src_addr)
587 ICE_INSET_TUN_IPV4_SRC;
588 if (ipv4_mask->hdr.dst_addr)
590 ICE_INSET_TUN_IPV4_DST;
591 if (ipv4_mask->hdr.time_to_live)
593 ICE_INSET_TUN_IPV4_TTL;
594 if (ipv4_mask->hdr.next_proto_id)
596 ICE_INSET_TUN_IPV4_PROTO;
598 if (ipv4_mask->hdr.src_addr)
599 input_set |= ICE_INSET_IPV4_SRC;
600 if (ipv4_mask->hdr.dst_addr)
601 input_set |= ICE_INSET_IPV4_DST;
602 if (ipv4_mask->hdr.time_to_live)
603 input_set |= ICE_INSET_IPV4_TTL;
604 if (ipv4_mask->hdr.next_proto_id)
606 ICE_INSET_IPV4_PROTO;
607 if (ipv4_mask->hdr.type_of_service)
611 list[t].type = (tunnel_valid == 0) ?
612 ICE_IPV4_OFOS : ICE_IPV4_IL;
613 if (ipv4_mask->hdr.src_addr) {
614 list[t].h_u.ipv4_hdr.src_addr =
615 ipv4_spec->hdr.src_addr;
616 list[t].m_u.ipv4_hdr.src_addr =
617 ipv4_mask->hdr.src_addr;
619 if (ipv4_mask->hdr.dst_addr) {
620 list[t].h_u.ipv4_hdr.dst_addr =
621 ipv4_spec->hdr.dst_addr;
622 list[t].m_u.ipv4_hdr.dst_addr =
623 ipv4_mask->hdr.dst_addr;
625 if (ipv4_mask->hdr.time_to_live) {
626 list[t].h_u.ipv4_hdr.time_to_live =
627 ipv4_spec->hdr.time_to_live;
628 list[t].m_u.ipv4_hdr.time_to_live =
629 ipv4_mask->hdr.time_to_live;
631 if (ipv4_mask->hdr.next_proto_id) {
632 list[t].h_u.ipv4_hdr.protocol =
633 ipv4_spec->hdr.next_proto_id;
634 list[t].m_u.ipv4_hdr.protocol =
635 ipv4_mask->hdr.next_proto_id;
637 if ((ipv4_spec->hdr.next_proto_id &
638 ipv4_mask->hdr.next_proto_id) ==
639 ICE_IPV4_PROTO_NVGRE)
640 *tun_type = ICE_SW_TUN_AND_NON_TUN;
641 if (ipv4_mask->hdr.type_of_service) {
642 list[t].h_u.ipv4_hdr.tos =
643 ipv4_spec->hdr.type_of_service;
644 list[t].m_u.ipv4_hdr.tos =
645 ipv4_mask->hdr.type_of_service;
651 case RTE_FLOW_ITEM_TYPE_IPV6:
652 ipv6_spec = item->spec;
653 ipv6_mask = item->mask;
655 if (ipv6_spec && ipv6_mask) {
656 if (ipv6_mask->hdr.payload_len) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ITEM,
660 "Invalid IPv6 mask");
664 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
665 if (ipv6_mask->hdr.src_addr[j] &&
668 ICE_INSET_TUN_IPV6_SRC;
670 } else if (ipv6_mask->hdr.src_addr[j]) {
671 input_set |= ICE_INSET_IPV6_SRC;
675 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
676 if (ipv6_mask->hdr.dst_addr[j] &&
679 ICE_INSET_TUN_IPV6_DST;
681 } else if (ipv6_mask->hdr.dst_addr[j]) {
682 input_set |= ICE_INSET_IPV6_DST;
686 if (ipv6_mask->hdr.proto &&
689 ICE_INSET_TUN_IPV6_NEXT_HDR;
690 else if (ipv6_mask->hdr.proto)
692 ICE_INSET_IPV6_NEXT_HDR;
693 if (ipv6_mask->hdr.hop_limits &&
696 ICE_INSET_TUN_IPV6_HOP_LIMIT;
697 else if (ipv6_mask->hdr.hop_limits)
699 ICE_INSET_IPV6_HOP_LIMIT;
700 if ((ipv6_mask->hdr.vtc_flow &
702 (RTE_IPV6_HDR_TC_MASK)) &&
705 ICE_INSET_TUN_IPV6_TC;
706 else if (ipv6_mask->hdr.vtc_flow &
708 (RTE_IPV6_HDR_TC_MASK))
709 input_set |= ICE_INSET_IPV6_TC;
711 list[t].type = (tunnel_valid == 0) ?
712 ICE_IPV6_OFOS : ICE_IPV6_IL;
713 struct ice_ipv6_hdr *f;
714 struct ice_ipv6_hdr *s;
715 f = &list[t].h_u.ipv6_hdr;
716 s = &list[t].m_u.ipv6_hdr;
717 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
718 if (ipv6_mask->hdr.src_addr[j]) {
720 ipv6_spec->hdr.src_addr[j];
722 ipv6_mask->hdr.src_addr[j];
724 if (ipv6_mask->hdr.dst_addr[j]) {
726 ipv6_spec->hdr.dst_addr[j];
728 ipv6_mask->hdr.dst_addr[j];
731 if (ipv6_mask->hdr.proto) {
733 ipv6_spec->hdr.proto;
735 ipv6_mask->hdr.proto;
737 if (ipv6_mask->hdr.hop_limits) {
739 ipv6_spec->hdr.hop_limits;
741 ipv6_mask->hdr.hop_limits;
743 if (ipv6_mask->hdr.vtc_flow &
745 (RTE_IPV6_HDR_TC_MASK)) {
746 struct ice_le_ver_tc_flow vtf;
747 vtf.u.fld.version = 0;
748 vtf.u.fld.flow_label = 0;
749 vtf.u.fld.tc = (rte_be_to_cpu_32
750 (ipv6_spec->hdr.vtc_flow) &
751 RTE_IPV6_HDR_TC_MASK) >>
752 RTE_IPV6_HDR_TC_SHIFT;
753 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
754 vtf.u.fld.tc = (rte_be_to_cpu_32
755 (ipv6_mask->hdr.vtc_flow) &
756 RTE_IPV6_HDR_TC_MASK) >>
757 RTE_IPV6_HDR_TC_SHIFT;
758 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
764 case RTE_FLOW_ITEM_TYPE_UDP:
765 udp_spec = item->spec;
766 udp_mask = item->mask;
768 if (udp_spec && udp_mask) {
769 /* Check UDP mask and update input set*/
770 if (udp_mask->hdr.dgram_len ||
771 udp_mask->hdr.dgram_cksum) {
772 rte_flow_error_set(error, EINVAL,
773 RTE_FLOW_ERROR_TYPE_ITEM,
780 if (udp_mask->hdr.src_port)
782 ICE_INSET_TUN_UDP_SRC_PORT;
783 if (udp_mask->hdr.dst_port)
785 ICE_INSET_TUN_UDP_DST_PORT;
787 if (udp_mask->hdr.src_port)
789 ICE_INSET_UDP_SRC_PORT;
790 if (udp_mask->hdr.dst_port)
792 ICE_INSET_UDP_DST_PORT;
794 if (*tun_type == ICE_SW_TUN_VXLAN &&
796 list[t].type = ICE_UDP_OF;
798 list[t].type = ICE_UDP_ILOS;
799 if (udp_mask->hdr.src_port) {
800 list[t].h_u.l4_hdr.src_port =
801 udp_spec->hdr.src_port;
802 list[t].m_u.l4_hdr.src_port =
803 udp_mask->hdr.src_port;
805 if (udp_mask->hdr.dst_port) {
806 list[t].h_u.l4_hdr.dst_port =
807 udp_spec->hdr.dst_port;
808 list[t].m_u.l4_hdr.dst_port =
809 udp_mask->hdr.dst_port;
815 case RTE_FLOW_ITEM_TYPE_TCP:
816 tcp_spec = item->spec;
817 tcp_mask = item->mask;
819 if (tcp_spec && tcp_mask) {
820 /* Check TCP mask and update input set */
821 if (tcp_mask->hdr.sent_seq ||
822 tcp_mask->hdr.recv_ack ||
823 tcp_mask->hdr.data_off ||
824 tcp_mask->hdr.tcp_flags ||
825 tcp_mask->hdr.rx_win ||
826 tcp_mask->hdr.cksum ||
827 tcp_mask->hdr.tcp_urp) {
828 rte_flow_error_set(error, EINVAL,
829 RTE_FLOW_ERROR_TYPE_ITEM,
836 if (tcp_mask->hdr.src_port)
838 ICE_INSET_TUN_TCP_SRC_PORT;
839 if (tcp_mask->hdr.dst_port)
841 ICE_INSET_TUN_TCP_DST_PORT;
843 if (tcp_mask->hdr.src_port)
845 ICE_INSET_TCP_SRC_PORT;
846 if (tcp_mask->hdr.dst_port)
848 ICE_INSET_TCP_DST_PORT;
850 list[t].type = ICE_TCP_IL;
851 if (tcp_mask->hdr.src_port) {
852 list[t].h_u.l4_hdr.src_port =
853 tcp_spec->hdr.src_port;
854 list[t].m_u.l4_hdr.src_port =
855 tcp_mask->hdr.src_port;
857 if (tcp_mask->hdr.dst_port) {
858 list[t].h_u.l4_hdr.dst_port =
859 tcp_spec->hdr.dst_port;
860 list[t].m_u.l4_hdr.dst_port =
861 tcp_mask->hdr.dst_port;
867 case RTE_FLOW_ITEM_TYPE_SCTP:
868 sctp_spec = item->spec;
869 sctp_mask = item->mask;
870 if (sctp_spec && sctp_mask) {
871 /* Check SCTP mask and update input set */
872 if (sctp_mask->hdr.cksum) {
873 rte_flow_error_set(error, EINVAL,
874 RTE_FLOW_ERROR_TYPE_ITEM,
876 "Invalid SCTP mask");
881 if (sctp_mask->hdr.src_port)
883 ICE_INSET_TUN_SCTP_SRC_PORT;
884 if (sctp_mask->hdr.dst_port)
886 ICE_INSET_TUN_SCTP_DST_PORT;
888 if (sctp_mask->hdr.src_port)
890 ICE_INSET_SCTP_SRC_PORT;
891 if (sctp_mask->hdr.dst_port)
893 ICE_INSET_SCTP_DST_PORT;
895 list[t].type = ICE_SCTP_IL;
896 if (sctp_mask->hdr.src_port) {
897 list[t].h_u.sctp_hdr.src_port =
898 sctp_spec->hdr.src_port;
899 list[t].m_u.sctp_hdr.src_port =
900 sctp_mask->hdr.src_port;
902 if (sctp_mask->hdr.dst_port) {
903 list[t].h_u.sctp_hdr.dst_port =
904 sctp_spec->hdr.dst_port;
905 list[t].m_u.sctp_hdr.dst_port =
906 sctp_mask->hdr.dst_port;
912 case RTE_FLOW_ITEM_TYPE_VXLAN:
913 vxlan_spec = item->spec;
914 vxlan_mask = item->mask;
915 /* Check if VXLAN item is used to describe protocol.
916 * If yes, both spec and mask should be NULL.
917 * If no, both spec and mask shouldn't be NULL.
919 if ((!vxlan_spec && vxlan_mask) ||
920 (vxlan_spec && !vxlan_mask)) {
921 rte_flow_error_set(error, EINVAL,
922 RTE_FLOW_ERROR_TYPE_ITEM,
924 "Invalid VXLAN item");
929 if (vxlan_spec && vxlan_mask) {
930 list[t].type = ICE_VXLAN;
931 if (vxlan_mask->vni[0] ||
932 vxlan_mask->vni[1] ||
933 vxlan_mask->vni[2]) {
934 list[t].h_u.tnl_hdr.vni =
935 (vxlan_spec->vni[2] << 16) |
936 (vxlan_spec->vni[1] << 8) |
938 list[t].m_u.tnl_hdr.vni =
939 (vxlan_mask->vni[2] << 16) |
940 (vxlan_mask->vni[1] << 8) |
943 ICE_INSET_TUN_VXLAN_VNI;
949 case RTE_FLOW_ITEM_TYPE_NVGRE:
950 nvgre_spec = item->spec;
951 nvgre_mask = item->mask;
952 /* Check if NVGRE item is used to describe protocol.
953 * If yes, both spec and mask should be NULL.
954 * If no, both spec and mask shouldn't be NULL.
956 if ((!nvgre_spec && nvgre_mask) ||
957 (nvgre_spec && !nvgre_mask)) {
958 rte_flow_error_set(error, EINVAL,
959 RTE_FLOW_ERROR_TYPE_ITEM,
961 "Invalid NVGRE item");
965 if (nvgre_spec && nvgre_mask) {
966 list[t].type = ICE_NVGRE;
967 if (nvgre_mask->tni[0] ||
968 nvgre_mask->tni[1] ||
969 nvgre_mask->tni[2]) {
970 list[t].h_u.nvgre_hdr.tni_flow =
971 (nvgre_spec->tni[2] << 16) |
972 (nvgre_spec->tni[1] << 8) |
974 list[t].m_u.nvgre_hdr.tni_flow =
975 (nvgre_mask->tni[2] << 16) |
976 (nvgre_mask->tni[1] << 8) |
979 ICE_INSET_TUN_NVGRE_TNI;
985 case RTE_FLOW_ITEM_TYPE_VLAN:
986 vlan_spec = item->spec;
987 vlan_mask = item->mask;
988 /* Check if VLAN item is used to describe protocol.
989 * If yes, both spec and mask should be NULL.
990 * If no, both spec and mask shouldn't be NULL.
992 if ((!vlan_spec && vlan_mask) ||
993 (vlan_spec && !vlan_mask)) {
994 rte_flow_error_set(error, EINVAL,
995 RTE_FLOW_ERROR_TYPE_ITEM,
997 "Invalid VLAN item");
1000 if (vlan_spec && vlan_mask) {
1001 list[t].type = ICE_VLAN_OFOS;
1002 if (vlan_mask->tci) {
1003 list[t].h_u.vlan_hdr.vlan =
1005 list[t].m_u.vlan_hdr.vlan =
1007 input_set |= ICE_INSET_VLAN_OUTER;
1009 if (vlan_mask->inner_type) {
1010 list[t].h_u.vlan_hdr.type =
1011 vlan_spec->inner_type;
1012 list[t].m_u.vlan_hdr.type =
1013 vlan_mask->inner_type;
1014 input_set |= ICE_INSET_ETHERTYPE;
1020 case RTE_FLOW_ITEM_TYPE_PPPOED:
1021 case RTE_FLOW_ITEM_TYPE_PPPOES:
1022 pppoe_spec = item->spec;
1023 pppoe_mask = item->mask;
1024 /* Check if PPPoE item is used to describe protocol.
1025 * If yes, both spec and mask should be NULL.
1026 * If no, both spec and mask shouldn't be NULL.
1028 if ((!pppoe_spec && pppoe_mask) ||
1029 (pppoe_spec && !pppoe_mask)) {
1030 rte_flow_error_set(error, EINVAL,
1031 RTE_FLOW_ERROR_TYPE_ITEM,
1033 "Invalid pppoe item");
1036 pppoe_patt_valid = 1;
1037 if (pppoe_spec && pppoe_mask) {
1038 /* Check pppoe mask and update input set */
1039 if (pppoe_mask->length ||
1041 pppoe_mask->version_type) {
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ITEM,
1045 "Invalid pppoe mask");
1048 list[t].type = ICE_PPPOE;
1049 if (pppoe_mask->session_id) {
1050 list[t].h_u.pppoe_hdr.session_id =
1051 pppoe_spec->session_id;
1052 list[t].m_u.pppoe_hdr.session_id =
1053 pppoe_mask->session_id;
1054 input_set |= ICE_INSET_PPPOE_SESSION;
1057 pppoe_elem_valid = 1;
1061 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1062 pppoe_proto_spec = item->spec;
1063 pppoe_proto_mask = item->mask;
1064 /* Check if PPPoE optional proto_id item
1065 * is used to describe protocol.
1066 * If yes, both spec and mask should be NULL.
1067 * If no, both spec and mask shouldn't be NULL.
1069 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1070 (pppoe_proto_spec && !pppoe_proto_mask)) {
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1074 "Invalid pppoe proto item");
1077 if (pppoe_proto_spec && pppoe_proto_mask) {
1078 if (pppoe_elem_valid)
1080 list[t].type = ICE_PPPOE;
1081 if (pppoe_proto_mask->proto_id) {
1082 list[t].h_u.pppoe_hdr.ppp_prot_id =
1083 pppoe_proto_spec->proto_id;
1084 list[t].m_u.pppoe_hdr.ppp_prot_id =
1085 pppoe_proto_mask->proto_id;
1086 input_set |= ICE_INSET_PPPOE_PROTO;
1088 pppoe_prot_valid = 1;
1090 if ((pppoe_proto_mask->proto_id &
1091 pppoe_proto_spec->proto_id) !=
1092 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1093 (pppoe_proto_mask->proto_id &
1094 pppoe_proto_spec->proto_id) !=
1095 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1096 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1098 *tun_type = ICE_SW_TUN_PPPOE;
1104 case RTE_FLOW_ITEM_TYPE_ESP:
1105 esp_spec = item->spec;
1106 esp_mask = item->mask;
1107 if ((esp_spec && !esp_mask) ||
1108 (!esp_spec && esp_mask)) {
1109 rte_flow_error_set(error, EINVAL,
1110 RTE_FLOW_ERROR_TYPE_ITEM,
1112 "Invalid esp item");
1115 /* Check esp mask and update input set */
1116 if (esp_mask && esp_mask->hdr.seq) {
1117 rte_flow_error_set(error, EINVAL,
1118 RTE_FLOW_ERROR_TYPE_ITEM,
1120 "Invalid esp mask");
1124 if (!esp_spec && !esp_mask && !input_set) {
1126 if (ipv6_valiad && udp_valiad)
1128 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1129 else if (ipv6_valiad)
1130 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1131 else if (ipv4_valiad)
1133 } else if (esp_spec && esp_mask &&
1136 list[t].type = ICE_NAT_T;
1138 list[t].type = ICE_ESP;
1139 list[t].h_u.esp_hdr.spi =
1141 list[t].m_u.esp_hdr.spi =
1143 input_set |= ICE_INSET_ESP_SPI;
1147 if (!profile_rule) {
1148 if (ipv6_valiad && udp_valiad)
1149 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1150 else if (ipv4_valiad && udp_valiad)
1151 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1152 else if (ipv6_valiad)
1153 *tun_type = ICE_SW_TUN_IPV6_ESP;
1154 else if (ipv4_valiad)
1155 *tun_type = ICE_SW_TUN_IPV4_ESP;
1159 case RTE_FLOW_ITEM_TYPE_AH:
1160 ah_spec = item->spec;
1161 ah_mask = item->mask;
1162 if ((ah_spec && !ah_mask) ||
1163 (!ah_spec && ah_mask)) {
1164 rte_flow_error_set(error, EINVAL,
1165 RTE_FLOW_ERROR_TYPE_ITEM,
1170 /* Check ah mask and update input set */
1172 (ah_mask->next_hdr ||
1173 ah_mask->payload_len ||
1175 ah_mask->reserved)) {
1176 rte_flow_error_set(error, EINVAL,
1177 RTE_FLOW_ERROR_TYPE_ITEM,
1183 if (!ah_spec && !ah_mask && !input_set) {
1185 if (ipv6_valiad && udp_valiad)
1187 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1188 else if (ipv6_valiad)
1189 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1190 else if (ipv4_valiad)
1192 } else if (ah_spec && ah_mask &&
1194 list[t].type = ICE_AH;
1195 list[t].h_u.ah_hdr.spi =
1197 list[t].m_u.ah_hdr.spi =
1199 input_set |= ICE_INSET_AH_SPI;
1203 if (!profile_rule) {
1206 else if (ipv6_valiad)
1207 *tun_type = ICE_SW_TUN_IPV6_AH;
1208 else if (ipv4_valiad)
1209 *tun_type = ICE_SW_TUN_IPV4_AH;
1213 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1214 l2tp_spec = item->spec;
1215 l2tp_mask = item->mask;
1216 if ((l2tp_spec && !l2tp_mask) ||
1217 (!l2tp_spec && l2tp_mask)) {
1218 rte_flow_error_set(error, EINVAL,
1219 RTE_FLOW_ERROR_TYPE_ITEM,
1221 "Invalid l2tp item");
1225 if (!l2tp_spec && !l2tp_mask && !input_set) {
1228 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1229 else if (ipv4_valiad)
1231 } else if (l2tp_spec && l2tp_mask &&
1232 l2tp_mask->session_id){
1233 list[t].type = ICE_L2TPV3;
1234 list[t].h_u.l2tpv3_sess_hdr.session_id =
1235 l2tp_spec->session_id;
1236 list[t].m_u.l2tpv3_sess_hdr.session_id =
1237 l2tp_mask->session_id;
1238 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1242 if (!profile_rule) {
1245 ICE_SW_TUN_IPV6_L2TPV3;
1246 else if (ipv4_valiad)
1248 ICE_SW_TUN_IPV4_L2TPV3;
1252 case RTE_FLOW_ITEM_TYPE_PFCP:
1253 pfcp_spec = item->spec;
1254 pfcp_mask = item->mask;
1255 /* Check if PFCP item is used to describe protocol.
1256 * If yes, both spec and mask should be NULL.
1257 * If no, both spec and mask shouldn't be NULL.
1259 if ((!pfcp_spec && pfcp_mask) ||
1260 (pfcp_spec && !pfcp_mask)) {
1261 rte_flow_error_set(error, EINVAL,
1262 RTE_FLOW_ERROR_TYPE_ITEM,
1264 "Invalid PFCP item");
1267 if (pfcp_spec && pfcp_mask) {
1268 /* Check pfcp mask and update input set */
1269 if (pfcp_mask->msg_type ||
1270 pfcp_mask->msg_len ||
1272 rte_flow_error_set(error, EINVAL,
1273 RTE_FLOW_ERROR_TYPE_ITEM,
1275 "Invalid pfcp mask");
1278 if (pfcp_mask->s_field &&
1279 pfcp_spec->s_field == 0x01 &&
1282 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1283 else if (pfcp_mask->s_field &&
1284 pfcp_spec->s_field == 0x01)
1286 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1287 else if (pfcp_mask->s_field &&
1288 !pfcp_spec->s_field &&
1291 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1292 else if (pfcp_mask->s_field &&
1293 !pfcp_spec->s_field)
1295 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1301 case RTE_FLOW_ITEM_TYPE_VOID:
1305 rte_flow_error_set(error, EINVAL,
1306 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1307 "Invalid pattern item.");
1312 if (pppoe_patt_valid && !pppoe_prot_valid) {
1313 if (ipv6_valiad && udp_valiad)
1314 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1315 else if (ipv6_valiad && tcp_valiad)
1316 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1317 else if (ipv4_valiad && udp_valiad)
1318 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1319 else if (ipv4_valiad && tcp_valiad)
1320 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1321 else if (ipv6_valiad)
1322 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1323 else if (ipv4_valiad)
1324 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1326 *tun_type = ICE_SW_TUN_PPPOE;
/*
 * Parse the flow action list in DCF (Device Config Function) mode:
 * only a VF action is accepted, and it is translated into an
 * ICE_FWD_TO_VSI switch action filled into @rule_info.
 *
 * NOTE(review): this is a numbered listing with missing lines (the
 * embedded line numbers jump, e.g. 1340 -> 1342, 1355 -> 1357), so the
 * opening/closing braces, the "else" keyword, break/goto statements and
 * the final return are not visible here — verify control flow against
 * the complete file.
 */
1337 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1338 const struct rte_flow_action *actions,
1339 struct rte_flow_error *error,
1340 struct ice_adv_rule_info *rule_info)
1342 const struct rte_flow_action_vf *act_vf;
1343 const struct rte_flow_action *action;
1344 enum rte_flow_action_type action_type;
/* Walk every action until the mandatory END sentinel. */
1346 for (action = actions; action->type !=
1347 RTE_FLOW_ACTION_TYPE_END; action++) {
1348 action_type = action->type;
1349 switch (action_type) {
1350 case RTE_FLOW_ACTION_TYPE_VF:
/* Forward matched packets to the VSI that backs the target VF. */
1351 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1352 act_vf = action->conf;
/* "original" selects the DCF's own function; otherwise the VF id
 * given in the action is used as the VSI handle.
 */
1353 if (act_vf->original)
1354 rule_info->sw_act.vsi_handle =
1355 ad->real_hw.avf.bus.func;
1357 rule_info->sw_act.vsi_handle = act_vf->id;
/* Any other action type is rejected (default case presumed —
 * the case label itself is among the missing lines).
 */
1360 rte_flow_error_set(error,
1361 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1363 "Invalid action type or queue number");
/* Rule matches RX traffic sourced from the chosen VSI; priority 5
 * is the value this file uses for switch rules (see also
 * ice_switch_parse_action).
 */
1368 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1369 rule_info->sw_act.flag = ICE_FLTR_RX;
1371 rule_info->priority = 5;
/*
 * Parse flow actions for the normal (PF) path: supports forwarding to a
 * single queue, to a queue group (expressed as an RSS action), or drop,
 * filling the switch action fields of @rule_info.
 *
 * NOTE(review): numbered listing with missing lines (jumps such as
 * 1399 -> 1401, 1434 -> 1438) — break/goto statements, the error label
 * and the final return are not visible; confirm against the full file.
 */
1377 ice_switch_parse_action(struct ice_pf *pf,
1378 const struct rte_flow_action *actions,
1379 struct rte_flow_error *error,
1380 struct ice_adv_rule_info *rule_info)
1382 struct ice_vsi *vsi = pf->main_vsi;
1383 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1384 const struct rte_flow_action_queue *act_q;
1385 const struct rte_flow_action_rss *act_qgrop;
1386 uint16_t base_queue, i;
1387 const struct rte_flow_action *action;
1388 enum rte_flow_action_type action_type;
/* Only these power-of-two group sizes are accepted (2..128). */
1389 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1390 2, 4, 8, 16, 32, 64, 128};
/* Translate VSI-relative queue indices into absolute HW queue ids. */
1392 base_queue = pf->base_queue + vsi->base_queue;
1393 for (action = actions; action->type !=
1394 RTE_FLOW_ACTION_TYPE_END; action++) {
1395 action_type = action->type;
1396 switch (action_type) {
/* An RSS action here means "forward to a queue group". */
1397 case RTE_FLOW_ACTION_TYPE_RSS:
1398 act_qgrop = action->conf;
/* A group of one queue is meaningless as a group. */
1399 if (act_qgrop->queue_num <= 1)
1401 rule_info->sw_act.fltr_act =
1403 rule_info->sw_act.fwd_id.q_id =
1404 base_queue + act_qgrop->queue[0];
/* Group size must match one of the valid sizes above. */
1405 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1406 if (act_qgrop->queue_num ==
1407 valid_qgrop_number[i])
1410 if (i == MAX_QGRP_NUM_TYPE)
/* The whole group must fit inside the configured Rx queues. */
1412 if ((act_qgrop->queue[0] +
1413 act_qgrop->queue_num) >
1414 dev->data->nb_rx_queues)
/* Queues in the group must be contiguous and ascending. */
1416 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1417 if (act_qgrop->queue[i + 1] !=
1418 act_qgrop->queue[i] + 1)
1420 rule_info->sw_act.qgrp_size =
1421 act_qgrop->queue_num;
1423 case RTE_FLOW_ACTION_TYPE_QUEUE:
1424 act_q = action->conf;
/* Single-queue forward: index must name an existing Rx queue. */
1425 if (act_q->index >= dev->data->nb_rx_queues)
1427 rule_info->sw_act.fltr_act =
1429 rule_info->sw_act.fwd_id.q_id =
1430 base_queue + act_q->index;
1433 case RTE_FLOW_ACTION_TYPE_DROP:
1434 rule_info->sw_act.fltr_act =
1438 case RTE_FLOW_ACTION_TYPE_VOID:
/* Common fields: rule is owned by and sourced from the main VSI. */
1446 rule_info->sw_act.vsi_handle = vsi->idx;
1448 rule_info->sw_act.src = vsi->idx;
1449 rule_info->priority = 5;
/* Shared error exit (label line not visible in this extract). */
1454 rte_flow_error_set(error,
1455 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1457 "Invalid action type or queue number");
/*
 * Validate that the action list contains exactly one "real" action
 * (VF / RSS / QUEUE / DROP); VOID actions are ignored, anything else
 * is rejected.
 *
 * NOTE(review): numbered listing with gaps — the counter increment
 * (presumably actions_num++), break statements and returns are not
 * visible here; verify against the complete file.
 */
1462 ice_switch_check_action(const struct rte_flow_action *actions,
1463 struct rte_flow_error *error)
1465 const struct rte_flow_action *action;
1466 enum rte_flow_action_type action_type;
1467 uint16_t actions_num = 0;
1469 for (action = actions; action->type !=
1470 RTE_FLOW_ACTION_TYPE_END; action++) {
1471 action_type = action->type;
1472 switch (action_type) {
/* Terminating actions — counted (increment not visible here). */
1473 case RTE_FLOW_ACTION_TYPE_VF:
1474 case RTE_FLOW_ACTION_TYPE_RSS:
1475 case RTE_FLOW_ACTION_TYPE_QUEUE:
1476 case RTE_FLOW_ACTION_TYPE_DROP:
/* VOID is a no-op placeholder and does not count. */
1479 case RTE_FLOW_ACTION_TYPE_VOID:
1482 rte_flow_error_set(error,
1483 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1485 "Invalid action type");
/* Exactly one terminating action is required per rule. */
1490 if (actions_num != 1) {
1491 rte_flow_error_set(error,
1492 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1494 "Invalid action number");
/*
 * Report whether @tun_type identifies a "profile rule": a rule matched
 * purely by switch profile ID (ESP / AH / L2TPv3 / NAT-T / PFCP
 * variants) rather than by an explicit input set. Callers (see
 * ice_switch_parse_pattern_action) use this to allow an empty inset.
 *
 * NOTE(review): the switch statement's opening, the return statements
 * and the default case are outside this numbered extract.
 */
1502 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1505 case ICE_SW_TUN_PROFID_IPV6_ESP:
1506 case ICE_SW_TUN_PROFID_IPV6_AH:
1507 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1508 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1509 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1510 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1511 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1512 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/*
 * Top-level parse entry for the switch filter engine: derive the tunnel
 * type from the pattern, build the lookup list and input set, validate
 * the input set against the matched pattern template, parse the actions
 * (DCF or PF path), and hand everything back to the caller through
 * *meta as a struct sw_meta.
 *
 * NOTE(review): numbered listing with many missing lines (e.g. the
 * "{" lines, NULL checks after allocations, "goto error" statements and
 * the error/out labels). The visible error paths imply a goto-cleanup
 * structure — confirm against the complete file before editing.
 */
1522 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1523 struct ice_pattern_match_item *array,
1525 const struct rte_flow_item pattern[],
1526 const struct rte_flow_action actions[],
1528 struct rte_flow_error *error)
1530 struct ice_pf *pf = &ad->pf;
1531 uint64_t inputset = 0;
1533 struct sw_meta *sw_meta_ptr = NULL;
1534 struct ice_adv_rule_info rule_info;
1535 struct ice_adv_lkup_elem *list = NULL;
1536 uint16_t lkups_num = 0;
1537 const struct rte_flow_item *item = pattern;
1538 uint16_t item_num = 0;
1539 enum ice_sw_tunnel_type tun_type =
1541 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass over the pattern: count items and pick the tunnel type. */
1543 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1545 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1546 tun_type = ICE_SW_TUN_VXLAN;
1547 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1548 tun_type = ICE_SW_TUN_NVGRE;
1549 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1550 const struct rte_flow_item_eth *eth_mask;
1552 eth_mask = item->mask;
/* Fully-masked ether type: match both tunneled and plain traffic. */
1555 if (eth_mask->type == UINT16_MAX)
1556 tun_type = ICE_SW_TUN_AND_NON_TUN;
1558 /* reserve one more memory slot for ETH which may
1559 * consume 2 lookup items.
1561 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Allocate the lookup array sized by the counted items. */
1565 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1567 rte_flow_error_set(error, EINVAL,
1568 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1569 "No memory for PMD internal items");
/* Allocate the meta container handed back to the flow framework. */
1574 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1576 rte_flow_error_set(error, EINVAL,
1577 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1578 "No memory for sw_pattern_meta_ptr");
/* Match the pattern against the supported pattern templates. */
1582 pattern_match_item =
1583 ice_search_pattern_match_item(pattern, array, array_len, error);
1584 if (!pattern_match_item) {
1585 rte_flow_error_set(error, EINVAL,
1586 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1587 "Invalid input pattern");
/* Build lookups + input set; profile rules may legally have an
 * empty input set (see ice_is_profile_rule), otherwise the inset
 * must be non-empty and a subset of the template's mask.
 */
1591 inputset = ice_switch_inset_get
1592 (pattern, error, list, &lkups_num, &tun_type);
1593 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1594 (inputset & ~pattern_match_item->input_set_mask)) {
1595 rte_flow_error_set(error, EINVAL,
1596 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1598 "Invalid input set");
1602 memset(&rule_info, 0, sizeof(rule_info));
1603 rule_info.tun_type = tun_type;
1605 ret = ice_switch_check_action(actions, error);
1607 rte_flow_error_set(error, EINVAL,
1608 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1609 "Invalid input action number");
/* DCF mode parses actions differently from the normal PF path. */
1613 if (ad->hw.dcf_enabled)
1614 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1617 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1620 rte_flow_error_set(error, EINVAL,
1621 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1622 "Invalid input action");
/* Success: transfer ownership of list/sw_meta_ptr to the caller. */
1627 *meta = sw_meta_ptr;
1628 ((struct sw_meta *)*meta)->list = list;
1629 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1630 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup path(s) — labels not visible in this extract. */
1633 rte_free(sw_meta_ptr);
1636 rte_free(pattern_match_item);
1642 rte_free(sw_meta_ptr);
1643 rte_free(pattern_match_item);
/*
 * Flow query callback: counters are not implemented by the switch
 * filter engine, so any query fails with EINVAL.
 * NOTE(review): the return statement is outside this numbered extract.
 */
1649 ice_switch_query(struct ice_adapter *ad __rte_unused,
1650 struct rte_flow *flow __rte_unused,
1651 struct rte_flow_query_count *count __rte_unused,
1652 struct rte_flow_error *error)
1654 rte_flow_error_set(error, EINVAL,
1655 RTE_FLOW_ERROR_TYPE_HANDLE,
1657 "count action not supported by switch filter");
/*
 * Redirect an existing switch rule to a new VSI: find the matching
 * filter entry for this flow, duplicate its lookups, remove the old HW
 * rule, update the VSI context with the new VSI number, and replay the
 * rule so it forwards to the redirected VSI.
 *
 * NOTE(review): numbered listing with gaps — early "return 0/-EINVAL"
 * statements, the list-entry match "break", NULL checks and the final
 * return are not visible; verify against the complete file.
 */
1663 ice_switch_redirect(struct ice_adapter *ad,
1664 struct rte_flow *flow,
1665 struct ice_flow_redirect *rd)
1667 struct ice_rule_query_data *rdata = flow->rule;
1668 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1669 struct ice_adv_lkup_elem *lkups_dp = NULL;
1670 struct LIST_HEAD_TYPE *list_head;
1671 struct ice_adv_rule_info rinfo;
1672 struct ice_hw *hw = &ad->hw;
1673 struct ice_switch_info *sw;
/* Only rules on the VSI being redirected are of interest. */
1677 if (rdata->vsi_handle != rd->vsi_handle)
1680 sw = hw->switch_info;
/* Recipe must exist before its filter list can be walked. */
1681 if (!sw->recp_list[rdata->rid].recp_created)
1684 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Locate the filter entry belonging to this flow's rule id. */
1687 list_head = &sw->recp_list[rdata->rid].filt_rules;
1688 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1690 rinfo = list_itr->rule_info;
1691 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1692 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1693 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1694 (rinfo.fltr_rule_id == rdata->rule_id &&
1695 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Deep-copy the lookups: the entry is freed when the old rule
 * is removed below, but the copy is needed for the replay.
 */
1696 lkups_cnt = list_itr->lkups_cnt;
1697 lkups_dp = (struct ice_adv_lkup_elem *)
1698 ice_memdup(hw, list_itr->lkups,
1699 sizeof(*list_itr->lkups) *
1700 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1703 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* A VSI-list action is collapsed to a single-VSI forward on replay. */
1707 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1708 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1709 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1718 /* Remove the old rule */
1719 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1722 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1728 /* Update VSI context */
1729 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1731 /* Replay the rule */
1732 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1735 PMD_DRV_LOG(ERR, "Failed to replay the rule");
/* Free the duplicated lookup array on all exit paths reached here. */
1740 ice_free(hw, lkups_dp);
/*
 * Engine init hook: choose the distributor parser matching the loaded
 * DDP package type (comms vs OS default), then register either the
 * permission-stage parser (pipeline mode) or the distributor parser.
 * NOTE(review): the "ret" declaration, the final "else" branch for an
 * unknown package type and the return are outside this numbered extract.
 */
1745 ice_switch_init(struct ice_adapter *ad)
1748 struct ice_flow_parser *dist_parser;
1749 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1751 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1752 dist_parser = &ice_switch_dist_parser_comms;
1753 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1754 dist_parser = &ice_switch_dist_parser_os;
/* pipe_mode_support devarg selects the permission-stage parser. */
1758 if (ad->devargs.pipe_mode_support)
1759 ret = ice_register_parser(perm_parser, ad);
1761 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine uninit hook: mirror of ice_switch_init — pick the same
 * package-dependent distributor parser and unregister whichever parser
 * was registered (permission parser in pipeline mode, distributor
 * otherwise).
 * NOTE(review): "else" keywords and the closing brace fall outside this
 * numbered extract.
 */
1766 ice_switch_uninit(struct ice_adapter *ad)
1768 struct ice_flow_parser *dist_parser;
1769 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1771 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1772 dist_parser = &ice_switch_dist_parser_comms;
1774 dist_parser = &ice_switch_dist_parser_os;
1776 if (ad->devargs.pipe_mode_support)
1777 ice_unregister_parser(perm_parser, ad);
1779 ice_unregister_parser(dist_parser, ad);
/* Switch filter engine ops table registered with the generic flow
 * framework (see RTE_INIT below). create/destroy/free are defined
 * elsewhere in this file.
 * NOTE(review): the storage-class/"struct" line preceding this and the
 * closing "};" are outside this numbered extract.
 */
1783 ice_flow_engine ice_switch_engine = {
1784 .init = ice_switch_init,
1785 .uninit = ice_switch_uninit,
1786 .create = ice_switch_create,
1787 .destroy = ice_switch_destroy,
1788 .query_count = ice_switch_query,
1789 .redirect = ice_switch_redirect,
1790 .free = ice_switch_filter_rule_free,
1791 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser for the OS-default DDP package; selected by
 * ice_switch_init when the active package is ICE_PKG_TYPE_OS_DEFAULT.
 * NOTE(review): the storage-class/"struct" line and closing "};" are
 * outside this numbered extract.
 */
1795 ice_flow_parser ice_switch_dist_parser_os = {
1796 .engine = &ice_switch_engine,
1797 .array = ice_switch_pattern_dist_os,
1798 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1799 .parse_pattern_action = ice_switch_parse_pattern_action,
1800 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser for the comms DDP package; selected by
 * ice_switch_init when the active package is ICE_PKG_TYPE_COMMS.
 * NOTE(review): the storage-class/"struct" line and closing "};" are
 * outside this numbered extract.
 */
1804 ice_flow_parser ice_switch_dist_parser_comms = {
1805 .engine = &ice_switch_engine,
1806 .array = ice_switch_pattern_dist_comms,
1807 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1808 .parse_pattern_action = ice_switch_parse_pattern_action,
1809 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser, used when the pipe_mode_support devarg is
 * set (see ice_switch_init/uninit).
 * NOTE(review): the storage-class/"struct" line and closing "};" are
 * outside this numbered extract.
 */
1813 ice_flow_parser ice_switch_perm_parser = {
1814 .engine = &ice_switch_engine,
1815 .array = ice_switch_pattern_perm,
1816 .array_len = RTE_DIM(ice_switch_pattern_perm),
1817 .parse_pattern_action = ice_switch_parse_pattern_action,
1818 .stage = ICE_FLOW_STAGE_PERMISSION,
1821 RTE_INIT(ice_sw_engine_init)
1823 struct ice_flow_engine *engine = &ice_switch_engine;
1824 ice_register_flow_engine(engine);