1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Maximum number of VSI queue-group types supported per rule. */
29 #define MAX_QGRP_NUM_TYPE 7
/* Upper bound on the number of matched input-set bytes a rule may use. */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol field values (big-endian on the wire) matched against the
 * PPPoE proto_id item to tell PPP-carried IPv4/IPv6 payloads apart.
 */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 next-protocol value 0x2F (47, GRE); compared against the IPv4
 * header's next_proto_id to detect an NVGRE tunnel pattern.
 */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_IPV4 ( \
41 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
42 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
43 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6 ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
54 ICE_INSET_IPV6_NEXT_HDR)
55 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
56 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
59 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
60 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
61 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
62 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
63 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
64 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
65 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
66 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
67 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
80 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
82 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
84 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
85 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
86 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
87 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
91 ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
95 ICE_INSET_TUN_IPV4_TOS)
96 #define ICE_SW_INSET_MAC_PPPOE ( \
97 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
98 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
99 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
100 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
101 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
102 ICE_INSET_PPPOE_PROTO)
103 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
104 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
105 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
106 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
107 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
108 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
110 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
112 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
114 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
115 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
116 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
117 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
118 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
119 #define ICE_SW_INSET_MAC_IPV4_AH ( \
120 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
121 #define ICE_SW_INSET_MAC_IPV6_AH ( \
122 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
123 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
124 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
125 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
126 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
127 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
128 ICE_SW_INSET_MAC_IPV4 | \
129 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
130 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
131 ICE_SW_INSET_MAC_IPV6 | \
132 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
135 struct ice_adv_lkup_elem *list;
137 struct ice_adv_rule_info rule_info;
/* Forward declarations: flow parsers registered for the two switch filter
 * stages — "dist" (distributor) and "perm" (permission) — each paired with
 * its own supported-pattern list below.
 */
140 static struct ice_flow_parser ice_switch_dist_parser;
141 static struct ice_flow_parser ice_switch_perm_parser;
144 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
146 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
147 {pattern_ethertype_vlan,
148 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
150 ICE_INSET_NONE, ICE_INSET_NONE},
152 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
153 {pattern_eth_ipv4_udp,
154 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
155 {pattern_eth_ipv4_tcp,
156 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
158 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
159 {pattern_eth_ipv6_udp,
160 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
161 {pattern_eth_ipv6_tcp,
162 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
163 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
164 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
165 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
166 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
167 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
168 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
169 {pattern_eth_ipv4_nvgre_eth_ipv4,
170 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
171 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
172 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
173 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
174 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
176 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
177 {pattern_eth_vlan_pppoes,
178 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
179 {pattern_eth_pppoes_proto,
180 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
181 {pattern_eth_vlan_pppoes_proto,
182 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
183 {pattern_eth_pppoes_ipv4,
184 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
185 {pattern_eth_pppoes_ipv4_tcp,
186 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
187 {pattern_eth_pppoes_ipv4_udp,
188 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
189 {pattern_eth_pppoes_ipv6,
190 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
191 {pattern_eth_pppoes_ipv6_tcp,
192 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
193 {pattern_eth_pppoes_ipv6_udp,
194 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
195 {pattern_eth_vlan_pppoes_ipv4,
196 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
197 {pattern_eth_vlan_pppoes_ipv4_tcp,
198 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
199 {pattern_eth_vlan_pppoes_ipv4_udp,
200 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
201 {pattern_eth_vlan_pppoes_ipv6,
202 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
203 {pattern_eth_vlan_pppoes_ipv6_tcp,
204 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
205 {pattern_eth_vlan_pppoes_ipv6_udp,
206 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
207 {pattern_eth_ipv4_esp,
208 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
209 {pattern_eth_ipv4_udp_esp,
210 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
211 {pattern_eth_ipv6_esp,
212 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
213 {pattern_eth_ipv6_udp_esp,
214 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
215 {pattern_eth_ipv4_ah,
216 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
217 {pattern_eth_ipv6_ah,
218 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
219 {pattern_eth_ipv6_udp_ah,
220 ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_ipv4_l2tp,
222 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
223 {pattern_eth_ipv6_l2tp,
224 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
225 {pattern_eth_ipv4_pfcp,
226 ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_ipv6_pfcp,
228 ICE_INSET_NONE, ICE_INSET_NONE},
232 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
234 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
235 {pattern_ethertype_vlan,
236 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
238 ICE_INSET_NONE, ICE_INSET_NONE},
240 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
241 {pattern_eth_ipv4_udp,
242 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
243 {pattern_eth_ipv4_tcp,
244 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
246 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
247 {pattern_eth_ipv6_udp,
248 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
249 {pattern_eth_ipv6_tcp,
250 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
251 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
252 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
253 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
254 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
255 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
256 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
257 {pattern_eth_ipv4_nvgre_eth_ipv4,
258 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
259 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
260 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
261 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
262 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
264 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
265 {pattern_eth_vlan_pppoes,
266 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
267 {pattern_eth_pppoes_proto,
268 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
269 {pattern_eth_vlan_pppoes_proto,
270 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
271 {pattern_eth_pppoes_ipv4,
272 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
273 {pattern_eth_pppoes_ipv4_tcp,
274 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
275 {pattern_eth_pppoes_ipv4_udp,
276 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
277 {pattern_eth_pppoes_ipv6,
278 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
279 {pattern_eth_pppoes_ipv6_tcp,
280 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
281 {pattern_eth_pppoes_ipv6_udp,
282 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
283 {pattern_eth_vlan_pppoes_ipv4,
284 ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
285 {pattern_eth_vlan_pppoes_ipv4_tcp,
286 ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
287 {pattern_eth_vlan_pppoes_ipv4_udp,
288 ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
289 {pattern_eth_vlan_pppoes_ipv6,
290 ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
291 {pattern_eth_vlan_pppoes_ipv6_tcp,
292 ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
293 {pattern_eth_vlan_pppoes_ipv6_udp,
294 ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
295 {pattern_eth_ipv4_esp,
296 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
297 {pattern_eth_ipv4_udp_esp,
298 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
299 {pattern_eth_ipv6_esp,
300 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
301 {pattern_eth_ipv6_udp_esp,
302 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
303 {pattern_eth_ipv4_ah,
304 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
305 {pattern_eth_ipv6_ah,
306 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
307 {pattern_eth_ipv6_udp_ah,
308 ICE_INSET_NONE, ICE_INSET_NONE},
309 {pattern_eth_ipv4_l2tp,
310 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
311 {pattern_eth_ipv6_l2tp,
312 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
313 {pattern_eth_ipv4_pfcp,
314 ICE_INSET_NONE, ICE_INSET_NONE},
315 {pattern_eth_ipv6_pfcp,
316 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program a parsed switch filter rule into hardware.
 *
 * The lookup list, lookup count and rule info were prepared by the parse
 * stage and are carried in the opaque meta pointer (a struct sw_meta).
 * On success the returned rule identifiers are copied into a freshly
 * allocated ice_rule_query_data that is attached to flow->rule so the
 * rule can later be removed by ice_switch_destroy().
 * Sets *error and fails on invalid input, HW add failure, or OOM.
 */
320 ice_switch_create(struct ice_adapter *ad,
321 struct rte_flow *flow,
323 struct rte_flow_error *error)
326 struct ice_pf *pf = &ad->pf;
327 struct ice_hw *hw = ICE_PF_TO_HW(pf);
328 struct ice_rule_query_data rule_added = {0};
329 struct ice_rule_query_data *filter_ptr;
330 struct ice_adv_lkup_elem *list =
331 ((struct sw_meta *)meta)->list;
333 ((struct sw_meta *)meta)->lkups_num;
334 struct ice_adv_rule_info *rule_info =
335 &((struct sw_meta *)meta)->rule_info;
/* The HW advanced-rule engine accepts at most ICE_MAX_CHAIN_WORDS
 * lookup words per rule.
 */
337 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
340 "item number too large for rule");
344 rte_flow_error_set(error, EINVAL,
345 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
346 "lookup list should not be NULL");
/* Hand the rule to the shared switch code for HW programming. */
349 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
351 filter_ptr = rte_zmalloc("ice_switch_filter",
352 sizeof(struct ice_rule_query_data), 0);
354 rte_flow_error_set(error, EINVAL,
355 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
356 "No memory for ice_switch_filter");
/* Persist the rule identifiers returned by the add so destroy can
 * find the HW rule again.
 */
359 flow->rule = filter_ptr;
360 rte_memcpy(filter_ptr,
362 sizeof(struct ice_rule_query_data));
364 rte_flow_error_set(error, EINVAL,
365 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
366 "switch filter create flow fail");
/*
 * Remove a switch filter rule previously installed by ice_switch_create().
 *
 * Retrieves the ice_rule_query_data stashed on the flow, asks the shared
 * code to delete the HW rule by id, and frees the query data. Sets *error
 * and fails if the flow carries no rule data or the HW removal fails.
 */
382 ice_switch_destroy(struct ice_adapter *ad,
383 struct rte_flow *flow,
384 struct rte_flow_error *error)
386 struct ice_hw *hw = &ad->hw;
388 struct ice_rule_query_data *filter_ptr;
390 filter_ptr = (struct ice_rule_query_data *)
394 rte_flow_error_set(error, EINVAL,
395 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
397 " create by switch filter");
/* Delete the rule from HW using the ids saved at create time. */
401 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
403 rte_flow_error_set(error, EINVAL,
404 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
405 "fail to destroy switch filter rule");
/* HW rule is gone; release the per-flow query data. */
409 rte_free(filter_ptr);
/* Release the per-flow rule data without touching HW (rte_free(NULL) is a
 * no-op, so this is safe on a flow that never got a rule attached).
 */
414 ice_switch_filter_rule_free(struct rte_flow *flow)
416 rte_free(flow->rule);
420 ice_switch_inset_get(const struct rte_flow_item pattern[],
421 struct rte_flow_error *error,
422 struct ice_adv_lkup_elem *list,
424 enum ice_sw_tunnel_type *tun_type)
426 const struct rte_flow_item *item = pattern;
427 enum rte_flow_item_type item_type;
428 const struct rte_flow_item_eth *eth_spec, *eth_mask;
429 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
430 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
431 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
432 const struct rte_flow_item_udp *udp_spec, *udp_mask;
433 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
434 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
435 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
436 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
437 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
438 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
440 const struct rte_flow_item_esp *esp_spec, *esp_mask;
441 const struct rte_flow_item_ah *ah_spec, *ah_mask;
442 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
443 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
444 uint64_t input_set = ICE_INSET_NONE;
445 uint16_t input_set_byte = 0;
446 bool pppoe_elem_valid = 0;
447 bool pppoe_patt_valid = 0;
448 bool pppoe_prot_valid = 0;
449 bool tunnel_valid = 0;
450 bool profile_rule = 0;
451 bool nvgre_valid = 0;
452 bool vxlan_valid = 0;
459 for (item = pattern; item->type !=
460 RTE_FLOW_ITEM_TYPE_END; item++) {
462 rte_flow_error_set(error, EINVAL,
463 RTE_FLOW_ERROR_TYPE_ITEM,
465 "Not support range");
468 item_type = item->type;
471 case RTE_FLOW_ITEM_TYPE_ETH:
472 eth_spec = item->spec;
473 eth_mask = item->mask;
474 if (eth_spec && eth_mask) {
475 const uint8_t *a = eth_mask->src.addr_bytes;
476 const uint8_t *b = eth_mask->dst.addr_bytes;
477 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
478 if (a[j] && tunnel_valid) {
488 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
489 if (b[j] && tunnel_valid) {
500 input_set |= ICE_INSET_ETHERTYPE;
501 list[t].type = (tunnel_valid == 0) ?
502 ICE_MAC_OFOS : ICE_MAC_IL;
503 struct ice_ether_hdr *h;
504 struct ice_ether_hdr *m;
506 h = &list[t].h_u.eth_hdr;
507 m = &list[t].m_u.eth_hdr;
508 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
509 if (eth_mask->src.addr_bytes[j]) {
511 eth_spec->src.addr_bytes[j];
513 eth_mask->src.addr_bytes[j];
517 if (eth_mask->dst.addr_bytes[j]) {
519 eth_spec->dst.addr_bytes[j];
521 eth_mask->dst.addr_bytes[j];
528 if (eth_mask->type) {
529 list[t].type = ICE_ETYPE_OL;
530 list[t].h_u.ethertype.ethtype_id =
532 list[t].m_u.ethertype.ethtype_id =
540 case RTE_FLOW_ITEM_TYPE_IPV4:
541 ipv4_spec = item->spec;
542 ipv4_mask = item->mask;
544 if (ipv4_spec && ipv4_mask) {
545 /* Check IPv4 mask and update input set */
546 if (ipv4_mask->hdr.version_ihl ||
547 ipv4_mask->hdr.total_length ||
548 ipv4_mask->hdr.packet_id ||
549 ipv4_mask->hdr.hdr_checksum) {
550 rte_flow_error_set(error, EINVAL,
551 RTE_FLOW_ERROR_TYPE_ITEM,
553 "Invalid IPv4 mask.");
558 if (ipv4_mask->hdr.type_of_service)
560 ICE_INSET_TUN_IPV4_TOS;
561 if (ipv4_mask->hdr.src_addr)
563 ICE_INSET_TUN_IPV4_SRC;
564 if (ipv4_mask->hdr.dst_addr)
566 ICE_INSET_TUN_IPV4_DST;
567 if (ipv4_mask->hdr.time_to_live)
569 ICE_INSET_TUN_IPV4_TTL;
570 if (ipv4_mask->hdr.next_proto_id)
572 ICE_INSET_TUN_IPV4_PROTO;
574 if (ipv4_mask->hdr.src_addr)
575 input_set |= ICE_INSET_IPV4_SRC;
576 if (ipv4_mask->hdr.dst_addr)
577 input_set |= ICE_INSET_IPV4_DST;
578 if (ipv4_mask->hdr.time_to_live)
579 input_set |= ICE_INSET_IPV4_TTL;
580 if (ipv4_mask->hdr.next_proto_id)
582 ICE_INSET_IPV4_PROTO;
583 if (ipv4_mask->hdr.type_of_service)
587 list[t].type = (tunnel_valid == 0) ?
588 ICE_IPV4_OFOS : ICE_IPV4_IL;
589 if (ipv4_mask->hdr.src_addr) {
590 list[t].h_u.ipv4_hdr.src_addr =
591 ipv4_spec->hdr.src_addr;
592 list[t].m_u.ipv4_hdr.src_addr =
593 ipv4_mask->hdr.src_addr;
596 if (ipv4_mask->hdr.dst_addr) {
597 list[t].h_u.ipv4_hdr.dst_addr =
598 ipv4_spec->hdr.dst_addr;
599 list[t].m_u.ipv4_hdr.dst_addr =
600 ipv4_mask->hdr.dst_addr;
603 if (ipv4_mask->hdr.time_to_live) {
604 list[t].h_u.ipv4_hdr.time_to_live =
605 ipv4_spec->hdr.time_to_live;
606 list[t].m_u.ipv4_hdr.time_to_live =
607 ipv4_mask->hdr.time_to_live;
610 if (ipv4_mask->hdr.next_proto_id) {
611 list[t].h_u.ipv4_hdr.protocol =
612 ipv4_spec->hdr.next_proto_id;
613 list[t].m_u.ipv4_hdr.protocol =
614 ipv4_mask->hdr.next_proto_id;
617 if ((ipv4_spec->hdr.next_proto_id &
618 ipv4_mask->hdr.next_proto_id) ==
619 ICE_IPV4_PROTO_NVGRE)
620 *tun_type = ICE_SW_TUN_AND_NON_TUN;
621 if (ipv4_mask->hdr.type_of_service) {
622 list[t].h_u.ipv4_hdr.tos =
623 ipv4_spec->hdr.type_of_service;
624 list[t].m_u.ipv4_hdr.tos =
625 ipv4_mask->hdr.type_of_service;
632 case RTE_FLOW_ITEM_TYPE_IPV6:
633 ipv6_spec = item->spec;
634 ipv6_mask = item->mask;
636 if (ipv6_spec && ipv6_mask) {
637 if (ipv6_mask->hdr.payload_len) {
638 rte_flow_error_set(error, EINVAL,
639 RTE_FLOW_ERROR_TYPE_ITEM,
641 "Invalid IPv6 mask");
645 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
646 if (ipv6_mask->hdr.src_addr[j] &&
649 ICE_INSET_TUN_IPV6_SRC;
651 } else if (ipv6_mask->hdr.src_addr[j]) {
652 input_set |= ICE_INSET_IPV6_SRC;
656 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
657 if (ipv6_mask->hdr.dst_addr[j] &&
660 ICE_INSET_TUN_IPV6_DST;
662 } else if (ipv6_mask->hdr.dst_addr[j]) {
663 input_set |= ICE_INSET_IPV6_DST;
667 if (ipv6_mask->hdr.proto &&
670 ICE_INSET_TUN_IPV6_NEXT_HDR;
671 else if (ipv6_mask->hdr.proto)
673 ICE_INSET_IPV6_NEXT_HDR;
674 if (ipv6_mask->hdr.hop_limits &&
677 ICE_INSET_TUN_IPV6_HOP_LIMIT;
678 else if (ipv6_mask->hdr.hop_limits)
680 ICE_INSET_IPV6_HOP_LIMIT;
681 if ((ipv6_mask->hdr.vtc_flow &
683 (RTE_IPV6_HDR_TC_MASK)) &&
686 ICE_INSET_TUN_IPV6_TC;
687 else if (ipv6_mask->hdr.vtc_flow &
689 (RTE_IPV6_HDR_TC_MASK))
690 input_set |= ICE_INSET_IPV6_TC;
692 list[t].type = (tunnel_valid == 0) ?
693 ICE_IPV6_OFOS : ICE_IPV6_IL;
694 struct ice_ipv6_hdr *f;
695 struct ice_ipv6_hdr *s;
696 f = &list[t].h_u.ipv6_hdr;
697 s = &list[t].m_u.ipv6_hdr;
698 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
699 if (ipv6_mask->hdr.src_addr[j]) {
701 ipv6_spec->hdr.src_addr[j];
703 ipv6_mask->hdr.src_addr[j];
706 if (ipv6_mask->hdr.dst_addr[j]) {
708 ipv6_spec->hdr.dst_addr[j];
710 ipv6_mask->hdr.dst_addr[j];
714 if (ipv6_mask->hdr.proto) {
716 ipv6_spec->hdr.proto;
718 ipv6_mask->hdr.proto;
721 if (ipv6_mask->hdr.hop_limits) {
723 ipv6_spec->hdr.hop_limits;
725 ipv6_mask->hdr.hop_limits;
728 if (ipv6_mask->hdr.vtc_flow &
730 (RTE_IPV6_HDR_TC_MASK)) {
731 struct ice_le_ver_tc_flow vtf;
732 vtf.u.fld.version = 0;
733 vtf.u.fld.flow_label = 0;
734 vtf.u.fld.tc = (rte_be_to_cpu_32
735 (ipv6_spec->hdr.vtc_flow) &
736 RTE_IPV6_HDR_TC_MASK) >>
737 RTE_IPV6_HDR_TC_SHIFT;
738 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
739 vtf.u.fld.tc = (rte_be_to_cpu_32
740 (ipv6_mask->hdr.vtc_flow) &
741 RTE_IPV6_HDR_TC_MASK) >>
742 RTE_IPV6_HDR_TC_SHIFT;
743 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
750 case RTE_FLOW_ITEM_TYPE_UDP:
751 udp_spec = item->spec;
752 udp_mask = item->mask;
754 if (udp_spec && udp_mask) {
755 /* Check UDP mask and update input set*/
756 if (udp_mask->hdr.dgram_len ||
757 udp_mask->hdr.dgram_cksum) {
758 rte_flow_error_set(error, EINVAL,
759 RTE_FLOW_ERROR_TYPE_ITEM,
766 if (udp_mask->hdr.src_port)
768 ICE_INSET_TUN_UDP_SRC_PORT;
769 if (udp_mask->hdr.dst_port)
771 ICE_INSET_TUN_UDP_DST_PORT;
773 if (udp_mask->hdr.src_port)
775 ICE_INSET_UDP_SRC_PORT;
776 if (udp_mask->hdr.dst_port)
778 ICE_INSET_UDP_DST_PORT;
780 if (*tun_type == ICE_SW_TUN_VXLAN &&
782 list[t].type = ICE_UDP_OF;
784 list[t].type = ICE_UDP_ILOS;
785 if (udp_mask->hdr.src_port) {
786 list[t].h_u.l4_hdr.src_port =
787 udp_spec->hdr.src_port;
788 list[t].m_u.l4_hdr.src_port =
789 udp_mask->hdr.src_port;
792 if (udp_mask->hdr.dst_port) {
793 list[t].h_u.l4_hdr.dst_port =
794 udp_spec->hdr.dst_port;
795 list[t].m_u.l4_hdr.dst_port =
796 udp_mask->hdr.dst_port;
803 case RTE_FLOW_ITEM_TYPE_TCP:
804 tcp_spec = item->spec;
805 tcp_mask = item->mask;
807 if (tcp_spec && tcp_mask) {
808 /* Check TCP mask and update input set */
809 if (tcp_mask->hdr.sent_seq ||
810 tcp_mask->hdr.recv_ack ||
811 tcp_mask->hdr.data_off ||
812 tcp_mask->hdr.tcp_flags ||
813 tcp_mask->hdr.rx_win ||
814 tcp_mask->hdr.cksum ||
815 tcp_mask->hdr.tcp_urp) {
816 rte_flow_error_set(error, EINVAL,
817 RTE_FLOW_ERROR_TYPE_ITEM,
824 if (tcp_mask->hdr.src_port)
826 ICE_INSET_TUN_TCP_SRC_PORT;
827 if (tcp_mask->hdr.dst_port)
829 ICE_INSET_TUN_TCP_DST_PORT;
831 if (tcp_mask->hdr.src_port)
833 ICE_INSET_TCP_SRC_PORT;
834 if (tcp_mask->hdr.dst_port)
836 ICE_INSET_TCP_DST_PORT;
838 list[t].type = ICE_TCP_IL;
839 if (tcp_mask->hdr.src_port) {
840 list[t].h_u.l4_hdr.src_port =
841 tcp_spec->hdr.src_port;
842 list[t].m_u.l4_hdr.src_port =
843 tcp_mask->hdr.src_port;
846 if (tcp_mask->hdr.dst_port) {
847 list[t].h_u.l4_hdr.dst_port =
848 tcp_spec->hdr.dst_port;
849 list[t].m_u.l4_hdr.dst_port =
850 tcp_mask->hdr.dst_port;
857 case RTE_FLOW_ITEM_TYPE_SCTP:
858 sctp_spec = item->spec;
859 sctp_mask = item->mask;
860 if (sctp_spec && sctp_mask) {
861 /* Check SCTP mask and update input set */
862 if (sctp_mask->hdr.cksum) {
863 rte_flow_error_set(error, EINVAL,
864 RTE_FLOW_ERROR_TYPE_ITEM,
866 "Invalid SCTP mask");
871 if (sctp_mask->hdr.src_port)
873 ICE_INSET_TUN_SCTP_SRC_PORT;
874 if (sctp_mask->hdr.dst_port)
876 ICE_INSET_TUN_SCTP_DST_PORT;
878 if (sctp_mask->hdr.src_port)
880 ICE_INSET_SCTP_SRC_PORT;
881 if (sctp_mask->hdr.dst_port)
883 ICE_INSET_SCTP_DST_PORT;
885 list[t].type = ICE_SCTP_IL;
886 if (sctp_mask->hdr.src_port) {
887 list[t].h_u.sctp_hdr.src_port =
888 sctp_spec->hdr.src_port;
889 list[t].m_u.sctp_hdr.src_port =
890 sctp_mask->hdr.src_port;
893 if (sctp_mask->hdr.dst_port) {
894 list[t].h_u.sctp_hdr.dst_port =
895 sctp_spec->hdr.dst_port;
896 list[t].m_u.sctp_hdr.dst_port =
897 sctp_mask->hdr.dst_port;
904 case RTE_FLOW_ITEM_TYPE_VXLAN:
905 vxlan_spec = item->spec;
906 vxlan_mask = item->mask;
907 /* Check if VXLAN item is used to describe protocol.
908 * If yes, both spec and mask should be NULL.
909 * If no, both spec and mask shouldn't be NULL.
911 if ((!vxlan_spec && vxlan_mask) ||
912 (vxlan_spec && !vxlan_mask)) {
913 rte_flow_error_set(error, EINVAL,
914 RTE_FLOW_ERROR_TYPE_ITEM,
916 "Invalid VXLAN item");
921 if (vxlan_spec && vxlan_mask) {
922 list[t].type = ICE_VXLAN;
923 if (vxlan_mask->vni[0] ||
924 vxlan_mask->vni[1] ||
925 vxlan_mask->vni[2]) {
926 list[t].h_u.tnl_hdr.vni =
927 (vxlan_spec->vni[2] << 16) |
928 (vxlan_spec->vni[1] << 8) |
930 list[t].m_u.tnl_hdr.vni =
931 (vxlan_mask->vni[2] << 16) |
932 (vxlan_mask->vni[1] << 8) |
935 ICE_INSET_TUN_VXLAN_VNI;
942 case RTE_FLOW_ITEM_TYPE_NVGRE:
943 nvgre_spec = item->spec;
944 nvgre_mask = item->mask;
945 /* Check if NVGRE item is used to describe protocol.
946 * If yes, both spec and mask should be NULL.
947 * If no, both spec and mask shouldn't be NULL.
949 if ((!nvgre_spec && nvgre_mask) ||
950 (nvgre_spec && !nvgre_mask)) {
951 rte_flow_error_set(error, EINVAL,
952 RTE_FLOW_ERROR_TYPE_ITEM,
954 "Invalid NVGRE item");
959 if (nvgre_spec && nvgre_mask) {
960 list[t].type = ICE_NVGRE;
961 if (nvgre_mask->tni[0] ||
962 nvgre_mask->tni[1] ||
963 nvgre_mask->tni[2]) {
964 list[t].h_u.nvgre_hdr.tni_flow =
965 (nvgre_spec->tni[2] << 16) |
966 (nvgre_spec->tni[1] << 8) |
968 list[t].m_u.nvgre_hdr.tni_flow =
969 (nvgre_mask->tni[2] << 16) |
970 (nvgre_mask->tni[1] << 8) |
973 ICE_INSET_TUN_NVGRE_TNI;
980 case RTE_FLOW_ITEM_TYPE_VLAN:
981 vlan_spec = item->spec;
982 vlan_mask = item->mask;
983 /* Check if VLAN item is used to describe protocol.
984 * If yes, both spec and mask should be NULL.
985 * If no, both spec and mask shouldn't be NULL.
987 if ((!vlan_spec && vlan_mask) ||
988 (vlan_spec && !vlan_mask)) {
989 rte_flow_error_set(error, EINVAL,
990 RTE_FLOW_ERROR_TYPE_ITEM,
992 "Invalid VLAN item");
995 if (vlan_spec && vlan_mask) {
996 list[t].type = ICE_VLAN_OFOS;
997 if (vlan_mask->tci) {
998 list[t].h_u.vlan_hdr.vlan =
1000 list[t].m_u.vlan_hdr.vlan =
1002 input_set |= ICE_INSET_VLAN_OUTER;
1003 input_set_byte += 2;
1005 if (vlan_mask->inner_type) {
1006 list[t].h_u.vlan_hdr.type =
1007 vlan_spec->inner_type;
1008 list[t].m_u.vlan_hdr.type =
1009 vlan_mask->inner_type;
1010 input_set |= ICE_INSET_ETHERTYPE;
1011 input_set_byte += 2;
1017 case RTE_FLOW_ITEM_TYPE_PPPOED:
1018 case RTE_FLOW_ITEM_TYPE_PPPOES:
1019 pppoe_spec = item->spec;
1020 pppoe_mask = item->mask;
1021 /* Check if PPPoE item is used to describe protocol.
1022 * If yes, both spec and mask should be NULL.
1023 * If no, both spec and mask shouldn't be NULL.
1025 if ((!pppoe_spec && pppoe_mask) ||
1026 (pppoe_spec && !pppoe_mask)) {
1027 rte_flow_error_set(error, EINVAL,
1028 RTE_FLOW_ERROR_TYPE_ITEM,
1030 "Invalid pppoe item");
1033 pppoe_patt_valid = 1;
1034 if (pppoe_spec && pppoe_mask) {
1035 /* Check pppoe mask and update input set */
1036 if (pppoe_mask->length ||
1038 pppoe_mask->version_type) {
1039 rte_flow_error_set(error, EINVAL,
1040 RTE_FLOW_ERROR_TYPE_ITEM,
1042 "Invalid pppoe mask");
1045 list[t].type = ICE_PPPOE;
1046 if (pppoe_mask->session_id) {
1047 list[t].h_u.pppoe_hdr.session_id =
1048 pppoe_spec->session_id;
1049 list[t].m_u.pppoe_hdr.session_id =
1050 pppoe_mask->session_id;
1051 input_set |= ICE_INSET_PPPOE_SESSION;
1052 input_set_byte += 2;
1055 pppoe_elem_valid = 1;
1059 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1060 pppoe_proto_spec = item->spec;
1061 pppoe_proto_mask = item->mask;
1062 /* Check if PPPoE optional proto_id item
1063 * is used to describe protocol.
1064 * If yes, both spec and mask should be NULL.
1065 * If no, both spec and mask shouldn't be NULL.
1067 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1068 (pppoe_proto_spec && !pppoe_proto_mask)) {
1069 rte_flow_error_set(error, EINVAL,
1070 RTE_FLOW_ERROR_TYPE_ITEM,
1072 "Invalid pppoe proto item");
1075 if (pppoe_proto_spec && pppoe_proto_mask) {
1076 if (pppoe_elem_valid)
1078 list[t].type = ICE_PPPOE;
1079 if (pppoe_proto_mask->proto_id) {
1080 list[t].h_u.pppoe_hdr.ppp_prot_id =
1081 pppoe_proto_spec->proto_id;
1082 list[t].m_u.pppoe_hdr.ppp_prot_id =
1083 pppoe_proto_mask->proto_id;
1084 input_set |= ICE_INSET_PPPOE_PROTO;
1085 input_set_byte += 2;
1086 pppoe_prot_valid = 1;
1088 if ((pppoe_proto_mask->proto_id &
1089 pppoe_proto_spec->proto_id) !=
1090 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1091 (pppoe_proto_mask->proto_id &
1092 pppoe_proto_spec->proto_id) !=
1093 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1094 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1096 *tun_type = ICE_SW_TUN_PPPOE;
1102 case RTE_FLOW_ITEM_TYPE_ESP:
1103 esp_spec = item->spec;
1104 esp_mask = item->mask;
1105 if ((esp_spec && !esp_mask) ||
1106 (!esp_spec && esp_mask)) {
1107 rte_flow_error_set(error, EINVAL,
1108 RTE_FLOW_ERROR_TYPE_ITEM,
1110 "Invalid esp item");
1113 /* Check esp mask and update input set */
1114 if (esp_mask && esp_mask->hdr.seq) {
1115 rte_flow_error_set(error, EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ITEM,
1118 "Invalid esp mask");
1122 if (!esp_spec && !esp_mask && !input_set) {
1124 if (ipv6_valid && udp_valid)
1126 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1127 else if (ipv6_valid)
1128 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1129 else if (ipv4_valid)
1131 } else if (esp_spec && esp_mask &&
1134 list[t].type = ICE_NAT_T;
1136 list[t].type = ICE_ESP;
1137 list[t].h_u.esp_hdr.spi =
1139 list[t].m_u.esp_hdr.spi =
1141 input_set |= ICE_INSET_ESP_SPI;
1142 input_set_byte += 4;
1146 if (!profile_rule) {
1147 if (ipv6_valid && udp_valid)
1148 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1149 else if (ipv4_valid && udp_valid)
1150 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1151 else if (ipv6_valid)
1152 *tun_type = ICE_SW_TUN_IPV6_ESP;
1153 else if (ipv4_valid)
1154 *tun_type = ICE_SW_TUN_IPV4_ESP;
1158 case RTE_FLOW_ITEM_TYPE_AH:
1159 ah_spec = item->spec;
1160 ah_mask = item->mask;
1161 if ((ah_spec && !ah_mask) ||
1162 (!ah_spec && ah_mask)) {
1163 rte_flow_error_set(error, EINVAL,
1164 RTE_FLOW_ERROR_TYPE_ITEM,
1169 /* Check ah mask and update input set */
1171 (ah_mask->next_hdr ||
1172 ah_mask->payload_len ||
1174 ah_mask->reserved)) {
1175 rte_flow_error_set(error, EINVAL,
1176 RTE_FLOW_ERROR_TYPE_ITEM,
1182 if (!ah_spec && !ah_mask && !input_set) {
1184 if (ipv6_valid && udp_valid)
1186 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1187 else if (ipv6_valid)
1188 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1189 else if (ipv4_valid)
1191 } else if (ah_spec && ah_mask &&
1193 list[t].type = ICE_AH;
1194 list[t].h_u.ah_hdr.spi =
1196 list[t].m_u.ah_hdr.spi =
1198 input_set |= ICE_INSET_AH_SPI;
1199 input_set_byte += 4;
1203 if (!profile_rule) {
1206 else if (ipv6_valid)
1207 *tun_type = ICE_SW_TUN_IPV6_AH;
1208 else if (ipv4_valid)
1209 *tun_type = ICE_SW_TUN_IPV4_AH;
1213 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1214 l2tp_spec = item->spec;
1215 l2tp_mask = item->mask;
1216 if ((l2tp_spec && !l2tp_mask) ||
1217 (!l2tp_spec && l2tp_mask)) {
1218 rte_flow_error_set(error, EINVAL,
1219 RTE_FLOW_ERROR_TYPE_ITEM,
1221 "Invalid l2tp item");
1225 if (!l2tp_spec && !l2tp_mask && !input_set) {
1228 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1229 else if (ipv4_valid)
1231 } else if (l2tp_spec && l2tp_mask &&
1232 l2tp_mask->session_id){
1233 list[t].type = ICE_L2TPV3;
1234 list[t].h_u.l2tpv3_sess_hdr.session_id =
1235 l2tp_spec->session_id;
1236 list[t].m_u.l2tpv3_sess_hdr.session_id =
1237 l2tp_mask->session_id;
1238 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1239 input_set_byte += 4;
1243 if (!profile_rule) {
1246 ICE_SW_TUN_IPV6_L2TPV3;
1247 else if (ipv4_valid)
1249 ICE_SW_TUN_IPV4_L2TPV3;
1253 case RTE_FLOW_ITEM_TYPE_PFCP:
1254 pfcp_spec = item->spec;
1255 pfcp_mask = item->mask;
1256 /* Check if PFCP item is used to describe protocol.
1257 * If yes, both spec and mask should be NULL.
1258 * If no, both spec and mask shouldn't be NULL.
1260 if ((!pfcp_spec && pfcp_mask) ||
1261 (pfcp_spec && !pfcp_mask)) {
1262 rte_flow_error_set(error, EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ITEM,
1265 "Invalid PFCP item");
1268 if (pfcp_spec && pfcp_mask) {
1269 /* Check pfcp mask and update input set */
1270 if (pfcp_mask->msg_type ||
1271 pfcp_mask->msg_len ||
1273 rte_flow_error_set(error, EINVAL,
1274 RTE_FLOW_ERROR_TYPE_ITEM,
1276 "Invalid pfcp mask");
1279 if (pfcp_mask->s_field &&
1280 pfcp_spec->s_field == 0x01 &&
1283 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1284 else if (pfcp_mask->s_field &&
1285 pfcp_spec->s_field == 0x01)
1287 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1288 else if (pfcp_mask->s_field &&
1289 !pfcp_spec->s_field &&
1292 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1293 else if (pfcp_mask->s_field &&
1294 !pfcp_spec->s_field)
1296 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1302 case RTE_FLOW_ITEM_TYPE_VOID:
1306 rte_flow_error_set(error, EINVAL,
1307 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1308 "Invalid pattern item.");
1313 if (pppoe_patt_valid && !pppoe_prot_valid) {
1314 if (ipv6_valid && udp_valid)
1315 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1316 else if (ipv6_valid && tcp_valid)
1317 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1318 else if (ipv4_valid && udp_valid)
1319 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1320 else if (ipv4_valid && tcp_valid)
1321 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1322 else if (ipv6_valid)
1323 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1324 else if (ipv4_valid)
1325 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1327 *tun_type = ICE_SW_TUN_PPPOE;
1330 if (*tun_type == ICE_NON_TUN) {
1332 *tun_type = ICE_SW_TUN_VXLAN;
1333 else if (nvgre_valid)
1334 *tun_type = ICE_SW_TUN_NVGRE;
1335 else if (ipv4_valid && tcp_valid)
1336 *tun_type = ICE_SW_IPV4_TCP;
1337 else if (ipv4_valid && udp_valid)
1338 *tun_type = ICE_SW_IPV4_UDP;
1339 else if (ipv6_valid && tcp_valid)
1340 *tun_type = ICE_SW_IPV6_TCP;
1341 else if (ipv6_valid && udp_valid)
1342 *tun_type = ICE_SW_IPV6_UDP;
1345 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1346 rte_flow_error_set(error, EINVAL,
1347 RTE_FLOW_ERROR_TYPE_ITEM,
1349 "too much input set");
/* Parse flow actions for the DCF (Device Config Function) path and fill
 * rule_info->sw_act.  Only VF forwarding and DROP are accepted here.
 * NOTE(review): this excerpt is missing several source lines (opening
 * brace, break statements, error message text, the final return);
 * comments describe only the visible code.
 */
1361 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1362 const struct rte_flow_action *actions,
1363 struct rte_flow_error *error,
1364 struct ice_adv_rule_info *rule_info)
1366 const struct rte_flow_action_vf *act_vf;
1367 const struct rte_flow_action *action;
1368 enum rte_flow_action_type action_type;
/* Walk the action list until the END terminator. */
1370 for (action = actions; action->type !=
1371 RTE_FLOW_ACTION_TYPE_END; action++) {
1372 action_type = action->type;
1373 switch (action_type) {
1374 case RTE_FLOW_ACTION_TYPE_VF:
/* Forward-to-VSI: the target VSI handle comes from the VF action. */
1375 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1376 act_vf = action->conf;
/* Reject out-of-range VF ids unless "original" is set (then the
 * id field is ignored and the DCF's own function is used).
 */
1378 if (act_vf->id >= ad->real_hw.num_vfs &&
1379 !act_vf->original) {
1380 rte_flow_error_set(error,
1381 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* original == 1: redirect to the DCF host function itself;
 * otherwise use the VF id directly as the VSI handle.
 */
1387 if (act_vf->original)
1388 rule_info->sw_act.vsi_handle =
1389 ad->real_hw.avf.bus.func;
1391 rule_info->sw_act.vsi_handle = act_vf->id;
1394 case RTE_FLOW_ACTION_TYPE_DROP:
1395 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
/* Any other action type is rejected. */
1399 rte_flow_error_set(error,
1400 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1402 "Invalid action type");
/* Rule matches RX traffic; source equals the chosen VSI handle. */
1407 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1408 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Fixed rule priority for switch rules created by this parser. */
1410 rule_info->priority = 5;
/* Parse flow actions for the regular PF path and fill rule_info->sw_act.
 * Supports RSS (interpreted as a queue-group/region action), QUEUE,
 * DROP and VOID.  NOTE(review): interior lines (braces, breaks, goto
 * error labels, returns) are missing from this excerpt; the three
 * trailing rte_flow_error_set calls appear to be distinct error labels
 * — confirm against the full source.
 */
1416 ice_switch_parse_action(struct ice_pf *pf,
1417 const struct rte_flow_action *actions,
1418 struct rte_flow_error *error,
1419 struct ice_adv_rule_info *rule_info)
1421 struct ice_vsi *vsi = pf->main_vsi;
1422 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1423 const struct rte_flow_action_queue *act_q;
1424 const struct rte_flow_action_rss *act_qgrop;
1425 uint16_t base_queue, i;
1426 const struct rte_flow_action *action;
1427 enum rte_flow_action_type action_type;
/* Hardware queue-group sizes accepted for the RSS/queue-region action. */
1428 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1429 2, 4, 8, 16, 32, 64, 128};
/* Translate VSI-relative queue indexes into absolute HW queue ids. */
1431 base_queue = pf->base_queue + vsi->base_queue;
1432 for (action = actions; action->type !=
1433 RTE_FLOW_ACTION_TYPE_END; action++) {
1434 action_type = action->type;
1435 switch (action_type) {
1436 case RTE_FLOW_ACTION_TYPE_RSS:
1437 act_qgrop = action->conf;
/* A one-queue RSS action is not a valid queue group. */
1438 if (act_qgrop->queue_num <= 1)
1440 rule_info->sw_act.fltr_act =
1442 rule_info->sw_act.fwd_id.q_id =
1443 base_queue + act_qgrop->queue[0];
/* Queue-group size must be one of the HW-supported powers of two. */
1444 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1445 if (act_qgrop->queue_num ==
1446 valid_qgrop_number[i])
1449 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured RX queue range. */
1451 if ((act_qgrop->queue[0] +
1452 act_qgrop->queue_num) >
1453 dev->data->nb_rx_queues)
/* Queues in the group must be contiguous and ascending. */
1455 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1456 if (act_qgrop->queue[i + 1] !=
1457 act_qgrop->queue[i] + 1)
1459 rule_info->sw_act.qgrp_size =
1460 act_qgrop->queue_num;
1462 case RTE_FLOW_ACTION_TYPE_QUEUE:
1463 act_q = action->conf;
/* Single-queue forward: index must be a configured RX queue. */
1464 if (act_q->index >= dev->data->nb_rx_queues)
1466 rule_info->sw_act.fltr_act =
1468 rule_info->sw_act.fwd_id.q_id =
1469 base_queue + act_q->index;
1472 case RTE_FLOW_ACTION_TYPE_DROP:
1473 rule_info->sw_act.fltr_act =
1477 case RTE_FLOW_ACTION_TYPE_VOID:
/* On success the rule is bound to the main VSI. */
1485 rule_info->sw_act.vsi_handle = vsi->idx;
1487 rule_info->sw_act.src = vsi->idx;
1488 rule_info->priority = 5;
/* Error paths (labels presumed — confirm against full source): */
1493 rte_flow_error_set(error,
1494 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1496 "Invalid action type or queue number");
1500 rte_flow_error_set(error,
1501 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1503 "Invalid queue region indexes");
1507 rte_flow_error_set(error,
1508 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1510 "Discontinuous queue region");
/* Validate the action list: exactly one non-VOID action (VF, RSS,
 * QUEUE or DROP) is permitted per rule.  NOTE(review): the increment of
 * actions_num and the returns are in lines missing from this excerpt.
 */
1515 ice_switch_check_action(const struct rte_flow_action *actions,
1516 struct rte_flow_error *error)
1518 const struct rte_flow_action *action;
1519 enum rte_flow_action_type action_type;
1520 uint16_t actions_num = 0;
1522 for (action = actions; action->type !=
1523 RTE_FLOW_ACTION_TYPE_END; action++) {
1524 action_type = action->type;
1525 switch (action_type) {
/* Countable (supported) action types share one case body. */
1526 case RTE_FLOW_ACTION_TYPE_VF:
1527 case RTE_FLOW_ACTION_TYPE_RSS:
1528 case RTE_FLOW_ACTION_TYPE_QUEUE:
1529 case RTE_FLOW_ACTION_TYPE_DROP:
/* VOID actions are ignored entirely. */
1532 case RTE_FLOW_ACTION_TYPE_VOID:
1535 rte_flow_error_set(error,
1536 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1538 "Invalid action type");
/* More or fewer than one real action is rejected. */
1543 if (actions_num != 1) {
1544 rte_flow_error_set(error,
1545 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1547 "Invalid action number");
/* Return whether the tunnel type selects a profile-based rule (matched
 * by switch profile ID rather than by an explicit input set).  Such
 * rules are allowed to have an empty input set.  NOTE(review): the
 * switch statement, return values and default case are in lines missing
 * from this excerpt.
 */
1555 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1558 case ICE_SW_TUN_PROFID_IPV6_ESP:
1559 case ICE_SW_TUN_PROFID_IPV6_AH:
1560 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1561 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1562 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1563 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1564 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1565 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* Top-level parse entry for the switch engine: validate the pattern and
 * actions, build the advanced lookup list, and hand back an allocated
 * struct sw_meta through *meta.  On any failure the allocated list and
 * meta are freed via the error paths at the bottom.  NOTE(review):
 * goto labels, null checks after rte_zmalloc, and return statements are
 * in lines missing from this excerpt.
 */
1575 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1576 struct ice_pattern_match_item *array,
1578 const struct rte_flow_item pattern[],
1579 const struct rte_flow_action actions[],
1581 struct rte_flow_error *error)
1583 struct ice_pf *pf = &ad->pf;
1584 uint64_t inputset = 0;
1586 struct sw_meta *sw_meta_ptr = NULL;
1587 struct ice_adv_rule_info rule_info;
1588 struct ice_adv_lkup_elem *list = NULL;
1589 uint16_t lkups_num = 0;
1590 const struct rte_flow_item *item = pattern;
1591 uint16_t item_num = 0;
1592 enum ice_sw_tunnel_type tun_type =
1594 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass over the pattern: count items and detect a fully-masked
 * ETH type, which makes the rule match both tunneled and plain traffic.
 */
1596 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1598 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1599 const struct rte_flow_item_eth *eth_mask;
1601 eth_mask = item->mask;
1604 if (eth_mask->type == UINT16_MAX)
1605 tun_type = ICE_SW_TUN_AND_NON_TUN;
1607 /* reserve one more memory slot for ETH which may
1608 * consume 2 lookup items.
1610 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Lookup list sized by the (padded) item count. */
1614 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1616 rte_flow_error_set(error, EINVAL,
1617 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1618 "No memory for PMD internal items");
1623 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1625 rte_flow_error_set(error, EINVAL,
1626 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1627 "No memory for sw_pattern_meta_ptr");
/* Match the pattern against this parser's supported pattern table. */
1631 pattern_match_item =
1632 ice_search_pattern_match_item(ad, pattern, array, array_len,
1634 if (!pattern_match_item) {
1635 rte_flow_error_set(error, EINVAL,
1636 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1637 "Invalid input pattern");
/* Extract the input set and fill the lookup list; an empty input set
 * is only valid for profile-based rules, and no bit may fall outside
 * the matched pattern's supported mask.
 */
1641 inputset = ice_switch_inset_get
1642 (pattern, error, list, &lkups_num, &tun_type);
1643 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1644 (inputset & ~pattern_match_item->input_set_mask)) {
1645 rte_flow_error_set(error, EINVAL,
1646 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1648 "Invalid input set");
1652 memset(&rule_info, 0, sizeof(rule_info));
1653 rule_info.tun_type = tun_type;
1655 ret = ice_switch_check_action(actions, error);
/* DCF and PF ports parse actions through different code paths. */
1659 if (ad->hw.dcf_enabled)
1660 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1663 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
/* Success: transfer ownership of list/meta to the caller. */
1669 *meta = sw_meta_ptr;
1670 ((struct sw_meta *)*meta)->list = list;
1671 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1672 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup paths (labels presumed — confirm against full source). */
1675 rte_free(sw_meta_ptr);
1678 rte_free(pattern_match_item);
1684 rte_free(sw_meta_ptr);
1685 rte_free(pattern_match_item);
/* Flow query (COUNT action) callback for the switch engine.
 * Counting is not supported by switch filters, so this always reports
 * an error through rte_flow_error_set.
 */
1691 ice_switch_query(struct ice_adapter *ad __rte_unused,
1692 struct rte_flow *flow __rte_unused,
1693 struct rte_flow_query_count *count __rte_unused,
1694 struct rte_flow_error *error)
1696 rte_flow_error_set(error, EINVAL,
1697 RTE_FLOW_ERROR_TYPE_HANDLE,
1699 "count action not supported by switch filter");
/* Re-point an existing switch rule at a new VSI number (used when a VF
 * is reset and its VSI changes).  Finds the rule in the recipe's filter
 * list, copies its lookups, removes the old rule, updates the VSI
 * context, then replays the rule.  NOTE(review): several lines (early
 * returns, LIST_FOR_EACH_ENTRY tail, ice_rem_adv_rule/ice_add_adv_rule
 * argument tails) are missing from this excerpt.
 */
1705 ice_switch_redirect(struct ice_adapter *ad,
1706 struct rte_flow *flow,
1707 struct ice_flow_redirect *rd)
1709 struct ice_rule_query_data *rdata = flow->rule;
1710 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1711 struct ice_adv_lkup_elem *lkups_dp = NULL;
1712 struct LIST_HEAD_TYPE *list_head;
1713 struct ice_adv_rule_info rinfo;
1714 struct ice_hw *hw = &ad->hw;
1715 struct ice_switch_info *sw;
/* Only rules bound to the VSI being redirected are of interest. */
1719 if (rdata->vsi_handle != rd->vsi_handle)
1722 sw = hw->switch_info;
1723 if (!sw->recp_list[rdata->rid].recp_created)
1726 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Scan the recipe's filter rules for this flow's rule id. */
1729 list_head = &sw->recp_list[rdata->rid].filt_rules;
1730 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1732 rinfo = list_itr->rule_info;
1733 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1734 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1735 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1736 (rinfo.fltr_rule_id == rdata->rule_id &&
1737 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Deep-copy the lookups before the original entry is freed by
 * rule removal below.
 */
1738 lkups_cnt = list_itr->lkups_cnt;
1739 lkups_dp = (struct ice_adv_lkup_elem *)
1740 ice_memdup(hw, list_itr->lkups,
1741 sizeof(*list_itr->lkups) *
1742 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1745 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* A VSI-list rule is replayed as a plain forward-to-VSI rule. */
1749 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1750 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1751 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1760 /* Remove the old rule */
1761 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1764 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1770 /* Update VSI context */
1771 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1773 /* Replay the rule */
1774 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1777 PMD_DRV_LOG(ERR, "Failed to replay the rule");
/* Free the duplicated lookup array in all exit paths. */
1782 ice_free(hw, lkups_dp);
/* Engine init hook: register exactly one parser depending on devargs —
 * the permission-stage parser in pipeline mode, otherwise the
 * distributor-stage parser.
 */
1787 ice_switch_init(struct ice_adapter *ad)
1790 struct ice_flow_parser *dist_parser;
1791 struct ice_flow_parser *perm_parser;
1793 if (ad->devargs.pipe_mode_support) {
1794 perm_parser = &ice_switch_perm_parser;
1795 ret = ice_register_parser(perm_parser, ad);
1797 dist_parser = &ice_switch_dist_parser;
1798 ret = ice_register_parser(dist_parser, ad);
/* Engine uninit hook: mirror of ice_switch_init — unregister whichever
 * parser was registered for this adapter's mode.
 */
1804 ice_switch_uninit(struct ice_adapter *ad)
1806 struct ice_flow_parser *dist_parser;
1807 struct ice_flow_parser *perm_parser;
1809 if (ad->devargs.pipe_mode_support) {
1810 perm_parser = &ice_switch_perm_parser;
1811 ice_unregister_parser(perm_parser, ad);
1813 dist_parser = &ice_switch_dist_parser;
1814 ice_unregister_parser(dist_parser, ad);
/* Switch-filter flow engine descriptor: wires this file's callbacks
 * into the generic ice flow framework.
 */
1819 ice_flow_engine ice_switch_engine = {
1820 .init = ice_switch_init,
1821 .uninit = ice_switch_uninit,
1822 .create = ice_switch_create,
1823 .destroy = ice_switch_destroy,
1824 .query_count = ice_switch_query,
1825 .redirect = ice_switch_redirect,
1826 .free = ice_switch_filter_rule_free,
1827 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser: used in the default (non-pipeline) mode;
 * matches patterns from ice_switch_pattern_dist_list.
 */
1831 ice_flow_parser ice_switch_dist_parser = {
1832 .engine = &ice_switch_engine,
1833 .array = ice_switch_pattern_dist_list,
1834 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1835 .parse_pattern_action = ice_switch_parse_pattern_action,
1836 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser: used when pipe_mode_support is enabled;
 * matches patterns from ice_switch_pattern_perm_list.
 */
1840 ice_flow_parser ice_switch_perm_parser = {
1841 .engine = &ice_switch_engine,
1842 .array = ice_switch_pattern_perm_list,
1843 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1844 .parse_pattern_action = ice_switch_parse_pattern_action,
1845 .stage = ICE_FLOW_STAGE_PERMISSION,
1848 RTE_INIT(ice_sw_engine_init)
1850 struct ice_flow_engine *engine = &ice_switch_engine;
1851 ice_register_flow_engine(engine);