1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/*
 * Input-set bit masks describing which packet fields each supported switch
 * filter pattern may match on.  Each ICE_SW_INSET_* value is OR-ed from the
 * ICE_INSET_* field bits and is paired with a flow pattern in the
 * ice_pattern_match_item tables below.
 *
 * NOTE(review): this listing has gaps — e.g. the continuation of
 * ICE_SW_INSET_MAC_VLAN (original line 35) is missing, so that macro appears
 * to splice into the following #define.  Do not edit these macros without
 * consulting the complete file.
 */
29 #define MAX_QGRP_NUM_TYPE 7
31 #define ICE_SW_INSET_ETHER ( \
32 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
33 #define ICE_SW_INSET_MAC_VLAN ( \
34 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
36 #define ICE_SW_INSET_MAC_IPV4 ( \
37 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
38 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
39 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
40 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
41 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
42 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
43 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV6 ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
49 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
50 ICE_INSET_IPV6_NEXT_HDR)
51 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
54 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
55 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
56 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
59 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
60 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
61 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
62 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
63 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
64 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
65 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
66 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
67 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
80 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
82 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
84 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87 ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91 ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_MAC_PPPOE ( \
93 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
94 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
95 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
96 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
97 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
98 ICE_INSET_PPPOE_PROTO)
99 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
100 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
101 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
102 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
103 #define ICE_SW_INSET_MAC_IPV4_AH ( \
104 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
105 #define ICE_SW_INSET_MAC_IPV6_AH ( \
106 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
107 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
108 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
109 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
110 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
111 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
112 ICE_SW_INSET_MAC_IPV4 | \
113 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
114 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
115 ICE_SW_INSET_MAC_IPV6 | \
116 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/*
 * Interior of struct sw_meta (the "struct sw_meta {" header and closing
 * brace are outside this view; the lkups_num field at original line 120 is
 * also missing).  Carries the parsed lookup list and rule info from
 * ice_switch_inset_get / the parsers to ice_switch_create.
 */
/* Array of lookup elements built by the pattern parser; freed by creator. */
119 struct ice_adv_lkup_elem *list;
/* Rule attributes (tunnel type, action, priority) passed to ice_add_adv_rule. */
121 struct ice_adv_rule_info rule_info;
/*
 * Forward declarations of the three switch-filter parsers registered with
 * the generic flow framework: distributor-mode parsers for the OS and
 * COMMS DDP packages, and the permission-mode parser.
 */
124 static struct ice_flow_parser ice_switch_dist_parser_os;
125 static struct ice_flow_parser ice_switch_dist_parser_comms;
126 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Pattern table for distributor mode with the COMMS DDP package:
 * {pattern, supported input-set mask, meta}.  Beyond the basic L2/L3/L4 and
 * VXLAN/NVGRE tunnel patterns it supports PPPoE, ESP, AH, L2TPv3 and PFCP.
 * NOTE(review): the "static struct" opening line (128), several pattern
 * lines (e.g. 130, 134, 140, 158, 162) and the closing "};" (192) are
 * missing from this listing.
 */
129 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
131 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
132 {pattern_ethertype_vlan,
133 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
135 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
136 {pattern_eth_ipv4_udp,
137 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
138 {pattern_eth_ipv4_tcp,
139 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
141 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
142 {pattern_eth_ipv6_udp,
143 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
144 {pattern_eth_ipv6_tcp,
145 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
146 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
147 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
148 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
149 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
150 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
151 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
152 {pattern_eth_ipv4_nvgre_eth_ipv4,
153 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
154 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
155 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
156 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
157 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
159 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
160 {pattern_eth_vlan_pppoed,
161 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
163 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
164 {pattern_eth_vlan_pppoes,
165 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
166 {pattern_eth_pppoes_proto,
167 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
168 {pattern_eth_vlan_pppoes_proto,
169 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
170 {pattern_eth_ipv4_esp,
171 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
172 {pattern_eth_ipv4_udp_esp,
173 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
174 {pattern_eth_ipv6_esp,
175 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
176 {pattern_eth_ipv6_udp_esp,
177 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
178 {pattern_eth_ipv4_ah,
179 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
180 {pattern_eth_ipv6_ah,
181 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
182 {pattern_eth_ipv6_udp_ah,
/* NONE input set: matched by a profile rule, not by explicit fields. */
183 ICE_INSET_NONE, ICE_INSET_NONE},
184 {pattern_eth_ipv4_l2tp,
185 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
186 {pattern_eth_ipv6_l2tp,
187 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
188 {pattern_eth_ipv4_pfcp,
189 ICE_INSET_NONE, ICE_INSET_NONE},
190 {pattern_eth_ipv6_pfcp,
191 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Pattern table for distributor mode with the OS-default DDP package.
 * Subset of the COMMS table: no PPPoE/ESP/AH/L2TP/PFCP entries, since the
 * OS package lacks those protocol profiles.
 * NOTE(review): opening "static struct" line, a few pattern lines
 * (196, 200, 202, 208) and the closing "};" are missing from this listing.
 */
195 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
197 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
198 {pattern_ethertype_vlan,
199 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
201 ICE_INSET_NONE, ICE_INSET_NONE},
203 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
204 {pattern_eth_ipv4_udp,
205 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
206 {pattern_eth_ipv4_tcp,
207 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
209 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
210 {pattern_eth_ipv6_udp,
211 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
212 {pattern_eth_ipv6_tcp,
213 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
214 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
215 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
216 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
217 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
218 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
219 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
220 {pattern_eth_ipv4_nvgre_eth_ipv4,
221 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
222 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
223 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
224 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
225 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/*
 * Pattern table for permission (ACL-stage) mode.  Mirrors the COMMS table
 * but tunnel patterns use the ICE_SW_INSET_PERM_TUNNEL_* input sets (inner
 * IP/L4 fields only, no tunnel-ID match).
 * NOTE(review): opening "static struct" line, several pattern lines and the
 * closing "};" are missing from this listing.
 */
229 ice_pattern_match_item ice_switch_pattern_perm[] = {
231 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
232 {pattern_ethertype_vlan,
233 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
235 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
236 {pattern_eth_ipv4_udp,
237 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
238 {pattern_eth_ipv4_tcp,
239 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
241 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
242 {pattern_eth_ipv6_udp,
243 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
244 {pattern_eth_ipv6_tcp,
245 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
246 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
247 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
248 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
249 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
250 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
251 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
252 {pattern_eth_ipv4_nvgre_eth_ipv4,
253 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
254 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
255 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
256 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
257 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
259 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
260 {pattern_eth_vlan_pppoed,
261 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
263 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
264 {pattern_eth_vlan_pppoes,
265 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
266 {pattern_eth_pppoes_proto,
267 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
268 {pattern_eth_vlan_pppoes_proto,
269 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
270 {pattern_eth_ipv4_esp,
271 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
272 {pattern_eth_ipv4_udp_esp,
273 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
274 {pattern_eth_ipv6_esp,
275 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
276 {pattern_eth_ipv6_udp_esp,
277 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
278 {pattern_eth_ipv4_ah,
279 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
280 {pattern_eth_ipv6_ah,
281 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
282 {pattern_eth_ipv6_udp_ah,
283 ICE_INSET_NONE, ICE_INSET_NONE},
284 {pattern_eth_ipv4_l2tp,
285 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
286 {pattern_eth_ipv6_l2tp,
287 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
288 {pattern_eth_ipv4_pfcp,
289 ICE_INSET_NONE, ICE_INSET_NONE},
290 {pattern_eth_ipv6_pfcp,
291 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program a parsed switch rule into hardware.
 * `meta` (parameter line not visible here) is the struct sw_meta produced by
 * the parser; its lookup list, element count and rule info are unpacked and
 * handed to ice_add_adv_rule().  On success the returned rule ID data is
 * copied into a freshly allocated ice_rule_query_data stored in flow->rule
 * so the rule can be removed later.  On any failure rte_flow_error_set()
 * records the cause.
 * NOTE(review): this listing is incomplete — the declaration of `ret`, the
 * error gotos/returns and the closing brace (original lines 297, 300, 307,
 * 311, 316-318, 322-325, 328, 332-333, 336, 338, 342-355) are missing.
 */
295 ice_switch_create(struct ice_adapter *ad,
296 struct rte_flow *flow,
298 struct rte_flow_error *error)
301 struct ice_pf *pf = &ad->pf;
302 struct ice_hw *hw = ICE_PF_TO_HW(pf);
303 struct ice_rule_query_data rule_added = {0};
304 struct ice_rule_query_data *filter_ptr;
/* Unpack the parser output carried in the opaque meta pointer. */
305 struct ice_adv_lkup_elem *list =
306 ((struct sw_meta *)meta)->list;
308 ((struct sw_meta *)meta)->lkups_num;
309 struct ice_adv_rule_info *rule_info =
310 &((struct sw_meta *)meta)->rule_info;
/* Hardware limits the number of lookup words per rule. */
312 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
313 rte_flow_error_set(error, EINVAL,
314 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
315 "item number too large for rule");
319 rte_flow_error_set(error, EINVAL,
320 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
321 "lookup list should not be NULL");
/* Install the advanced rule; rule_added receives the hw rule/recipe IDs. */
324 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule IDs so ice_switch_destroy can remove the rule later. */
326 filter_ptr = rte_zmalloc("ice_switch_filter",
327 sizeof(struct ice_rule_query_data), 0)(
329 rte_flow_error_set(error, EINVAL,
330 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331 "No memory for ice_switch_filter");
334 flow->rule = filter_ptr;
335 rte_memcpy(filter_ptr,
337 sizeof(struct ice_rule_query_data));
339 rte_flow_error_set(error, EINVAL,
340 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
341 "switch filter create flow fail");
/*
 * Remove a previously created switch rule from hardware and release the
 * ice_rule_query_data saved in flow->rule by ice_switch_create.
 * NOTE(review): listing is incomplete — the return-type line, braces, the
 * flow->rule cast continuation (366), the NULL-rule check (368, 371), the
 * error returns and final return (373-377, 381-387) are missing.
 */
357 ice_switch_destroy(struct ice_adapter *ad,
358 struct rte_flow *flow,
359 struct rte_flow_error *error)
361 struct ice_hw *hw = &ad->hw;
363 struct ice_rule_query_data *filter_ptr;
/* Recover the rule IDs stashed at create time. */
365 filter_ptr = (struct ice_rule_query_data *)
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
372 " create by switch filter");
/* Ask the shared code to delete the rule by its recorded ID. */
376 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
378 rte_flow_error_set(error, EINVAL,
379 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
380 "fail to destroy switch filter rule");
/* Rule is gone from hw (or removal was reported); free the bookkeeping. */
384 rte_free(filter_ptr);
/*
 * Release the per-flow private data (the ice_rule_query_data allocated in
 * ice_switch_create).  rte_free(NULL) is a no-op, so a flow without a rule
 * is safe.  NOTE(review): return-type line and braces are outside this view.
 */
389 ice_switch_filter_rule_free(struct rte_flow *flow)
391 rte_free(flow->rule);
/*
 * Walk an rte_flow item pattern and translate it into the shared-code
 * lookup-element array `list`, returning (via visible code paths) the
 * accumulated ICE_INSET_* input-set mask and selecting *tun_type for
 * profile-based rules (ESP/AH/L2TPv3/PFCP/NAT-T).
 *
 * NOTE(review): this listing is heavily truncated — the return-type line,
 * the lkups_num output parameter, the list-index variable `t` increments,
 * many `return 0;`/`return -rte_errno;` statements, several closing braces
 * and whole sub-branches are missing.  Comments below describe only what
 * the visible lines establish.
 */
395 ice_switch_inset_get(const struct rte_flow_item pattern[],
396 struct rte_flow_error *error,
397 struct ice_adv_lkup_elem *list,
399 enum ice_sw_tunnel_type *tun_type)
401 const struct rte_flow_item *item = pattern;
402 enum rte_flow_item_type item_type;
403 const struct rte_flow_item_eth *eth_spec, *eth_mask;
404 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
405 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
406 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
407 const struct rte_flow_item_udp *udp_spec, *udp_mask;
408 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
409 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
410 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
411 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
412 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
413 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
415 const struct rte_flow_item_esp *esp_spec, *esp_mask;
416 const struct rte_flow_item_ah *ah_spec, *ah_mask;
417 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
418 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
419 uint64_t input_set = ICE_INSET_NONE;
/* Parse-state flags.  "valiad" is a long-standing typo for "valid" kept
 * for byte-identity; profile_rule marks tun_type already fixed by a
 * PROFID selection so later items must not override it.
 */
421 bool profile_rule = 0;
422 bool tunnel_valid = 0;
423 bool pppoe_valid = 0;
424 bool ipv6_valiad = 0;
425 bool ipv4_valiad = 0;
/* Iterate the pattern until the END item; ranges are rejected (line 431). */
428 for (item = pattern; item->type !=
429 RTE_FLOW_ITEM_TYPE_END; item++) {
431 rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ITEM,
434 "Not support range");
437 item_type = item->type;
/* --- Ethernet: src/dst MAC and ethertype.  Tunnel context switches the
 * inset bits to their TUN_ variants (lines 447/458, bodies truncated)
 * and the lookup slot to ICE_MAC_IL.
 */
440 case RTE_FLOW_ITEM_TYPE_ETH:
441 eth_spec = item->spec;
442 eth_mask = item->mask;
443 if (eth_spec && eth_mask) {
444 const uint8_t *a = eth_mask->src.addr_bytes;
445 const uint8_t *b = eth_mask->dst.addr_bytes;
446 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
447 if (a[j] && tunnel_valid) {
457 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
458 if (b[j] && tunnel_valid) {
469 input_set |= ICE_INSET_ETHERTYPE;
470 list[t].type = (tunnel_valid == 0) ?
471 ICE_MAC_OFOS : ICE_MAC_IL;
472 struct ice_ether_hdr *h;
473 struct ice_ether_hdr *m;
475 h = &list[t].h_u.eth_hdr;
476 m = &list[t].m_u.eth_hdr;
/* Copy only the MAC bytes whose mask bits are set. */
477 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
478 if (eth_mask->src.addr_bytes[j]) {
480 eth_spec->src.addr_bytes[j];
482 eth_mask->src.addr_bytes[j];
485 if (eth_mask->dst.addr_bytes[j]) {
487 eth_spec->dst.addr_bytes[j];
489 eth_mask->dst.addr_bytes[j];
/* Ethertype match consumes an extra ICE_ETYPE_OL lookup slot. */
495 if (eth_mask->type) {
496 list[t].type = ICE_ETYPE_OL;
497 list[t].h_u.ethertype.ethtype_id =
499 list[t].m_u.ethertype.ethtype_id =
/* --- IPv4: rejects masks on header fields the hw cannot match
 * (version/ihl, total length, packet id, checksum), then sets inset bits
 * (TUN_ variants inside a tunnel) and fills the ipv4_hdr lookup.
 */
506 case RTE_FLOW_ITEM_TYPE_IPV4:
507 ipv4_spec = item->spec;
508 ipv4_mask = item->mask;
510 if (ipv4_spec && ipv4_mask) {
511 /* Check IPv4 mask and update input set */
512 if (ipv4_mask->hdr.version_ihl ||
513 ipv4_mask->hdr.total_length ||
514 ipv4_mask->hdr.packet_id ||
515 ipv4_mask->hdr.hdr_checksum) {
516 rte_flow_error_set(error, EINVAL,
517 RTE_FLOW_ERROR_TYPE_ITEM,
519 "Invalid IPv4 mask.");
524 if (ipv4_mask->hdr.type_of_service)
526 ICE_INSET_TUN_IPV4_TOS;
527 if (ipv4_mask->hdr.src_addr)
529 ICE_INSET_TUN_IPV4_SRC;
530 if (ipv4_mask->hdr.dst_addr)
532 ICE_INSET_TUN_IPV4_DST;
533 if (ipv4_mask->hdr.time_to_live)
535 ICE_INSET_TUN_IPV4_TTL;
536 if (ipv4_mask->hdr.next_proto_id)
538 ICE_INSET_TUN_IPV4_PROTO;
540 if (ipv4_mask->hdr.src_addr)
541 input_set |= ICE_INSET_IPV4_SRC;
542 if (ipv4_mask->hdr.dst_addr)
543 input_set |= ICE_INSET_IPV4_DST;
544 if (ipv4_mask->hdr.time_to_live)
545 input_set |= ICE_INSET_IPV4_TTL;
546 if (ipv4_mask->hdr.next_proto_id)
548 ICE_INSET_IPV4_PROTO;
549 if (ipv4_mask->hdr.type_of_service)
553 list[t].type = (tunnel_valid == 0) ?
554 ICE_IPV4_OFOS : ICE_IPV4_IL;
555 if (ipv4_mask->hdr.src_addr) {
556 list[t].h_u.ipv4_hdr.src_addr =
557 ipv4_spec->hdr.src_addr;
558 list[t].m_u.ipv4_hdr.src_addr =
559 ipv4_mask->hdr.src_addr;
561 if (ipv4_mask->hdr.dst_addr) {
562 list[t].h_u.ipv4_hdr.dst_addr =
563 ipv4_spec->hdr.dst_addr;
564 list[t].m_u.ipv4_hdr.dst_addr =
565 ipv4_mask->hdr.dst_addr;
567 if (ipv4_mask->hdr.time_to_live) {
568 list[t].h_u.ipv4_hdr.time_to_live =
569 ipv4_spec->hdr.time_to_live;
570 list[t].m_u.ipv4_hdr.time_to_live =
571 ipv4_mask->hdr.time_to_live;
573 if (ipv4_mask->hdr.next_proto_id) {
574 list[t].h_u.ipv4_hdr.protocol =
575 ipv4_spec->hdr.next_proto_id;
576 list[t].m_u.ipv4_hdr.protocol =
577 ipv4_mask->hdr.next_proto_id;
579 if (ipv4_mask->hdr.type_of_service) {
580 list[t].h_u.ipv4_hdr.tos =
581 ipv4_spec->hdr.type_of_service;
582 list[t].m_u.ipv4_hdr.tos =
583 ipv4_mask->hdr.type_of_service;
/* --- IPv6: payload_len mask is rejected; src/dst address bytes, proto,
 * hop limit and the TC bits of vtc_flow feed the inset and the
 * ipv6_hdr lookup.  TC is extracted via struct ice_le_ver_tc_flow.
 */
589 case RTE_FLOW_ITEM_TYPE_IPV6:
590 ipv6_spec = item->spec;
591 ipv6_mask = item->mask;
593 if (ipv6_spec && ipv6_mask) {
594 if (ipv6_mask->hdr.payload_len) {
595 rte_flow_error_set(error, EINVAL,
596 RTE_FLOW_ERROR_TYPE_ITEM,
598 "Invalid IPv6 mask");
602 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
603 if (ipv6_mask->hdr.src_addr[j] &&
606 ICE_INSET_TUN_IPV6_SRC;
608 } else if (ipv6_mask->hdr.src_addr[j]) {
609 input_set |= ICE_INSET_IPV6_SRC;
613 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
614 if (ipv6_mask->hdr.dst_addr[j] &&
617 ICE_INSET_TUN_IPV6_DST;
619 } else if (ipv6_mask->hdr.dst_addr[j]) {
620 input_set |= ICE_INSET_IPV6_DST;
624 if (ipv6_mask->hdr.proto &&
627 ICE_INSET_TUN_IPV6_NEXT_HDR;
628 else if (ipv6_mask->hdr.proto)
630 ICE_INSET_IPV6_NEXT_HDR;
631 if (ipv6_mask->hdr.hop_limits &&
634 ICE_INSET_TUN_IPV6_HOP_LIMIT;
635 else if (ipv6_mask->hdr.hop_limits)
637 ICE_INSET_IPV6_HOP_LIMIT;
638 if ((ipv6_mask->hdr.vtc_flow &
640 (RTE_IPV6_HDR_TC_MASK)) &&
643 ICE_INSET_TUN_IPV6_TC;
644 else if (ipv6_mask->hdr.vtc_flow &
646 (RTE_IPV6_HDR_TC_MASK))
647 input_set |= ICE_INSET_IPV6_TC;
649 list[t].type = (tunnel_valid == 0) ?
650 ICE_IPV6_OFOS : ICE_IPV6_IL;
651 struct ice_ipv6_hdr *f;
652 struct ice_ipv6_hdr *s;
653 f = &list[t].h_u.ipv6_hdr;
654 s = &list[t].m_u.ipv6_hdr;
655 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
656 if (ipv6_mask->hdr.src_addr[j]) {
658 ipv6_spec->hdr.src_addr[j];
660 ipv6_mask->hdr.src_addr[j];
662 if (ipv6_mask->hdr.dst_addr[j]) {
664 ipv6_spec->hdr.dst_addr[j];
666 ipv6_mask->hdr.dst_addr[j];
669 if (ipv6_mask->hdr.proto) {
671 ipv6_spec->hdr.proto;
673 ipv6_mask->hdr.proto;
675 if (ipv6_mask->hdr.hop_limits) {
677 ipv6_spec->hdr.hop_limits;
679 ipv6_mask->hdr.hop_limits;
681 if (ipv6_mask->hdr.vtc_flow &
683 (RTE_IPV6_HDR_TC_MASK)) {
/* Rebuild the ver/tc/flow word with only the TC field populated. */
684 struct ice_le_ver_tc_flow vtf;
685 vtf.u.fld.version = 0;
686 vtf.u.fld.flow_label = 0;
687 vtf.u.fld.tc = (rte_be_to_cpu_32
688 (ipv6_spec->hdr.vtc_flow) &
689 RTE_IPV6_HDR_TC_MASK) >>
690 RTE_IPV6_HDR_TC_SHIFT;
691 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
692 vtf.u.fld.tc = (rte_be_to_cpu_32
693 (ipv6_mask->hdr.vtc_flow) &
694 RTE_IPV6_HDR_TC_MASK) >>
695 RTE_IPV6_HDR_TC_SHIFT;
696 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
/* --- UDP: only src/dst port masks allowed.  Inside a VXLAN tunnel the
 * outer UDP becomes ICE_UDP_OF, otherwise ICE_UDP_ILOS (line 732-736,
 * condition partially truncated).
 */
702 case RTE_FLOW_ITEM_TYPE_UDP:
703 udp_spec = item->spec;
704 udp_mask = item->mask;
706 if (udp_spec && udp_mask) {
707 /* Check UDP mask and update input set*/
708 if (udp_mask->hdr.dgram_len ||
709 udp_mask->hdr.dgram_cksum) {
710 rte_flow_error_set(error, EINVAL,
711 RTE_FLOW_ERROR_TYPE_ITEM,
718 if (udp_mask->hdr.src_port)
720 ICE_INSET_TUN_UDP_SRC_PORT;
721 if (udp_mask->hdr.dst_port)
723 ICE_INSET_TUN_UDP_DST_PORT;
725 if (udp_mask->hdr.src_port)
727 ICE_INSET_UDP_SRC_PORT;
728 if (udp_mask->hdr.dst_port)
730 ICE_INSET_UDP_DST_PORT;
732 if (*tun_type == ICE_SW_TUN_VXLAN &&
734 list[t].type = ICE_UDP_OF;
736 list[t].type = ICE_UDP_ILOS;
737 if (udp_mask->hdr.src_port) {
738 list[t].h_u.l4_hdr.src_port =
739 udp_spec->hdr.src_port;
740 list[t].m_u.l4_hdr.src_port =
741 udp_mask->hdr.src_port;
743 if (udp_mask->hdr.dst_port) {
744 list[t].h_u.l4_hdr.dst_port =
745 udp_spec->hdr.dst_port;
746 list[t].m_u.l4_hdr.dst_port =
747 udp_mask->hdr.dst_port;
/* --- TCP: only src/dst port masks allowed; everything else (seq, ack,
 * flags, window, checksum, urgent ptr, data offset) is rejected.
 */
753 case RTE_FLOW_ITEM_TYPE_TCP:
754 tcp_spec = item->spec;
755 tcp_mask = item->mask;
756 if (tcp_spec && tcp_mask) {
757 /* Check TCP mask and update input set */
758 if (tcp_mask->hdr.sent_seq ||
759 tcp_mask->hdr.recv_ack ||
760 tcp_mask->hdr.data_off ||
761 tcp_mask->hdr.tcp_flags ||
762 tcp_mask->hdr.rx_win ||
763 tcp_mask->hdr.cksum ||
764 tcp_mask->hdr.tcp_urp) {
765 rte_flow_error_set(error, EINVAL,
766 RTE_FLOW_ERROR_TYPE_ITEM,
773 if (tcp_mask->hdr.src_port)
775 ICE_INSET_TUN_TCP_SRC_PORT;
776 if (tcp_mask->hdr.dst_port)
778 ICE_INSET_TUN_TCP_DST_PORT;
780 if (tcp_mask->hdr.src_port)
782 ICE_INSET_TCP_SRC_PORT;
783 if (tcp_mask->hdr.dst_port)
785 ICE_INSET_TCP_DST_PORT;
787 list[t].type = ICE_TCP_IL;
788 if (tcp_mask->hdr.src_port) {
789 list[t].h_u.l4_hdr.src_port =
790 tcp_spec->hdr.src_port;
791 list[t].m_u.l4_hdr.src_port =
792 tcp_mask->hdr.src_port;
794 if (tcp_mask->hdr.dst_port) {
795 list[t].h_u.l4_hdr.dst_port =
796 tcp_spec->hdr.dst_port;
797 list[t].m_u.l4_hdr.dst_port =
798 tcp_mask->hdr.dst_port;
/* --- SCTP: only src/dst port masks allowed; checksum mask rejected. */
804 case RTE_FLOW_ITEM_TYPE_SCTP:
805 sctp_spec = item->spec;
806 sctp_mask = item->mask;
807 if (sctp_spec && sctp_mask) {
808 /* Check SCTP mask and update input set */
809 if (sctp_mask->hdr.cksum) {
810 rte_flow_error_set(error, EINVAL,
811 RTE_FLOW_ERROR_TYPE_ITEM,
813 "Invalid SCTP mask");
818 if (sctp_mask->hdr.src_port)
820 ICE_INSET_TUN_SCTP_SRC_PORT;
821 if (sctp_mask->hdr.dst_port)
823 ICE_INSET_TUN_SCTP_DST_PORT;
825 if (sctp_mask->hdr.src_port)
827 ICE_INSET_SCTP_SRC_PORT;
828 if (sctp_mask->hdr.dst_port)
830 ICE_INSET_SCTP_DST_PORT;
832 list[t].type = ICE_SCTP_IL;
833 if (sctp_mask->hdr.src_port) {
834 list[t].h_u.sctp_hdr.src_port =
835 sctp_spec->hdr.src_port;
836 list[t].m_u.sctp_hdr.src_port =
837 sctp_mask->hdr.src_port;
839 if (sctp_mask->hdr.dst_port) {
840 list[t].h_u.sctp_hdr.dst_port =
841 sctp_spec->hdr.dst_port;
842 list[t].m_u.sctp_hdr.dst_port =
843 sctp_mask->hdr.dst_port;
/* --- VXLAN: spec/mask must be both present or both absent.  A masked
 * 24-bit VNI is packed into the tnl_hdr lookup.
 */
849 case RTE_FLOW_ITEM_TYPE_VXLAN:
850 vxlan_spec = item->spec;
851 vxlan_mask = item->mask;
852 /* Check if VXLAN item is used to describe protocol.
853 * If yes, both spec and mask should be NULL.
854 * If no, both spec and mask shouldn't be NULL.
856 if ((!vxlan_spec && vxlan_mask) ||
857 (vxlan_spec && !vxlan_mask)) {
858 rte_flow_error_set(error, EINVAL,
859 RTE_FLOW_ERROR_TYPE_ITEM,
861 "Invalid VXLAN item");
866 if (vxlan_spec && vxlan_mask) {
867 list[t].type = ICE_VXLAN;
868 if (vxlan_mask->vni[0] ||
869 vxlan_mask->vni[1] ||
870 vxlan_mask->vni[2]) {
871 list[t].h_u.tnl_hdr.vni =
872 (vxlan_spec->vni[2] << 16) |
873 (vxlan_spec->vni[1] << 8) |
875 list[t].m_u.tnl_hdr.vni =
876 (vxlan_mask->vni[2] << 16) |
877 (vxlan_mask->vni[1] << 8) |
880 ICE_INSET_TUN_VXLAN_VNI;
/* --- NVGRE: same spec/mask pairing rule; masked 24-bit TNI packed into
 * nvgre_hdr.tni_flow.
 */
886 case RTE_FLOW_ITEM_TYPE_NVGRE:
887 nvgre_spec = item->spec;
888 nvgre_mask = item->mask;
889 /* Check if NVGRE item is used to describe protocol.
890 * If yes, both spec and mask should be NULL.
891 * If no, both spec and mask shouldn't be NULL.
893 if ((!nvgre_spec && nvgre_mask) ||
894 (nvgre_spec && !nvgre_mask)) {
895 rte_flow_error_set(error, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ITEM,
898 "Invalid NVGRE item");
902 if (nvgre_spec && nvgre_mask) {
903 list[t].type = ICE_NVGRE;
904 if (nvgre_mask->tni[0] ||
905 nvgre_mask->tni[1] ||
906 nvgre_mask->tni[2]) {
907 list[t].h_u.nvgre_hdr.tni_flow =
908 (nvgre_spec->tni[2] << 16) |
909 (nvgre_spec->tni[1] << 8) |
911 list[t].m_u.nvgre_hdr.tni_flow =
912 (nvgre_mask->tni[2] << 16) |
913 (nvgre_mask->tni[1] << 8) |
916 ICE_INSET_TUN_NVGRE_TNI;
/* --- VLAN: outer TCI and inner ethertype go into the ICE_VLAN_OFOS slot. */
922 case RTE_FLOW_ITEM_TYPE_VLAN:
923 vlan_spec = item->spec;
924 vlan_mask = item->mask;
925 /* Check if VLAN item is used to describe protocol.
926 * If yes, both spec and mask should be NULL.
927 * If no, both spec and mask shouldn't be NULL.
929 if ((!vlan_spec && vlan_mask) ||
930 (vlan_spec && !vlan_mask)) {
931 rte_flow_error_set(error, EINVAL,
932 RTE_FLOW_ERROR_TYPE_ITEM,
934 "Invalid VLAN item");
937 if (vlan_spec && vlan_mask) {
938 list[t].type = ICE_VLAN_OFOS;
939 if (vlan_mask->tci) {
940 list[t].h_u.vlan_hdr.vlan =
942 list[t].m_u.vlan_hdr.vlan =
944 input_set |= ICE_INSET_VLAN_OUTER;
946 if (vlan_mask->inner_type) {
947 list[t].h_u.vlan_hdr.type =
948 vlan_spec->inner_type;
949 list[t].m_u.vlan_hdr.type =
950 vlan_mask->inner_type;
951 input_set |= ICE_INSET_ETHERTYPE;
/* --- PPPoE discovery/session: only session_id may be masked; length,
 * code and version_type masks are rejected.
 */
957 case RTE_FLOW_ITEM_TYPE_PPPOED:
958 case RTE_FLOW_ITEM_TYPE_PPPOES:
959 pppoe_spec = item->spec;
960 pppoe_mask = item->mask;
961 /* Check if PPPoE item is used to describe protocol.
962 * If yes, both spec and mask should be NULL.
963 * If no, both spec and mask shouldn't be NULL.
965 if ((!pppoe_spec && pppoe_mask) ||
966 (pppoe_spec && !pppoe_mask)) {
967 rte_flow_error_set(error, EINVAL,
968 RTE_FLOW_ERROR_TYPE_ITEM,
970 "Invalid pppoe item");
973 if (pppoe_spec && pppoe_mask) {
974 /* Check pppoe mask and update input set */
975 if (pppoe_mask->length ||
977 pppoe_mask->version_type) {
978 rte_flow_error_set(error, EINVAL,
979 RTE_FLOW_ERROR_TYPE_ITEM,
981 "Invalid pppoe mask");
984 list[t].type = ICE_PPPOE;
985 if (pppoe_mask->session_id) {
986 list[t].h_u.pppoe_hdr.session_id =
987 pppoe_spec->session_id;
988 list[t].m_u.pppoe_hdr.session_id =
989 pppoe_mask->session_id;
990 input_set |= ICE_INSET_PPPOE_SESSION;
/* --- PPPoE proto-id option: fills ppp_prot_id in the same ICE_PPPOE
 * lookup slot.
 */
997 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
998 pppoe_proto_spec = item->spec;
999 pppoe_proto_mask = item->mask;
1000 /* Check if PPPoE optional proto_id item
1001 * is used to describe protocol.
1002 * If yes, both spec and mask should be NULL.
1003 * If no, both spec and mask shouldn't be NULL.
1005 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1006 (pppoe_proto_spec && !pppoe_proto_mask)) {
1007 rte_flow_error_set(error, EINVAL,
1008 RTE_FLOW_ERROR_TYPE_ITEM,
1010 "Invalid pppoe proto item");
1013 if (pppoe_proto_spec && pppoe_proto_mask) {
1016 list[t].type = ICE_PPPOE;
1017 if (pppoe_proto_mask->proto_id) {
1018 list[t].h_u.pppoe_hdr.ppp_prot_id =
1019 pppoe_proto_spec->proto_id;
1020 list[t].m_u.pppoe_hdr.ppp_prot_id =
1021 pppoe_proto_mask->proto_id;
1022 input_set |= ICE_INSET_PPPOE_PROTO;
/* --- ESP: a bare ESP item (no spec/mask, no prior input set) selects a
 * PROFID tunnel type; an SPI-masked item fills an ICE_ESP or ICE_NAT_T
 * lookup.  Non-profile rules pick ICE_SW_TUN_IPV{4,6}_{ESP,NAT_T}
 * based on the earlier ipv4/ipv6/udp flags.
 */
1028 case RTE_FLOW_ITEM_TYPE_ESP:
1029 esp_spec = item->spec;
1030 esp_mask = item->mask;
1031 if ((esp_spec && !esp_mask) ||
1032 (!esp_spec && esp_mask)) {
1033 rte_flow_error_set(error, EINVAL,
1034 RTE_FLOW_ERROR_TYPE_ITEM,
1036 "Invalid esp item");
1039 /* Check esp mask and update input set */
1040 if (esp_mask && esp_mask->hdr.seq) {
1041 rte_flow_error_set(error, EINVAL,
1042 RTE_FLOW_ERROR_TYPE_ITEM,
1044 "Invalid esp mask");
1048 if (!esp_spec && !esp_mask && !input_set) {
1050 if (ipv6_valiad && udp_valiad)
1052 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1053 else if (ipv6_valiad)
1054 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1055 else if (ipv4_valiad)
1057 } else if (esp_spec && esp_mask &&
1060 list[t].type = ICE_NAT_T;
1062 list[t].type = ICE_ESP;
1063 list[t].h_u.esp_hdr.spi =
1065 list[t].m_u.esp_hdr.spi =
1067 input_set |= ICE_INSET_ESP_SPI;
1071 if (!profile_rule) {
1072 if (ipv6_valiad && udp_valiad)
1073 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1074 else if (ipv4_valiad && udp_valiad)
1075 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1076 else if (ipv6_valiad)
1077 *tun_type = ICE_SW_TUN_IPV6_ESP;
1078 else if (ipv4_valiad)
1079 *tun_type = ICE_SW_TUN_IPV4_ESP;
/* --- AH: same structure as ESP; only the SPI may be matched, the other
 * AH header fields' masks are rejected.
 */
1083 case RTE_FLOW_ITEM_TYPE_AH:
1084 ah_spec = item->spec;
1085 ah_mask = item->mask;
1086 if ((ah_spec && !ah_mask) ||
1087 (!ah_spec && ah_mask)) {
1088 rte_flow_error_set(error, EINVAL,
1089 RTE_FLOW_ERROR_TYPE_ITEM,
1094 /* Check ah mask and update input set */
1096 (ah_mask->next_hdr ||
1097 ah_mask->payload_len ||
1099 ah_mask->reserved)) {
1100 rte_flow_error_set(error, EINVAL,
1101 RTE_FLOW_ERROR_TYPE_ITEM,
1107 if (!ah_spec && !ah_mask && !input_set) {
1109 if (ipv6_valiad && udp_valiad)
1111 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1112 else if (ipv6_valiad)
1113 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1114 else if (ipv4_valiad)
1116 } else if (ah_spec && ah_mask &&
1118 list[t].type = ICE_AH;
1119 list[t].h_u.ah_hdr.spi =
1121 list[t].m_u.ah_hdr.spi =
1123 input_set |= ICE_INSET_AH_SPI;
1127 if (!profile_rule) {
1130 else if (ipv6_valiad)
1131 *tun_type = ICE_SW_TUN_IPV6_AH;
1132 else if (ipv4_valiad)
1133 *tun_type = ICE_SW_TUN_IPV4_AH;
/* --- L2TPv3 over IP: bare item selects a PROFID type; a session_id-masked
 * item fills an ICE_L2TPV3 lookup.
 */
1137 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1138 l2tp_spec = item->spec;
1139 l2tp_mask = item->mask;
1140 if ((l2tp_spec && !l2tp_mask) ||
1141 (!l2tp_spec && l2tp_mask)) {
1142 rte_flow_error_set(error, EINVAL,
1143 RTE_FLOW_ERROR_TYPE_ITEM,
1145 "Invalid l2tp item");
1149 if (!l2tp_spec && !l2tp_mask && !input_set) {
1152 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1153 else if (ipv4_valiad)
1155 } else if (l2tp_spec && l2tp_mask &&
1156 l2tp_mask->session_id){
1157 list[t].type = ICE_L2TPV3;
1158 list[t].h_u.l2tpv3_sess_hdr.session_id =
1159 l2tp_spec->session_id;
1160 list[t].m_u.l2tpv3_sess_hdr.session_id =
1161 l2tp_mask->session_id;
1162 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1166 if (!profile_rule) {
1169 ICE_SW_TUN_IPV6_L2TPV3;
1170 else if (ipv4_valiad)
1172 ICE_SW_TUN_IPV4_L2TPV3;
/* --- PFCP: matched purely via PROFID tunnel types keyed on the S-field
 * (session vs. node message) and the outer IP version; msg_type/msg_len
 * masks are rejected.
 */
1176 case RTE_FLOW_ITEM_TYPE_PFCP:
1177 pfcp_spec = item->spec;
1178 pfcp_mask = item->mask;
1179 /* Check if PFCP item is used to describe protocol.
1180 * If yes, both spec and mask should be NULL.
1181 * If no, both spec and mask shouldn't be NULL.
1183 if ((!pfcp_spec && pfcp_mask) ||
1184 (pfcp_spec && !pfcp_mask)) {
1185 rte_flow_error_set(error, EINVAL,
1186 RTE_FLOW_ERROR_TYPE_ITEM,
1188 "Invalid PFCP item");
1191 if (pfcp_spec && pfcp_mask) {
1192 /* Check pfcp mask and update input set */
1193 if (pfcp_mask->msg_type ||
1194 pfcp_mask->msg_len ||
1196 rte_flow_error_set(error, EINVAL,
1197 RTE_FLOW_ERROR_TYPE_ITEM,
1199 "Invalid pfcp mask");
1202 if (pfcp_mask->s_field &&
1203 pfcp_spec->s_field == 0x01 &&
1206 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1207 else if (pfcp_mask->s_field &&
1208 pfcp_spec->s_field == 0x01)
1210 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1211 else if (pfcp_mask->s_field &&
1212 !pfcp_spec->s_field &&
1215 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1216 else if (pfcp_mask->s_field &&
1217 !pfcp_spec->s_field)
1219 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
/* VOID items are ignored; any other item type is an error. */
1225 case RTE_FLOW_ITEM_TYPE_VOID:
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1231 "Invalid pattern item.");
/*
 * Parse the action list for a rule created through the DCF (device config
 * function).  Only RTE_FLOW_ACTION_TYPE_VF is accepted: the rule forwards
 * to a VSI — either the DCF's own function (action_vf->original) or the VF
 * given by act_vf->id.  Source, RX flag and priority are then fixed up.
 * NOTE(review): return-type line, braces, `break`s/returns and the trailing
 * `return 0;` (original lines 1248, 1252, 1263, 1265-1266, 1269, 1271-1274,
 * 1277, 1279-1281) are missing from this listing.
 */
1244 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1245 const struct rte_flow_action *actions,
1246 struct rte_flow_error *error,
1247 struct ice_adv_rule_info *rule_info)
1249 const struct rte_flow_action_vf *act_vf;
1250 const struct rte_flow_action *action;
1251 enum rte_flow_action_type action_type;
1253 for (action = actions; action->type !=
1254 RTE_FLOW_ACTION_TYPE_END; action++) {
1255 action_type = action->type;
1256 switch (action_type) {
1257 case RTE_FLOW_ACTION_TYPE_VF:
1258 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1259 act_vf = action->conf;
/* "original" means target the DCF's own function number. */
1260 if (act_vf->original)
1261 rule_info->sw_act.vsi_handle =
1262 ad->real_hw.avf.bus.func;
1264 rule_info->sw_act.vsi_handle = act_vf->id;
1267 rte_flow_error_set(error,
1268 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1270 "Invalid action type or queue number");
/* Rule matches RX traffic sourced from the chosen VSI. */
1275 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1276 rule_info->sw_act.flag = ICE_FLTR_RX;
1278 rule_info->priority = 5;
/*
 * Parse the action list for a PF-owned rule and fill rule_info->sw_act.
 * Supported actions: RSS-style queue group (power-of-two group of 2..128
 * contiguous queues), single QUEUE, and DROP; VOID is skipped.  Queue IDs
 * are offset by the PF/VSI base queue.  Unsupported actions fall through
 * to the error path at the bottom.
 * NOTE(review): return-type line, braces, `goto error`/`break` statements,
 * the ICE_FWD_TO_* enumerators on the assignment continuations and the
 * final returns (original lines 1288, 1298, 1307, 1309, 1315-1316, 1318,
 * 1322, 1326, 1329, 1333, 1335, 1338-1339, 1342-1344, 1346-1352, 1354,
 * 1357-1360, 1363, 1365-1367) are missing from this listing.
 */
1284 ice_switch_parse_action(struct ice_pf *pf,
1285 const struct rte_flow_action *actions,
1286 struct rte_flow_error *error,
1287 struct ice_adv_rule_info *rule_info)
1289 struct ice_vsi *vsi = pf->main_vsi;
1290 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1291 const struct rte_flow_action_queue *act_q;
1292 const struct rte_flow_action_rss *act_qgrop;
1293 uint16_t base_queue, i;
1294 const struct rte_flow_action *action;
1295 enum rte_flow_action_type action_type;
/* Legal queue-group sizes for ICE_FWD_TO_QGRP. */
1296 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1297 2, 4, 8, 16, 32, 64, 128};
1299 base_queue = pf->base_queue + vsi->base_queue;
1300 for (action = actions; action->type !=
1301 RTE_FLOW_ACTION_TYPE_END; action++) {
1302 action_type = action->type;
1303 switch (action_type) {
1304 case RTE_FLOW_ACTION_TYPE_RSS:
1305 act_qgrop = action->conf;
1306 if (act_qgrop->queue_num <= 1)
1308 rule_info->sw_act.fltr_act =
1310 rule_info->sw_act.fwd_id.q_id =
1311 base_queue + act_qgrop->queue[0];
/* Group size must be one of valid_qgrop_number[]. */
1312 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1313 if (act_qgrop->queue_num ==
1314 valid_qgrop_number[i])
1317 if (i == MAX_QGRP_NUM_TYPE)
1319 if ((act_qgrop->queue[0] +
1320 act_qgrop->queue_num) >
1321 dev->data->nb_rx_queues)
/* Queues in the group must be contiguous and ascending. */
1323 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1324 if (act_qgrop->queue[i + 1] !=
1325 act_qgrop->queue[i] + 1)
1327 rule_info->sw_act.qgrp_size =
1328 act_qgrop->queue_num;
1330 case RTE_FLOW_ACTION_TYPE_QUEUE:
1331 act_q = action->conf;
1332 if (act_q->index >= dev->data->nb_rx_queues)
1334 rule_info->sw_act.fltr_act =
1336 rule_info->sw_act.fwd_id.q_id =
1337 base_queue + act_q->index;
1340 case RTE_FLOW_ACTION_TYPE_DROP:
1341 rule_info->sw_act.fltr_act =
1345 case RTE_FLOW_ACTION_TYPE_VOID:
1353 rule_info->sw_act.vsi_handle = vsi->idx;
1355 rule_info->sw_act.src = vsi->idx;
1356 rule_info->priority = 5;
1361 rte_flow_error_set(error,
1362 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1364 "Invalid action type or queue number");
/* Validate the action list: count the real actions, accepting only
 * VF, RSS, QUEUE and DROP (VOID is ignored). Exactly one real action
 * is allowed; anything else is reported via rte_flow_error_set().
 */
1369 ice_switch_check_action(const struct rte_flow_action *actions,
1370 struct rte_flow_error *error)
1372 const struct rte_flow_action *action;
1373 enum rte_flow_action_type action_type;
1374 uint16_t actions_num = 0;
1376 for (action = actions; action->type !=
1377 RTE_FLOW_ACTION_TYPE_END; action++) {
1378 action_type = action->type;
1379 switch (action_type) {
1380 case RTE_FLOW_ACTION_TYPE_VF:
1381 case RTE_FLOW_ACTION_TYPE_RSS:
1382 case RTE_FLOW_ACTION_TYPE_QUEUE:
1383 case RTE_FLOW_ACTION_TYPE_DROP:
/* VOID actions are skipped, not counted. */
1386 case RTE_FLOW_ACTION_TYPE_VOID:
1389 rte_flow_error_set(error,
1390 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1392 "Invalid action type");
/* A switch rule must carry exactly one forwarding/drop action. */
1397 if (actions_num != 1) {
1398 rte_flow_error_set(error,
1399 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1401 "Invalid action number");
/* Return whether the tunnel type denotes a profile-id based rule
 * (ESP, AH, L2TPv3, NAT-T, PFCP node/session). Such rules are matched
 * by profile rather than by field lookups, so they are allowed an
 * empty input set (see ice_switch_parse_pattern_action).
 */
1409 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1412 case ICE_SW_TUN_PROFID_IPV6_ESP:
1413 case ICE_SW_TUN_PROFID_IPV6_AH:
1414 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1415 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1416 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1417 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1418 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1419 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/* Top-level parse callback for the switch engine: validate the pattern
 * and actions, build the advanced-rule lookup list plus rule_info, and
 * return them to the framework through *meta as a struct sw_meta.
 * On failure, intermediate allocations are released and an rte_flow
 * error is set.
 */
1429 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1430 struct ice_pattern_match_item *array,
1432 const struct rte_flow_item pattern[],
1433 const struct rte_flow_action actions[],
1435 struct rte_flow_error *error)
1437 struct ice_pf *pf = &ad->pf;
1438 uint64_t inputset = 0;
1440 struct sw_meta *sw_meta_ptr = NULL;
1441 struct ice_adv_rule_info rule_info;
1442 struct ice_adv_lkup_elem *list = NULL;
1443 uint16_t lkups_num = 0;
1444 const struct rte_flow_item *item = pattern;
1445 uint16_t item_num = 0;
1446 enum ice_sw_tunnel_type tun_type =
1447 ICE_SW_TUN_AND_NON_TUN;
1448 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass: count items and derive the tunnel type from the
 * pattern (VXLAN, NVGRE, PPPoE, otherwise tunnel-and-non-tunnel).
 */
1450 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1452 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1453 tun_type = ICE_SW_TUN_VXLAN;
1454 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1455 tun_type = ICE_SW_TUN_NVGRE;
1456 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1457 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1458 tun_type = ICE_SW_TUN_PPPOE;
1459 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1460 const struct rte_flow_item_eth *eth_mask;
1462 eth_mask = item->mask;
/* A fully-masked ether type forces the non-tunnel profile. */
1465 if (eth_mask->type == UINT16_MAX)
1466 tun_type = ICE_SW_TUN_AND_NON_TUN;
1468 /* reserve one more memory slot for ETH which may
1469 * consume 2 lookup items.
1471 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Allocate the lookup-element array and the meta container. */
1475 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1477 rte_flow_error_set(error, EINVAL,
1478 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1479 "No memory for PMD internal items");
1484 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1486 rte_flow_error_set(error, EINVAL,
1487 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1488 "No memory for sw_pattern_meta_ptr");
/* Match the pattern against this parser's supported-pattern table. */
1492 pattern_match_item =
1493 ice_search_pattern_match_item(pattern, array, array_len, error);
1494 if (!pattern_match_item) {
1495 rte_flow_error_set(error, EINVAL,
1496 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1497 "Invalid input pattern");
/* Build the lookup list and collect the input set; profile rules
 * (ice_is_profile_rule) may legitimately have an empty input set,
 * but no rule may use fields outside the pattern's input_set_mask.
 */
1501 inputset = ice_switch_inset_get
1502 (pattern, error, list, &lkups_num, &tun_type);
1503 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1504 (inputset & ~pattern_match_item->input_set_mask)) {
1505 rte_flow_error_set(error, EINVAL,
1506 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1508 "Invalid input set");
1512 memset(&rule_info, 0, sizeof(rule_info));
1513 rule_info.tun_type = tun_type;
/* Validate the action count, then parse on the DCF or PF path. */
1515 ret = ice_switch_check_action(actions, error);
1517 rte_flow_error_set(error, EINVAL,
1518 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1519 "Invalid input action number");
1523 if (ad->hw.dcf_enabled)
1524 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1527 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1530 rte_flow_error_set(error, EINVAL,
1531 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1532 "Invalid input action");
/* Success: hand ownership of list/lkups_num/rule_info to the
 * caller through *meta.
 */
1537 *meta = sw_meta_ptr;
1538 ((struct sw_meta *)*meta)->list = list;
1539 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1540 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error paths: release whatever was allocated before failing. */
1543 rte_free(sw_meta_ptr);
1546 rte_free(pattern_match_item);
1552 rte_free(sw_meta_ptr);
1553 rte_free(pattern_match_item);
/* Query callback: flow count queries are not implemented by the
 * switch engine, so always report an error.
 */
1559 ice_switch_query(struct ice_adapter *ad __rte_unused,
1560 struct rte_flow *flow __rte_unused,
1561 struct rte_flow_query_count *count __rte_unused,
1562 struct rte_flow_error *error)
1564 rte_flow_error_set(error, EINVAL,
1565 RTE_FLOW_ERROR_TYPE_HANDLE,
1567 "count action not supported by switch filter");
/* Redirect an existing switch rule to a new VSI number: locate the
 * rule in its recipe's filter list, duplicate its lookup elements,
 * remove the old rule, patch the VSI context with the new VSI number
 * and replay the rule against it. Only ICE_FLOW_REDIRECT_VSI
 * redirections for the matching vsi_handle are handled.
 */
1573 ice_switch_redirect(struct ice_adapter *ad,
1574 struct rte_flow *flow,
1575 struct ice_flow_redirect *rd)
1577 struct ice_rule_query_data *rdata = flow->rule;
1578 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1579 struct ice_adv_lkup_elem *lkups_dp = NULL;
1580 struct LIST_HEAD_TYPE *list_head;
1581 struct ice_adv_rule_info rinfo;
1582 struct ice_hw *hw = &ad->hw;
1583 struct ice_switch_info *sw;
/* Only rules bound to the VSI being redirected are affected. */
1587 if (rdata->vsi_handle != rd->vsi_handle)
1590 sw = hw->switch_info;
/* Nothing to do if the rule's recipe was never created. */
1591 if (!sw->recp_list[rdata->rid].recp_created)
1594 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Find the rule by id in the recipe's filter-rule list. */
1597 list_head = &sw->recp_list[rdata->rid].filt_rules;
1598 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1600 rinfo = list_itr->rule_info;
1601 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1602 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1603 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1604 (rinfo.fltr_rule_id == rdata->rule_id &&
1605 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Copy the lookups: the originals go away with the old rule. */
1606 lkups_cnt = list_itr->lkups_cnt;
1607 lkups_dp = (struct ice_adv_lkup_elem *)
1608 ice_memdup(hw, list_itr->lkups,
1609 sizeof(*list_itr->lkups) *
1610 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1613 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* VSI-list rules are replayed as plain forward-to-VSI rules. */
1617 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1618 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1619 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1628 /* Remove the old rule */
1629 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1632 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1638 /* Update VSI context */
1639 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1641 /* Replay the rule */
1642 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1645 PMD_DRV_LOG(ERR, "Failed to replay the rule");
/* Free the duplicated lookup array on all exit paths. */
1650 ice_free(hw, lkups_dp);
/* Engine init: pick the distributor parser matching the active DDP
 * package (comms vs OS default), then register either the permission
 * parser (when the pipe_mode_support devarg is set) or the
 * distributor parser.
 */
1655 ice_switch_init(struct ice_adapter *ad)
1658 struct ice_flow_parser *dist_parser;
1659 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1661 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1662 dist_parser = &ice_switch_dist_parser_comms;
1663 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1664 dist_parser = &ice_switch_dist_parser_os;
/* pipe-mode selects the permission-stage parser instead. */
1668 if (ad->devargs.pipe_mode_support)
1669 ret = ice_register_parser(perm_parser, ad);
1671 ret = ice_register_parser(dist_parser, ad);
/* Engine uninit: mirror of ice_switch_init — unregister whichever
 * parser was registered for the active package / pipe mode.
 */
1676 ice_switch_uninit(struct ice_adapter *ad)
1678 struct ice_flow_parser *dist_parser;
1679 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1681 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1682 dist_parser = &ice_switch_dist_parser_comms;
1684 dist_parser = &ice_switch_dist_parser_os;
1686 if (ad->devargs.pipe_mode_support)
1687 ice_unregister_parser(perm_parser, ad);
1689 ice_unregister_parser(dist_parser, ad);
/* Switch flow engine descriptor: wires the callbacks above (and the
 * create/destroy/free handlers defined earlier in this file) into the
 * generic ice flow framework.
 */
1693 ice_flow_engine ice_switch_engine = {
1694 .init = ice_switch_init,
1695 .uninit = ice_switch_uninit,
1696 .create = ice_switch_create,
1697 .destroy = ice_switch_destroy,
1698 .query_count = ice_switch_query,
1699 .redirect = ice_switch_redirect,
1700 .free = ice_switch_filter_rule_free,
1701 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser for the OS-default DDP package. */
1705 ice_flow_parser ice_switch_dist_parser_os = {
1706 .engine = &ice_switch_engine,
1707 .array = ice_switch_pattern_dist_os,
1708 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1709 .parse_pattern_action = ice_switch_parse_pattern_action,
1710 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser for the comms DDP package. */
1714 ice_flow_parser ice_switch_dist_parser_comms = {
1715 .engine = &ice_switch_engine,
1716 .array = ice_switch_pattern_dist_comms,
1717 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1718 .parse_pattern_action = ice_switch_parse_pattern_action,
1719 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser, used when pipe_mode_support is enabled. */
1723 ice_flow_parser ice_switch_perm_parser = {
1724 .engine = &ice_switch_engine,
1725 .array = ice_switch_pattern_perm,
1726 .array_len = RTE_DIM(ice_switch_pattern_perm),
1727 .parse_pattern_action = ice_switch_parse_pattern_action,
1728 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Register the switch flow engine with the generic flow framework at
 * shared-object constructor time.
 */
1731 RTE_INIT(ice_sw_engine_init)
1733 struct ice_flow_engine *engine = &ice_switch_engine;
1734 ice_register_flow_engine(engine);