1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
28 #define MAX_QGRP_NUM_TYPE 7
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
98 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
99 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
100 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
101 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
102 #define ICE_SW_INSET_MAC_IPV4_AH ( \
103 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
104 #define ICE_SW_INSET_MAC_IPV6_AH ( \
105 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
106 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
107 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
108 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
109 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
110 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
111 ICE_SW_INSET_MAC_IPV4 | \
112 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
113 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
114 ICE_SW_INSET_MAC_IPV6 | \
115 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
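/*
 * Illustrative sketch only, not used by the driver: the ICE_SW_INSET_*
 * bitmaps above list the header fields each supported pattern is allowed
 * to match.  A parsed input set is acceptable only when it carries no bit
 * outside the pattern's bitmap, which is the same test applied later in
 * ice_switch_parse_pattern_action().
 */
static inline bool
ice_switch_example_inset_allowed(uint64_t parsed_inset, uint64_t allowed_inset)
{
	/* e.g. ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_TCP_DST_PORT
	 * is accepted against ICE_SW_INSET_MAC_IPV4_TCP, while adding the
	 * unsupported ICE_INSET_IPV4_PROTO bit would be rejected.
	 */
	return (parsed_inset & ~allowed_inset) == 0;
}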
118 struct ice_adv_lkup_elem *list;
120 struct ice_adv_rule_info rule_info;
123 static struct ice_flow_parser ice_switch_dist_parser_os;
124 static struct ice_flow_parser ice_switch_dist_parser_comms;
125 static struct ice_flow_parser ice_switch_perm_parser;
128 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
130 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
131 {pattern_ethertype_vlan,
132 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
134 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_udp,
136 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_tcp,
138 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
141 {pattern_eth_ipv6_udp,
142 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
143 {pattern_eth_ipv6_tcp,
144 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
145 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
146 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
147 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
148 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
149 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
150 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
151 {pattern_eth_ipv4_nvgre_eth_ipv4,
152 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
153 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
154 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
155 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
156 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
158 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
159 {pattern_eth_vlan_pppoed,
160 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
162 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
163 {pattern_eth_vlan_pppoes,
164 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
165 {pattern_eth_pppoes_proto,
166 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
167 {pattern_eth_vlan_pppoes_proto,
168 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
169 {pattern_eth_ipv4_esp,
170 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_udp_esp,
172 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
173 {pattern_eth_ipv6_esp,
174 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
175 {pattern_eth_ipv6_udp_esp,
176 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
177 {pattern_eth_ipv4_ah,
178 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
179 {pattern_eth_ipv6_ah,
180 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
181 {pattern_eth_ipv6_udp_ah,
182 ICE_INSET_NONE, ICE_INSET_NONE},
183 {pattern_eth_ipv4_l2tp,
184 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
185 {pattern_eth_ipv6_l2tp,
186 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_pfcp,
188 ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_eth_ipv6_pfcp,
190 ICE_INSET_NONE, ICE_INSET_NONE},
194 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
196 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
197 {pattern_ethertype_vlan,
198 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
200 ICE_INSET_NONE, ICE_INSET_NONE},
202 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
203 {pattern_eth_ipv4_udp,
204 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
205 {pattern_eth_ipv4_tcp,
206 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
208 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
209 {pattern_eth_ipv6_udp,
210 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
211 {pattern_eth_ipv6_tcp,
212 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
214 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
215 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
216 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
218 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4,
220 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
221 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
222 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
224 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
228 ice_pattern_match_item ice_switch_pattern_perm[] = {
230 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
231 {pattern_ethertype_vlan,
232 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
234 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
235 {pattern_eth_ipv4_udp,
236 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
237 {pattern_eth_ipv4_tcp,
238 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
240 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
241 {pattern_eth_ipv6_udp,
242 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
243 {pattern_eth_ipv6_tcp,
244 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
245 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
246 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
247 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
248 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
249 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
250 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
251 {pattern_eth_ipv4_nvgre_eth_ipv4,
252 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
253 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
254 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
255 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
256 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
258 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
259 {pattern_eth_vlan_pppoed,
260 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
262 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
263 {pattern_eth_vlan_pppoes,
264 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
265 {pattern_eth_pppoes_proto,
266 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
267 {pattern_eth_vlan_pppoes_proto,
268 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
269 {pattern_eth_ipv4_esp,
270 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
271 {pattern_eth_ipv4_udp_esp,
272 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
273 {pattern_eth_ipv6_esp,
274 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
275 {pattern_eth_ipv6_udp_esp,
276 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
277 {pattern_eth_ipv4_ah,
278 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
279 {pattern_eth_ipv6_ah,
280 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
281 {pattern_eth_ipv6_udp_ah,
282 ICE_INSET_NONE, ICE_INSET_NONE},
283 {pattern_eth_ipv4_l2tp,
284 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
285 {pattern_eth_ipv6_l2tp,
286 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
287 {pattern_eth_ipv4_pfcp,
288 ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_ipv6_pfcp,
290 ICE_INSET_NONE, ICE_INSET_NONE},
294 ice_switch_create(struct ice_adapter *ad,
295 struct rte_flow *flow,
297 struct rte_flow_error *error)
300 struct ice_pf *pf = &ad->pf;
301 struct ice_hw *hw = ICE_PF_TO_HW(pf);
302 struct ice_rule_query_data rule_added = {0};
303 struct ice_rule_query_data *filter_ptr;
304 struct ice_adv_lkup_elem *list =
305 ((struct sw_meta *)meta)->list;
307 ((struct sw_meta *)meta)->lkups_num;
308 struct ice_adv_rule_info *rule_info =
309 &((struct sw_meta *)meta)->rule_info;
311 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
312 rte_flow_error_set(error, EINVAL,
313 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
314 "item number too large for rule");
318 rte_flow_error_set(error, EINVAL,
319 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
320 "lookup list should not be NULL");
323 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
325 filter_ptr = rte_zmalloc("ice_switch_filter",
326 sizeof(struct ice_rule_query_data), 0);
328 rte_flow_error_set(error, EINVAL,
329 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
330 "No memory for ice_switch_filter");
333 flow->rule = filter_ptr;
334 rte_memcpy(filter_ptr,
336 sizeof(struct ice_rule_query_data));
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
340 "switch filter create flow fail");
356 ice_switch_destroy(struct ice_adapter *ad,
357 struct rte_flow *flow,
358 struct rte_flow_error *error)
360 struct ice_hw *hw = &ad->hw;
362 struct ice_rule_query_data *filter_ptr;
364 filter_ptr = (struct ice_rule_query_data *)
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
371 " create by switch filter");
375 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
377 rte_flow_error_set(error, EINVAL,
378 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
379 "fail to destroy switch filter rule");
383 rte_free(filter_ptr);
388 ice_switch_filter_rule_free(struct rte_flow *flow)
390 rte_free(flow->rule);
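/*
 * Illustrative sketch only, with a hypothetical helper name: from the
 * application side the create/destroy/free callbacks above are reached
 * through the generic rte_flow API, roughly in the order shown below.
 */
static __rte_unused int
ice_switch_example_flow_lifecycle(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	/* Parse-only check of the pattern and actions. */
	ret = rte_flow_validate(port_id, attr, pattern, actions, &err);
	if (ret)
		return ret;

	/* Programs the rule in hardware; ends up in ice_switch_create(). */
	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (!flow)
		return -EINVAL;

	/* Removes the rule again; ends up in ice_switch_destroy() and
	 * ice_switch_filter_rule_free().
	 */
	return rte_flow_destroy(port_id, flow, &err);
}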
394 ice_switch_inset_get(const struct rte_flow_item pattern[],
395 struct rte_flow_error *error,
396 struct ice_adv_lkup_elem *list,
398 enum ice_sw_tunnel_type *tun_type)
400 const struct rte_flow_item *item = pattern;
401 enum rte_flow_item_type item_type;
402 const struct rte_flow_item_eth *eth_spec, *eth_mask;
403 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
404 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
405 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
406 const struct rte_flow_item_udp *udp_spec, *udp_mask;
407 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
408 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
409 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
410 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
411 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
412 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
414 const struct rte_flow_item_esp *esp_spec, *esp_mask;
415 const struct rte_flow_item_ah *ah_spec, *ah_mask;
416 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
417 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
418 uint64_t input_set = ICE_INSET_NONE;
420 bool profile_rule = 0;
421 bool tunnel_valid = 0;
422 bool pppoe_valid = 0;
423 bool ipv6_valiad = 0;
424 bool ipv4_valiad = 0;
427 for (item = pattern; item->type !=
428 RTE_FLOW_ITEM_TYPE_END; item++) {
430 rte_flow_error_set(error, EINVAL,
431 RTE_FLOW_ERROR_TYPE_ITEM,
433 "Not support range");
436 item_type = item->type;
439 case RTE_FLOW_ITEM_TYPE_ETH:
440 eth_spec = item->spec;
441 eth_mask = item->mask;
442 if (eth_spec && eth_mask) {
443 const uint8_t *a = eth_mask->src.addr_bytes;
444 const uint8_t *b = eth_mask->dst.addr_bytes;
445 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
446 if (a[j] && tunnel_valid) {
456 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
457 if (b[j] && tunnel_valid) {
468 input_set |= ICE_INSET_ETHERTYPE;
469 list[t].type = (tunnel_valid == 0) ?
470 ICE_MAC_OFOS : ICE_MAC_IL;
471 struct ice_ether_hdr *h;
472 struct ice_ether_hdr *m;
474 h = &list[t].h_u.eth_hdr;
475 m = &list[t].m_u.eth_hdr;
476 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
477 if (eth_mask->src.addr_bytes[j]) {
479 eth_spec->src.addr_bytes[j];
481 eth_mask->src.addr_bytes[j];
484 if (eth_mask->dst.addr_bytes[j]) {
486 eth_spec->dst.addr_bytes[j];
488 eth_mask->dst.addr_bytes[j];
494 if (eth_mask->type) {
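/* A matched EtherType is emitted as an extra lookup
 * element of type ICE_ETYPE_OL, so one ETH item can
 * consume two list entries; this is why an additional
 * slot per ETH item is reserved when the list is sized
 * in ice_switch_parse_pattern_action().
 */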
495 list[t].type = ICE_ETYPE_OL;
496 list[t].h_u.ethertype.ethtype_id =
498 list[t].m_u.ethertype.ethtype_id =
505 case RTE_FLOW_ITEM_TYPE_IPV4:
506 ipv4_spec = item->spec;
507 ipv4_mask = item->mask;
509 if (ipv4_spec && ipv4_mask) {
510 /* Check IPv4 mask and update input set */
511 if (ipv4_mask->hdr.version_ihl ||
512 ipv4_mask->hdr.total_length ||
513 ipv4_mask->hdr.packet_id ||
514 ipv4_mask->hdr.hdr_checksum) {
515 rte_flow_error_set(error, EINVAL,
516 RTE_FLOW_ERROR_TYPE_ITEM,
518 "Invalid IPv4 mask.");
523 if (ipv4_mask->hdr.type_of_service)
525 ICE_INSET_TUN_IPV4_TOS;
526 if (ipv4_mask->hdr.src_addr)
528 ICE_INSET_TUN_IPV4_SRC;
529 if (ipv4_mask->hdr.dst_addr)
531 ICE_INSET_TUN_IPV4_DST;
532 if (ipv4_mask->hdr.time_to_live)
534 ICE_INSET_TUN_IPV4_TTL;
535 if (ipv4_mask->hdr.next_proto_id)
537 ICE_INSET_TUN_IPV4_PROTO;
539 if (ipv4_mask->hdr.src_addr)
540 input_set |= ICE_INSET_IPV4_SRC;
541 if (ipv4_mask->hdr.dst_addr)
542 input_set |= ICE_INSET_IPV4_DST;
543 if (ipv4_mask->hdr.time_to_live)
544 input_set |= ICE_INSET_IPV4_TTL;
545 if (ipv4_mask->hdr.next_proto_id)
547 ICE_INSET_IPV4_PROTO;
548 if (ipv4_mask->hdr.type_of_service)
552 list[t].type = (tunnel_valid == 0) ?
553 ICE_IPV4_OFOS : ICE_IPV4_IL;
554 if (ipv4_mask->hdr.src_addr) {
555 list[t].h_u.ipv4_hdr.src_addr =
556 ipv4_spec->hdr.src_addr;
557 list[t].m_u.ipv4_hdr.src_addr =
558 ipv4_mask->hdr.src_addr;
560 if (ipv4_mask->hdr.dst_addr) {
561 list[t].h_u.ipv4_hdr.dst_addr =
562 ipv4_spec->hdr.dst_addr;
563 list[t].m_u.ipv4_hdr.dst_addr =
564 ipv4_mask->hdr.dst_addr;
566 if (ipv4_mask->hdr.time_to_live) {
567 list[t].h_u.ipv4_hdr.time_to_live =
568 ipv4_spec->hdr.time_to_live;
569 list[t].m_u.ipv4_hdr.time_to_live =
570 ipv4_mask->hdr.time_to_live;
572 if (ipv4_mask->hdr.next_proto_id) {
573 list[t].h_u.ipv4_hdr.protocol =
574 ipv4_spec->hdr.next_proto_id;
575 list[t].m_u.ipv4_hdr.protocol =
576 ipv4_mask->hdr.next_proto_id;
578 if (ipv4_mask->hdr.type_of_service) {
579 list[t].h_u.ipv4_hdr.tos =
580 ipv4_spec->hdr.type_of_service;
581 list[t].m_u.ipv4_hdr.tos =
582 ipv4_mask->hdr.type_of_service;
588 case RTE_FLOW_ITEM_TYPE_IPV6:
589 ipv6_spec = item->spec;
590 ipv6_mask = item->mask;
592 if (ipv6_spec && ipv6_mask) {
593 if (ipv6_mask->hdr.payload_len) {
594 rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM,
597 "Invalid IPv6 mask");
601 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
602 if (ipv6_mask->hdr.src_addr[j] &&
605 ICE_INSET_TUN_IPV6_SRC;
607 } else if (ipv6_mask->hdr.src_addr[j]) {
608 input_set |= ICE_INSET_IPV6_SRC;
612 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
613 if (ipv6_mask->hdr.dst_addr[j] &&
616 ICE_INSET_TUN_IPV6_DST;
618 } else if (ipv6_mask->hdr.dst_addr[j]) {
619 input_set |= ICE_INSET_IPV6_DST;
623 if (ipv6_mask->hdr.proto &&
626 ICE_INSET_TUN_IPV6_NEXT_HDR;
627 else if (ipv6_mask->hdr.proto)
629 ICE_INSET_IPV6_NEXT_HDR;
630 if (ipv6_mask->hdr.hop_limits &&
633 ICE_INSET_TUN_IPV6_HOP_LIMIT;
634 else if (ipv6_mask->hdr.hop_limits)
636 ICE_INSET_IPV6_HOP_LIMIT;
637 if ((ipv6_mask->hdr.vtc_flow &
639 (RTE_IPV6_HDR_TC_MASK)) &&
642 ICE_INSET_TUN_IPV6_TC;
643 else if (ipv6_mask->hdr.vtc_flow &
645 (RTE_IPV6_HDR_TC_MASK))
646 input_set |= ICE_INSET_IPV6_TC;
648 list[t].type = (tunnel_valid == 0) ?
649 ICE_IPV6_OFOS : ICE_IPV6_IL;
650 struct ice_ipv6_hdr *f;
651 struct ice_ipv6_hdr *s;
652 f = &list[t].h_u.ipv6_hdr;
653 s = &list[t].m_u.ipv6_hdr;
654 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
655 if (ipv6_mask->hdr.src_addr[j]) {
657 ipv6_spec->hdr.src_addr[j];
659 ipv6_mask->hdr.src_addr[j];
661 if (ipv6_mask->hdr.dst_addr[j]) {
663 ipv6_spec->hdr.dst_addr[j];
665 ipv6_mask->hdr.dst_addr[j];
668 if (ipv6_mask->hdr.proto) {
670 ipv6_spec->hdr.proto;
672 ipv6_mask->hdr.proto;
674 if (ipv6_mask->hdr.hop_limits) {
676 ipv6_spec->hdr.hop_limits;
678 ipv6_mask->hdr.hop_limits;
680 if (ipv6_mask->hdr.vtc_flow &
682 (RTE_IPV6_HDR_TC_MASK)) {
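/* vtc_flow packs version (4 bits), traffic class
 * (8 bits) and flow label (20 bits) in network byte
 * order; only the traffic class is extracted below
 * and rewritten into the big-endian ver/tc/flow word
 * of the lookup element.
 */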
683 struct ice_le_ver_tc_flow vtf;
684 vtf.u.fld.version = 0;
685 vtf.u.fld.flow_label = 0;
686 vtf.u.fld.tc = (rte_be_to_cpu_32
687 (ipv6_spec->hdr.vtc_flow) &
688 RTE_IPV6_HDR_TC_MASK) >>
689 RTE_IPV6_HDR_TC_SHIFT;
690 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
691 vtf.u.fld.tc = (rte_be_to_cpu_32
692 (ipv6_mask->hdr.vtc_flow) &
693 RTE_IPV6_HDR_TC_MASK) >>
694 RTE_IPV6_HDR_TC_SHIFT;
695 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
701 case RTE_FLOW_ITEM_TYPE_UDP:
702 udp_spec = item->spec;
703 udp_mask = item->mask;
705 if (udp_spec && udp_mask) {
706 /* Check UDP mask and update input set */
707 if (udp_mask->hdr.dgram_len ||
708 udp_mask->hdr.dgram_cksum) {
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ITEM,
717 if (udp_mask->hdr.src_port)
719 ICE_INSET_TUN_UDP_SRC_PORT;
720 if (udp_mask->hdr.dst_port)
722 ICE_INSET_TUN_UDP_DST_PORT;
724 if (udp_mask->hdr.src_port)
726 ICE_INSET_UDP_SRC_PORT;
727 if (udp_mask->hdr.dst_port)
729 ICE_INSET_UDP_DST_PORT;
731 if (*tun_type == ICE_SW_TUN_VXLAN &&
733 list[t].type = ICE_UDP_OF;
735 list[t].type = ICE_UDP_ILOS;
736 if (udp_mask->hdr.src_port) {
737 list[t].h_u.l4_hdr.src_port =
738 udp_spec->hdr.src_port;
739 list[t].m_u.l4_hdr.src_port =
740 udp_mask->hdr.src_port;
742 if (udp_mask->hdr.dst_port) {
743 list[t].h_u.l4_hdr.dst_port =
744 udp_spec->hdr.dst_port;
745 list[t].m_u.l4_hdr.dst_port =
746 udp_mask->hdr.dst_port;
752 case RTE_FLOW_ITEM_TYPE_TCP:
753 tcp_spec = item->spec;
754 tcp_mask = item->mask;
755 if (tcp_spec && tcp_mask) {
756 /* Check TCP mask and update input set */
757 if (tcp_mask->hdr.sent_seq ||
758 tcp_mask->hdr.recv_ack ||
759 tcp_mask->hdr.data_off ||
760 tcp_mask->hdr.tcp_flags ||
761 tcp_mask->hdr.rx_win ||
762 tcp_mask->hdr.cksum ||
763 tcp_mask->hdr.tcp_urp) {
764 rte_flow_error_set(error, EINVAL,
765 RTE_FLOW_ERROR_TYPE_ITEM,
772 if (tcp_mask->hdr.src_port)
774 ICE_INSET_TUN_TCP_SRC_PORT;
775 if (tcp_mask->hdr.dst_port)
777 ICE_INSET_TUN_TCP_DST_PORT;
779 if (tcp_mask->hdr.src_port)
781 ICE_INSET_TCP_SRC_PORT;
782 if (tcp_mask->hdr.dst_port)
784 ICE_INSET_TCP_DST_PORT;
786 list[t].type = ICE_TCP_IL;
787 if (tcp_mask->hdr.src_port) {
788 list[t].h_u.l4_hdr.src_port =
789 tcp_spec->hdr.src_port;
790 list[t].m_u.l4_hdr.src_port =
791 tcp_mask->hdr.src_port;
793 if (tcp_mask->hdr.dst_port) {
794 list[t].h_u.l4_hdr.dst_port =
795 tcp_spec->hdr.dst_port;
796 list[t].m_u.l4_hdr.dst_port =
797 tcp_mask->hdr.dst_port;
803 case RTE_FLOW_ITEM_TYPE_SCTP:
804 sctp_spec = item->spec;
805 sctp_mask = item->mask;
806 if (sctp_spec && sctp_mask) {
807 /* Check SCTP mask and update input set */
808 if (sctp_mask->hdr.cksum) {
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_ITEM,
812 "Invalid SCTP mask");
817 if (sctp_mask->hdr.src_port)
819 ICE_INSET_TUN_SCTP_SRC_PORT;
820 if (sctp_mask->hdr.dst_port)
822 ICE_INSET_TUN_SCTP_DST_PORT;
824 if (sctp_mask->hdr.src_port)
826 ICE_INSET_SCTP_SRC_PORT;
827 if (sctp_mask->hdr.dst_port)
829 ICE_INSET_SCTP_DST_PORT;
831 list[t].type = ICE_SCTP_IL;
832 if (sctp_mask->hdr.src_port) {
833 list[t].h_u.sctp_hdr.src_port =
834 sctp_spec->hdr.src_port;
835 list[t].m_u.sctp_hdr.src_port =
836 sctp_mask->hdr.src_port;
838 if (sctp_mask->hdr.dst_port) {
839 list[t].h_u.sctp_hdr.dst_port =
840 sctp_spec->hdr.dst_port;
841 list[t].m_u.sctp_hdr.dst_port =
842 sctp_mask->hdr.dst_port;
848 case RTE_FLOW_ITEM_TYPE_VXLAN:
849 vxlan_spec = item->spec;
850 vxlan_mask = item->mask;
851 /* Check if VXLAN item is used to describe protocol.
852 * If yes, both spec and mask should be NULL.
853 * If no, both spec and mask shouldn't be NULL.
855 if ((!vxlan_spec && vxlan_mask) ||
856 (vxlan_spec && !vxlan_mask)) {
857 rte_flow_error_set(error, EINVAL,
858 RTE_FLOW_ERROR_TYPE_ITEM,
860 "Invalid VXLAN item");
865 if (vxlan_spec && vxlan_mask) {
866 list[t].type = ICE_VXLAN;
867 if (vxlan_mask->vni[0] ||
868 vxlan_mask->vni[1] ||
869 vxlan_mask->vni[2]) {
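/* The 24-bit VNI arrives as three separate bytes in
 * the item; fold them into the vni field of the tunnel
 * lookup header, for the value and the mask alike.
 */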
870 list[t].h_u.tnl_hdr.vni =
871 (vxlan_spec->vni[2] << 16) |
872 (vxlan_spec->vni[1] << 8) |
874 list[t].m_u.tnl_hdr.vni =
875 (vxlan_mask->vni[2] << 16) |
876 (vxlan_mask->vni[1] << 8) |
879 ICE_INSET_TUN_VXLAN_VNI;
885 case RTE_FLOW_ITEM_TYPE_NVGRE:
886 nvgre_spec = item->spec;
887 nvgre_mask = item->mask;
888 /* Check if NVGRE item is used to describe protocol.
889 * If yes, both spec and mask should be NULL.
890 * If no, both spec and mask shouldn't be NULL.
892 if ((!nvgre_spec && nvgre_mask) ||
893 (nvgre_spec && !nvgre_mask)) {
894 rte_flow_error_set(error, EINVAL,
895 RTE_FLOW_ERROR_TYPE_ITEM,
897 "Invalid NVGRE item");
901 if (nvgre_spec && nvgre_mask) {
902 list[t].type = ICE_NVGRE;
903 if (nvgre_mask->tni[0] ||
904 nvgre_mask->tni[1] ||
905 nvgre_mask->tni[2]) {
906 list[t].h_u.nvgre_hdr.tni_flow =
907 (nvgre_spec->tni[2] << 16) |
908 (nvgre_spec->tni[1] << 8) |
910 list[t].m_u.nvgre_hdr.tni_flow =
911 (nvgre_mask->tni[2] << 16) |
912 (nvgre_mask->tni[1] << 8) |
915 ICE_INSET_TUN_NVGRE_TNI;
921 case RTE_FLOW_ITEM_TYPE_VLAN:
922 vlan_spec = item->spec;
923 vlan_mask = item->mask;
924 /* Check if VLAN item is used to describe protocol.
925 * If yes, both spec and mask should be NULL.
926 * If no, both spec and mask shouldn't be NULL.
928 if ((!vlan_spec && vlan_mask) ||
929 (vlan_spec && !vlan_mask)) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM,
933 "Invalid VLAN item");
936 if (vlan_spec && vlan_mask) {
937 list[t].type = ICE_VLAN_OFOS;
938 if (vlan_mask->tci) {
939 list[t].h_u.vlan_hdr.vlan =
941 list[t].m_u.vlan_hdr.vlan =
943 input_set |= ICE_INSET_VLAN_OUTER;
945 if (vlan_mask->inner_type) {
946 list[t].h_u.vlan_hdr.type =
947 vlan_spec->inner_type;
948 list[t].m_u.vlan_hdr.type =
949 vlan_mask->inner_type;
950 input_set |= ICE_INSET_ETHERTYPE;
956 case RTE_FLOW_ITEM_TYPE_PPPOED:
957 case RTE_FLOW_ITEM_TYPE_PPPOES:
958 pppoe_spec = item->spec;
959 pppoe_mask = item->mask;
960 /* Check if PPPoE item is used to describe protocol.
961 * If yes, both spec and mask should be NULL.
962 * If no, both spec and mask shouldn't be NULL.
964 if ((!pppoe_spec && pppoe_mask) ||
965 (pppoe_spec && !pppoe_mask)) {
966 rte_flow_error_set(error, EINVAL,
967 RTE_FLOW_ERROR_TYPE_ITEM,
969 "Invalid pppoe item");
972 if (pppoe_spec && pppoe_mask) {
973 /* Check pppoe mask and update input set */
974 if (pppoe_mask->length ||
976 pppoe_mask->version_type) {
977 rte_flow_error_set(error, EINVAL,
978 RTE_FLOW_ERROR_TYPE_ITEM,
980 "Invalid pppoe mask");
983 list[t].type = ICE_PPPOE;
984 if (pppoe_mask->session_id) {
985 list[t].h_u.pppoe_hdr.session_id =
986 pppoe_spec->session_id;
987 list[t].m_u.pppoe_hdr.session_id =
988 pppoe_mask->session_id;
989 input_set |= ICE_INSET_PPPOE_SESSION;
996 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
997 pppoe_proto_spec = item->spec;
998 pppoe_proto_mask = item->mask;
999 /* Check if PPPoE optional proto_id item
1000 * is used to describe protocol.
1001 * If yes, both spec and mask should be NULL.
1002 * If no, both spec and mask shouldn't be NULL.
1004 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1005 (pppoe_proto_spec && !pppoe_proto_mask)) {
1006 rte_flow_error_set(error, EINVAL,
1007 RTE_FLOW_ERROR_TYPE_ITEM,
1009 "Invalid pppoe proto item");
1012 if (pppoe_proto_spec && pppoe_proto_mask) {
1015 list[t].type = ICE_PPPOE;
1016 if (pppoe_proto_mask->proto_id) {
1017 list[t].h_u.pppoe_hdr.ppp_prot_id =
1018 pppoe_proto_spec->proto_id;
1019 list[t].m_u.pppoe_hdr.ppp_prot_id =
1020 pppoe_proto_mask->proto_id;
1021 input_set |= ICE_INSET_PPPOE_PROTO;
1027 case RTE_FLOW_ITEM_TYPE_ESP:
1028 esp_spec = item->spec;
1029 esp_mask = item->mask;
1030 if ((esp_spec && !esp_mask) ||
1031 (!esp_spec && esp_mask)) {
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ITEM,
1035 "Invalid esp item");
1038 /* Check esp mask and update input set */
1039 if (esp_mask && esp_mask->hdr.seq) {
1040 rte_flow_error_set(error, EINVAL,
1041 RTE_FLOW_ERROR_TYPE_ITEM,
1043 "Invalid esp mask");
1047 if (!esp_spec && !esp_mask && !input_set) {
1049 if (ipv6_valiad && udp_valiad)
1051 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1052 else if (ipv6_valiad)
1053 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1054 else if (ipv4_valiad)
1056 } else if (esp_spec && esp_mask &&
1059 list[t].type = ICE_NAT_T;
1061 list[t].type = ICE_ESP;
1062 list[t].h_u.esp_hdr.spi =
1064 list[t].m_u.esp_hdr.spi =
1066 input_set |= ICE_INSET_ESP_SPI;
1070 if (!profile_rule) {
1071 if (ipv6_valiad && udp_valiad)
1072 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1073 else if (ipv4_valiad && udp_valiad)
1074 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1075 else if (ipv6_valiad)
1076 *tun_type = ICE_SW_TUN_IPV6_ESP;
1077 else if (ipv4_valiad)
1078 *tun_type = ICE_SW_TUN_IPV4_ESP;
1082 case RTE_FLOW_ITEM_TYPE_AH:
1083 ah_spec = item->spec;
1084 ah_mask = item->mask;
1085 if ((ah_spec && !ah_mask) ||
1086 (!ah_spec && ah_mask)) {
1087 rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ITEM,
1093 /* Check ah mask and update input set */
1095 (ah_mask->next_hdr ||
1096 ah_mask->payload_len ||
1098 ah_mask->reserved)) {
1099 rte_flow_error_set(error, EINVAL,
1100 RTE_FLOW_ERROR_TYPE_ITEM,
1106 if (!ah_spec && !ah_mask && !input_set) {
1108 if (ipv6_valiad && udp_valiad)
1110 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1111 else if (ipv6_valiad)
1112 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1113 else if (ipv4_valiad)
1115 } else if (ah_spec && ah_mask &&
1117 list[t].type = ICE_AH;
1118 list[t].h_u.ah_hdr.spi =
1120 list[t].m_u.ah_hdr.spi =
1122 input_set |= ICE_INSET_AH_SPI;
1126 if (!profile_rule) {
1129 else if (ipv6_valiad)
1130 *tun_type = ICE_SW_TUN_IPV6_AH;
1131 else if (ipv4_valiad)
1132 *tun_type = ICE_SW_TUN_IPV4_AH;
1136 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1137 l2tp_spec = item->spec;
1138 l2tp_mask = item->mask;
1139 if ((l2tp_spec && !l2tp_mask) ||
1140 (!l2tp_spec && l2tp_mask)) {
1141 rte_flow_error_set(error, EINVAL,
1142 RTE_FLOW_ERROR_TYPE_ITEM,
1144 "Invalid l2tp item");
1148 if (!l2tp_spec && !l2tp_mask && !input_set) {
1151 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1152 else if (ipv4_valiad)
1154 } else if (l2tp_spec && l2tp_mask &&
1155 l2tp_mask->session_id){
1156 list[t].type = ICE_L2TPV3;
1157 list[t].h_u.l2tpv3_sess_hdr.session_id =
1158 l2tp_spec->session_id;
1159 list[t].m_u.l2tpv3_sess_hdr.session_id =
1160 l2tp_mask->session_id;
1161 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1165 if (!profile_rule) {
1168 ICE_SW_TUN_IPV6_L2TPV3;
1169 else if (ipv4_valiad)
1171 ICE_SW_TUN_IPV4_L2TPV3;
1175 case RTE_FLOW_ITEM_TYPE_PFCP:
1176 pfcp_spec = item->spec;
1177 pfcp_mask = item->mask;
1178 /* Check if PFCP item is used to describe protocol.
1179 * If yes, both spec and mask should be NULL.
1180 * If no, both spec and mask shouldn't be NULL.
1182 if ((!pfcp_spec && pfcp_mask) ||
1183 (pfcp_spec && !pfcp_mask)) {
1184 rte_flow_error_set(error, EINVAL,
1185 RTE_FLOW_ERROR_TYPE_ITEM,
1187 "Invalid PFCP item");
1190 if (pfcp_spec && pfcp_mask) {
1191 /* Check pfcp mask and update input set */
1192 if (pfcp_mask->msg_type ||
1193 pfcp_mask->msg_len ||
1195 rte_flow_error_set(error, EINVAL,
1196 RTE_FLOW_ERROR_TYPE_ITEM,
1198 "Invalid pfcp mask");
1201 if (pfcp_mask->s_field &&
1202 pfcp_spec->s_field == 0x01 &&
1205 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1206 else if (pfcp_mask->s_field &&
1207 pfcp_spec->s_field == 0x01)
1209 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1210 else if (pfcp_mask->s_field &&
1211 !pfcp_spec->s_field &&
1214 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1215 else if (pfcp_mask->s_field &&
1216 !pfcp_spec->s_field)
1218 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1224 case RTE_FLOW_ITEM_TYPE_VOID:
1228 rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1230 "Invalid pattern item.");
1243 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1244 struct rte_flow_error *error,
1245 struct ice_adv_rule_info *rule_info)
1247 const struct rte_flow_action_vf *act_vf;
1248 const struct rte_flow_action *action;
1249 enum rte_flow_action_type action_type;
1251 for (action = actions; action->type !=
1252 RTE_FLOW_ACTION_TYPE_END; action++) {
1253 action_type = action->type;
1254 switch (action_type) {
1255 case RTE_FLOW_ACTION_TYPE_VF:
1256 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1257 act_vf = action->conf;
1258 rule_info->sw_act.vsi_handle = act_vf->id;
1261 rte_flow_error_set(error,
1262 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1264 "Invalid action type or queue number");
1269 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1271 rule_info->priority = 5;
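/*
 * Illustrative sketch only (VF id 1 is a hypothetical value): the DCF
 * action parser above forwards to a VSI selected by a VF action, which an
 * application running on the device control function would build like this.
 */
static __rte_unused void
ice_switch_example_dcf_vf_action(struct rte_flow_action_vf *vf,
		struct rte_flow_action action[2])
{
	vf->original = 0;
	vf->id = 1;
	action[0].type = RTE_FLOW_ACTION_TYPE_VF;
	action[0].conf = vf;
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
	action[1].conf = NULL;
}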
1277 ice_switch_parse_action(struct ice_pf *pf,
1278 const struct rte_flow_action *actions,
1279 struct rte_flow_error *error,
1280 struct ice_adv_rule_info *rule_info)
1282 struct ice_vsi *vsi = pf->main_vsi;
1283 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1284 const struct rte_flow_action_queue *act_q;
1285 const struct rte_flow_action_rss *act_qgrop;
1286 uint16_t base_queue, i;
1287 const struct rte_flow_action *action;
1288 enum rte_flow_action_type action_type;
1289 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1290 2, 4, 8, 16, 32, 64, 128};
1292 base_queue = pf->base_queue + vsi->base_queue;
1293 for (action = actions; action->type !=
1294 RTE_FLOW_ACTION_TYPE_END; action++) {
1295 action_type = action->type;
1296 switch (action_type) {
1297 case RTE_FLOW_ACTION_TYPE_RSS:
1298 act_qgrop = action->conf;
1299 rule_info->sw_act.fltr_act =
1301 rule_info->sw_act.fwd_id.q_id =
1302 base_queue + act_qgrop->queue[0];
1303 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1304 if (act_qgrop->queue_num ==
1305 valid_qgrop_number[i])
1308 if (i == MAX_QGRP_NUM_TYPE)
1310 if ((act_qgrop->queue[0] +
1311 act_qgrop->queue_num) >
1312 dev->data->nb_rx_queues)
1314 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1315 if (act_qgrop->queue[i + 1] !=
1316 act_qgrop->queue[i] + 1)
1318 rule_info->sw_act.qgrp_size =
1319 act_qgrop->queue_num;
1321 case RTE_FLOW_ACTION_TYPE_QUEUE:
1322 act_q = action->conf;
1323 if (act_q->index >= dev->data->nb_rx_queues)
1325 rule_info->sw_act.fltr_act =
1327 rule_info->sw_act.fwd_id.q_id =
1328 base_queue + act_q->index;
1331 case RTE_FLOW_ACTION_TYPE_DROP:
1332 rule_info->sw_act.fltr_act =
1336 case RTE_FLOW_ACTION_TYPE_VOID:
1344 rule_info->sw_act.vsi_handle = vsi->idx;
1346 rule_info->sw_act.src = vsi->idx;
1347 rule_info->priority = 5;
1352 rte_flow_error_set(error,
1353 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1355 "Invalid action type or queue number");
1360 ice_switch_check_action(const struct rte_flow_action *actions,
1361 struct rte_flow_error *error)
1363 const struct rte_flow_action *action;
1364 enum rte_flow_action_type action_type;
1365 uint16_t actions_num = 0;
1367 for (action = actions; action->type !=
1368 RTE_FLOW_ACTION_TYPE_END; action++) {
1369 action_type = action->type;
1370 switch (action_type) {
1371 case RTE_FLOW_ACTION_TYPE_VF:
1372 case RTE_FLOW_ACTION_TYPE_RSS:
1373 case RTE_FLOW_ACTION_TYPE_QUEUE:
1374 case RTE_FLOW_ACTION_TYPE_DROP:
1377 case RTE_FLOW_ACTION_TYPE_VOID:
1380 rte_flow_error_set(error,
1381 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1383 "Invalid action type");
1388 if (actions_num > 1) {
1389 rte_flow_error_set(error,
1390 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1392 "Invalid action number");
1400 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1403 case ICE_SW_TUN_PROFID_IPV6_ESP:
1404 case ICE_SW_TUN_PROFID_IPV6_AH:
1405 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1406 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1407 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1408 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1409 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1410 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1420 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1421 struct ice_pattern_match_item *array,
1423 const struct rte_flow_item pattern[],
1424 const struct rte_flow_action actions[],
1426 struct rte_flow_error *error)
1428 struct ice_pf *pf = &ad->pf;
1429 uint64_t inputset = 0;
1431 struct sw_meta *sw_meta_ptr = NULL;
1432 struct ice_adv_rule_info rule_info;
1433 struct ice_adv_lkup_elem *list = NULL;
1434 uint16_t lkups_num = 0;
1435 const struct rte_flow_item *item = pattern;
1436 uint16_t item_num = 0;
1437 enum ice_sw_tunnel_type tun_type =
1438 ICE_SW_TUN_AND_NON_TUN;
1439 struct ice_pattern_match_item *pattern_match_item = NULL;
1441 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1443 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1444 tun_type = ICE_SW_TUN_VXLAN;
1445 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1446 tun_type = ICE_SW_TUN_NVGRE;
1447 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1448 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1449 tun_type = ICE_SW_TUN_PPPOE;
1450 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1451 const struct rte_flow_item_eth *eth_mask;
1453 eth_mask = item->mask;
1456 if (eth_mask->type == UINT16_MAX)
1457 tun_type = ICE_SW_TUN_AND_NON_TUN;
1459 /* reserve one more memory slot for ETH which may
1460 * consume 2 lookup items.
1462 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1466 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1468 rte_flow_error_set(error, EINVAL,
1469 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1470 "No memory for PMD internal items");
1475 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1477 rte_flow_error_set(error, EINVAL,
1478 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1479 "No memory for sw_pattern_meta_ptr");
1483 pattern_match_item =
1484 ice_search_pattern_match_item(pattern, array, array_len, error);
1485 if (!pattern_match_item) {
1486 rte_flow_error_set(error, EINVAL,
1487 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1488 "Invalid input pattern");
1492 inputset = ice_switch_inset_get
1493 (pattern, error, list, &lkups_num, &tun_type);
1494 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1495 (inputset & ~pattern_match_item->input_set_mask)) {
1496 rte_flow_error_set(error, EINVAL,
1497 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1499 "Invalid input set");
1503 rule_info.tun_type = tun_type;
1505 ret = ice_switch_check_action(actions, error);
1507 rte_flow_error_set(error, EINVAL,
1508 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1509 "Invalid input action number");
1513 if (ad->hw.dcf_enabled)
1514 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1516 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1519 rte_flow_error_set(error, EINVAL,
1520 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1521 "Invalid input action");
1526 *meta = sw_meta_ptr;
1527 ((struct sw_meta *)*meta)->list = list;
1528 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1529 ((struct sw_meta *)*meta)->rule_info = rule_info;
1532 rte_free(sw_meta_ptr);
1535 rte_free(pattern_match_item);
1541 rte_free(sw_meta_ptr);
1542 rte_free(pattern_match_item);
1548 ice_switch_query(struct ice_adapter *ad __rte_unused,
1549 struct rte_flow *flow __rte_unused,
1550 struct rte_flow_query_count *count __rte_unused,
1551 struct rte_flow_error *error)
1553 rte_flow_error_set(error, EINVAL,
1554 RTE_FLOW_ERROR_TYPE_HANDLE,
1556 "count action not supported by switch filter");
1562 ice_switch_redirect(struct ice_adapter *ad,
1563 struct rte_flow *flow,
1564 struct ice_flow_redirect *rd)
1566 struct ice_rule_query_data *rdata = flow->rule;
1567 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1568 struct ice_adv_lkup_elem *lkups_dp = NULL;
1569 struct LIST_HEAD_TYPE *list_head;
1570 struct ice_adv_rule_info rinfo;
1571 struct ice_hw *hw = &ad->hw;
1572 struct ice_switch_info *sw;
1576 sw = hw->switch_info;
1577 if (!sw->recp_list[rdata->rid].recp_created)
1580 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1583 list_head = &sw->recp_list[rdata->rid].filt_rules;
1584 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1586 rinfo = list_itr->rule_info;
1587 if (rinfo.fltr_rule_id == rdata->rule_id &&
1588 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1589 rinfo.sw_act.vsi_handle == rd->vsi_handle) {
1590 lkups_cnt = list_itr->lkups_cnt;
1591 lkups_dp = (struct ice_adv_lkup_elem *)
1592 ice_memdup(hw, list_itr->lkups,
1593 sizeof(*list_itr->lkups) *
1594 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1596 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1607 /* Remove the old rule */
1608 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1611 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1617 /* Update VSI context */
1618 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1620 /* Replay the rule */
1621 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1624 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1629 ice_free(hw, lkups_dp);
1634 ice_switch_init(struct ice_adapter *ad)
1637 struct ice_flow_parser *dist_parser;
1638 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1640 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1641 dist_parser = &ice_switch_dist_parser_comms;
1642 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1643 dist_parser = &ice_switch_dist_parser_os;
1647 if (ad->devargs.pipe_mode_support)
1648 ret = ice_register_parser(perm_parser, ad);
1650 ret = ice_register_parser(dist_parser, ad);
1655 ice_switch_uninit(struct ice_adapter *ad)
1657 struct ice_flow_parser *dist_parser;
1658 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1660 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1661 dist_parser = &ice_switch_dist_parser_comms;
1663 dist_parser = &ice_switch_dist_parser_os;
1665 if (ad->devargs.pipe_mode_support)
1666 ice_unregister_parser(perm_parser, ad);
1668 ice_unregister_parser(dist_parser, ad);
1672 ice_flow_engine ice_switch_engine = {
1673 .init = ice_switch_init,
1674 .uninit = ice_switch_uninit,
1675 .create = ice_switch_create,
1676 .destroy = ice_switch_destroy,
1677 .query_count = ice_switch_query,
1678 .redirect = ice_switch_redirect,
1679 .free = ice_switch_filter_rule_free,
1680 .type = ICE_FLOW_ENGINE_SWITCH,
1684 ice_flow_parser ice_switch_dist_parser_os = {
1685 .engine = &ice_switch_engine,
1686 .array = ice_switch_pattern_dist_os,
1687 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1688 .parse_pattern_action = ice_switch_parse_pattern_action,
1689 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1693 ice_flow_parser ice_switch_dist_parser_comms = {
1694 .engine = &ice_switch_engine,
1695 .array = ice_switch_pattern_dist_comms,
1696 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1697 .parse_pattern_action = ice_switch_parse_pattern_action,
1698 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1702 ice_flow_parser ice_switch_perm_parser = {
1703 .engine = &ice_switch_engine,
1704 .array = ice_switch_pattern_perm,
1705 .array_len = RTE_DIM(ice_switch_pattern_perm),
1706 .parse_pattern_action = ice_switch_parse_pattern_action,
1707 .stage = ICE_FLOW_STAGE_PERMISSION,
1710 RTE_INIT(ice_sw_engine_init)
1712 struct ice_flow_engine *engine = &ice_switch_engine;
1713 ice_register_flow_engine(engine);
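/*
 * Illustrative sketch only (addresses, ports and the helper name are
 * hypothetical): a pattern/action pair the switch parsers above accept.
 * Its input set (DMAC, IPv4 destination, TCP destination port) falls
 * within ICE_SW_INSET_MAC_IPV4_TCP and the traffic is forwarded to a
 * single queue.
 */
static __rte_unused struct rte_flow *
ice_switch_example_ipv4_tcp_to_queue(uint16_t port_id, uint16_t queue_id,
		struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}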