1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
 */
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
28 #define MAX_QGRP_NUM_TYPE 7
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
98 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
99 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
100 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
101 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
102 #define ICE_SW_INSET_MAC_IPV4_AH ( \
103 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
104 #define ICE_SW_INSET_MAC_IPV6_AH ( \
105 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
106 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
107 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
108 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
109 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
110 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
111 ICE_SW_INSET_MAC_IPV4 | \
112 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
113 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
114 ICE_SW_INSET_MAC_IPV6 | \
115 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
118 struct ice_adv_lkup_elem *list;
120 struct ice_adv_rule_info rule_info;
123 static struct ice_flow_parser ice_switch_dist_parser_os;
124 static struct ice_flow_parser ice_switch_dist_parser_comms;
125 static struct ice_flow_parser ice_switch_perm_parser;
128 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
130 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
131 {pattern_ethertype_vlan,
132 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
134 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_udp,
136 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_tcp,
138 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
141 {pattern_eth_ipv6_udp,
142 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
143 {pattern_eth_ipv6_tcp,
144 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
145 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
146 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
147 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
148 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
149 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
150 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
151 {pattern_eth_ipv4_nvgre_eth_ipv4,
152 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
153 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
154 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
155 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
156 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
158 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
159 {pattern_eth_vlan_pppoed,
160 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
162 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
163 {pattern_eth_vlan_pppoes,
164 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
165 {pattern_eth_pppoes_proto,
166 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
167 {pattern_eth_vlan_pppoes_proto,
168 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
169 {pattern_eth_ipv4_esp,
170 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_udp_esp,
172 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
173 {pattern_eth_ipv6_esp,
174 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
175 {pattern_eth_ipv6_udp_esp,
176 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
177 {pattern_eth_ipv4_ah,
178 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
179 {pattern_eth_ipv6_ah,
180 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
181 {pattern_eth_ipv6_udp_ah,
182 ICE_INSET_NONE, ICE_INSET_NONE},
183 {pattern_eth_ipv4_l2tp,
184 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
185 {pattern_eth_ipv6_l2tp,
186 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_pfcp,
188 ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_eth_ipv6_pfcp,
190 ICE_INSET_NONE, ICE_INSET_NONE},
194 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
196 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
197 {pattern_ethertype_vlan,
198 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
200 ICE_INSET_NONE, ICE_INSET_NONE},
202 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
203 {pattern_eth_ipv4_udp,
204 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
205 {pattern_eth_ipv4_tcp,
206 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
208 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
209 {pattern_eth_ipv6_udp,
210 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
211 {pattern_eth_ipv6_tcp,
212 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
214 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
215 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
216 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
218 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4,
220 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
221 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
222 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
224 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
228 ice_pattern_match_item ice_switch_pattern_perm[] = {
230 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
231 {pattern_ethertype_vlan,
232 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
234 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
235 {pattern_eth_ipv4_udp,
236 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
237 {pattern_eth_ipv4_tcp,
238 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
240 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
241 {pattern_eth_ipv6_udp,
242 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
243 {pattern_eth_ipv6_tcp,
244 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
245 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
246 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
247 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
248 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
249 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
250 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
251 {pattern_eth_ipv4_nvgre_eth_ipv4,
252 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
253 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
254 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
255 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
256 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
258 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
259 {pattern_eth_vlan_pppoed,
260 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
262 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
263 {pattern_eth_vlan_pppoes,
264 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
265 {pattern_eth_pppoes_proto,
266 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
267 {pattern_eth_vlan_pppoes_proto,
268 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
269 {pattern_eth_ipv4_esp,
270 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
271 {pattern_eth_ipv4_udp_esp,
272 ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
273 {pattern_eth_ipv6_esp,
274 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
275 {pattern_eth_ipv6_udp_esp,
276 ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
277 {pattern_eth_ipv4_ah,
278 ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
279 {pattern_eth_ipv6_ah,
280 ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
281 {pattern_eth_ipv6_udp_ah,
282 ICE_INSET_NONE, ICE_INSET_NONE},
283 {pattern_eth_ipv4_l2tp,
284 ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
285 {pattern_eth_ipv6_l2tp,
286 ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
287 {pattern_eth_ipv4_pfcp,
288 ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_ipv6_pfcp,
290 ICE_INSET_NONE, ICE_INSET_NONE},
294 ice_switch_create(struct ice_adapter *ad,
295 struct rte_flow *flow,
297 struct rte_flow_error *error)
300 struct ice_pf *pf = &ad->pf;
301 struct ice_hw *hw = ICE_PF_TO_HW(pf);
302 struct ice_rule_query_data rule_added = {0};
303 struct ice_rule_query_data *filter_ptr;
304 struct ice_adv_lkup_elem *list =
305 ((struct sw_meta *)meta)->list;
307 ((struct sw_meta *)meta)->lkups_num;
308 struct ice_adv_rule_info *rule_info =
309 &((struct sw_meta *)meta)->rule_info;
311 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
312 rte_flow_error_set(error, EINVAL,
313 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
314 "item number too large for rule");
318 rte_flow_error_set(error, EINVAL,
319 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
320 "lookup list should not be NULL");
323 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
325 filter_ptr = rte_zmalloc("ice_switch_filter",
326 sizeof(struct ice_rule_query_data), 0);
328 rte_flow_error_set(error, EINVAL,
329 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
330 "No memory for ice_switch_filter");
333 flow->rule = filter_ptr;
334 rte_memcpy(filter_ptr,
336 sizeof(struct ice_rule_query_data));
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
340 "switch filter create flow fail");
356 ice_switch_destroy(struct ice_adapter *ad,
357 struct rte_flow *flow,
358 struct rte_flow_error *error)
360 struct ice_hw *hw = &ad->hw;
362 struct ice_rule_query_data *filter_ptr;
364 filter_ptr = (struct ice_rule_query_data *)
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
371 " create by switch filter");
375 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
377 rte_flow_error_set(error, EINVAL,
378 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
379 "fail to destroy switch filter rule");
383 rte_free(filter_ptr);
388 ice_switch_filter_rule_free(struct rte_flow *flow)
390 rte_free(flow->rule);
394 ice_switch_inset_get(const struct rte_flow_item pattern[],
395 struct rte_flow_error *error,
396 struct ice_adv_lkup_elem *list,
398 enum ice_sw_tunnel_type *tun_type)
400 const struct rte_flow_item *item = pattern;
401 enum rte_flow_item_type item_type;
402 const struct rte_flow_item_eth *eth_spec, *eth_mask;
403 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
404 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
405 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
406 const struct rte_flow_item_udp *udp_spec, *udp_mask;
407 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
408 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
409 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
410 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
411 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
412 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
414 const struct rte_flow_item_esp *esp_spec, *esp_mask;
415 const struct rte_flow_item_ah *ah_spec, *ah_mask;
416 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
417 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
418 uint64_t input_set = ICE_INSET_NONE;
420 bool profile_rule = 0;
421 bool tunnel_valid = 0;
422 bool pppoe_valid = 0;
423 bool ipv6_valiad = 0;
424 bool ipv4_valiad = 0;
427 for (item = pattern; item->type !=
428 RTE_FLOW_ITEM_TYPE_END; item++) {
430 rte_flow_error_set(error, EINVAL,
431 RTE_FLOW_ERROR_TYPE_ITEM,
433 "Not support range");
436 item_type = item->type;
439 case RTE_FLOW_ITEM_TYPE_ETH:
440 eth_spec = item->spec;
441 eth_mask = item->mask;
442 if (eth_spec && eth_mask) {
443 const uint8_t *a = eth_mask->src.addr_bytes;
444 const uint8_t *b = eth_mask->dst.addr_bytes;
445 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
446 if (a[j] && tunnel_valid) {
456 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
457 if (b[j] && tunnel_valid) {
468 input_set |= ICE_INSET_ETHERTYPE;
469 list[t].type = (tunnel_valid == 0) ?
470 ICE_MAC_OFOS : ICE_MAC_IL;
471 struct ice_ether_hdr *h;
472 struct ice_ether_hdr *m;
474 h = &list[t].h_u.eth_hdr;
475 m = &list[t].m_u.eth_hdr;
476 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
477 if (eth_mask->src.addr_bytes[j]) {
479 eth_spec->src.addr_bytes[j];
481 eth_mask->src.addr_bytes[j];
484 if (eth_mask->dst.addr_bytes[j]) {
486 eth_spec->dst.addr_bytes[j];
488 eth_mask->dst.addr_bytes[j];
494 if (eth_mask->type) {
495 list[t].type = ICE_ETYPE_OL;
496 list[t].h_u.ethertype.ethtype_id =
498 list[t].m_u.ethertype.ethtype_id =
505 case RTE_FLOW_ITEM_TYPE_IPV4:
506 ipv4_spec = item->spec;
507 ipv4_mask = item->mask;
509 if (ipv4_spec && ipv4_mask) {
510 /* Check IPv4 mask and update input set */
511 if (ipv4_mask->hdr.version_ihl ||
512 ipv4_mask->hdr.total_length ||
513 ipv4_mask->hdr.packet_id ||
514 ipv4_mask->hdr.hdr_checksum) {
515 rte_flow_error_set(error, EINVAL,
516 RTE_FLOW_ERROR_TYPE_ITEM,
518 "Invalid IPv4 mask.");
523 if (ipv4_mask->hdr.type_of_service)
525 ICE_INSET_TUN_IPV4_TOS;
526 if (ipv4_mask->hdr.src_addr)
528 ICE_INSET_TUN_IPV4_SRC;
529 if (ipv4_mask->hdr.dst_addr)
531 ICE_INSET_TUN_IPV4_DST;
532 if (ipv4_mask->hdr.time_to_live)
534 ICE_INSET_TUN_IPV4_TTL;
535 if (ipv4_mask->hdr.next_proto_id)
537 ICE_INSET_TUN_IPV4_PROTO;
539 if (ipv4_mask->hdr.src_addr)
540 input_set |= ICE_INSET_IPV4_SRC;
541 if (ipv4_mask->hdr.dst_addr)
542 input_set |= ICE_INSET_IPV4_DST;
543 if (ipv4_mask->hdr.time_to_live)
544 input_set |= ICE_INSET_IPV4_TTL;
545 if (ipv4_mask->hdr.next_proto_id)
547 ICE_INSET_IPV4_PROTO;
548 if (ipv4_mask->hdr.type_of_service)
552 list[t].type = (tunnel_valid == 0) ?
553 ICE_IPV4_OFOS : ICE_IPV4_IL;
554 if (ipv4_mask->hdr.src_addr) {
555 list[t].h_u.ipv4_hdr.src_addr =
556 ipv4_spec->hdr.src_addr;
557 list[t].m_u.ipv4_hdr.src_addr =
558 ipv4_mask->hdr.src_addr;
560 if (ipv4_mask->hdr.dst_addr) {
561 list[t].h_u.ipv4_hdr.dst_addr =
562 ipv4_spec->hdr.dst_addr;
563 list[t].m_u.ipv4_hdr.dst_addr =
564 ipv4_mask->hdr.dst_addr;
566 if (ipv4_mask->hdr.time_to_live) {
567 list[t].h_u.ipv4_hdr.time_to_live =
568 ipv4_spec->hdr.time_to_live;
569 list[t].m_u.ipv4_hdr.time_to_live =
570 ipv4_mask->hdr.time_to_live;
572 if (ipv4_mask->hdr.next_proto_id) {
573 list[t].h_u.ipv4_hdr.protocol =
574 ipv4_spec->hdr.next_proto_id;
575 list[t].m_u.ipv4_hdr.protocol =
576 ipv4_mask->hdr.next_proto_id;
578 if (ipv4_mask->hdr.type_of_service) {
579 list[t].h_u.ipv4_hdr.tos =
580 ipv4_spec->hdr.type_of_service;
581 list[t].m_u.ipv4_hdr.tos =
582 ipv4_mask->hdr.type_of_service;
588 case RTE_FLOW_ITEM_TYPE_IPV6:
589 ipv6_spec = item->spec;
590 ipv6_mask = item->mask;
592 if (ipv6_spec && ipv6_mask) {
593 if (ipv6_mask->hdr.payload_len) {
594 rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM,
597 "Invalid IPv6 mask");
601 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
602 if (ipv6_mask->hdr.src_addr[j] &&
605 ICE_INSET_TUN_IPV6_SRC;
607 } else if (ipv6_mask->hdr.src_addr[j]) {
608 input_set |= ICE_INSET_IPV6_SRC;
612 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
613 if (ipv6_mask->hdr.dst_addr[j] &&
616 ICE_INSET_TUN_IPV6_DST;
618 } else if (ipv6_mask->hdr.dst_addr[j]) {
619 input_set |= ICE_INSET_IPV6_DST;
623 if (ipv6_mask->hdr.proto &&
626 ICE_INSET_TUN_IPV6_NEXT_HDR;
627 else if (ipv6_mask->hdr.proto)
629 ICE_INSET_IPV6_NEXT_HDR;
630 if (ipv6_mask->hdr.hop_limits &&
633 ICE_INSET_TUN_IPV6_HOP_LIMIT;
634 else if (ipv6_mask->hdr.hop_limits)
636 ICE_INSET_IPV6_HOP_LIMIT;
637 if ((ipv6_mask->hdr.vtc_flow &
639 (RTE_IPV6_HDR_TC_MASK)) &&
642 ICE_INSET_TUN_IPV6_TC;
643 else if (ipv6_mask->hdr.vtc_flow &
645 (RTE_IPV6_HDR_TC_MASK))
646 input_set |= ICE_INSET_IPV6_TC;
648 list[t].type = (tunnel_valid == 0) ?
649 ICE_IPV6_OFOS : ICE_IPV6_IL;
650 struct ice_ipv6_hdr *f;
651 struct ice_ipv6_hdr *s;
652 f = &list[t].h_u.ipv6_hdr;
653 s = &list[t].m_u.ipv6_hdr;
654 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
655 if (ipv6_mask->hdr.src_addr[j]) {
657 ipv6_spec->hdr.src_addr[j];
659 ipv6_mask->hdr.src_addr[j];
661 if (ipv6_mask->hdr.dst_addr[j]) {
663 ipv6_spec->hdr.dst_addr[j];
665 ipv6_mask->hdr.dst_addr[j];
668 if (ipv6_mask->hdr.proto) {
670 ipv6_spec->hdr.proto;
672 ipv6_mask->hdr.proto;
674 if (ipv6_mask->hdr.hop_limits) {
676 ipv6_spec->hdr.hop_limits;
678 ipv6_mask->hdr.hop_limits;
680 if (ipv6_mask->hdr.vtc_flow &
682 (RTE_IPV6_HDR_TC_MASK)) {
683 struct ice_le_ver_tc_flow vtf;
684 vtf.u.fld.version = 0;
685 vtf.u.fld.flow_label = 0;
686 vtf.u.fld.tc = (rte_be_to_cpu_32
687 (ipv6_spec->hdr.vtc_flow) &
688 RTE_IPV6_HDR_TC_MASK) >>
689 RTE_IPV6_HDR_TC_SHIFT;
690 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
691 vtf.u.fld.tc = (rte_be_to_cpu_32
692 (ipv6_mask->hdr.vtc_flow) &
693 RTE_IPV6_HDR_TC_MASK) >>
694 RTE_IPV6_HDR_TC_SHIFT;
695 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
701 case RTE_FLOW_ITEM_TYPE_UDP:
702 udp_spec = item->spec;
703 udp_mask = item->mask;
705 if (udp_spec && udp_mask) {
706 /* Check UDP mask and update input set*/
707 if (udp_mask->hdr.dgram_len ||
708 udp_mask->hdr.dgram_cksum) {
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ITEM,
717 if (udp_mask->hdr.src_port)
719 ICE_INSET_TUN_UDP_SRC_PORT;
720 if (udp_mask->hdr.dst_port)
722 ICE_INSET_TUN_UDP_DST_PORT;
724 if (udp_mask->hdr.src_port)
726 ICE_INSET_UDP_SRC_PORT;
727 if (udp_mask->hdr.dst_port)
729 ICE_INSET_UDP_DST_PORT;
731 if (*tun_type == ICE_SW_TUN_VXLAN &&
733 list[t].type = ICE_UDP_OF;
735 list[t].type = ICE_UDP_ILOS;
736 if (udp_mask->hdr.src_port) {
737 list[t].h_u.l4_hdr.src_port =
738 udp_spec->hdr.src_port;
739 list[t].m_u.l4_hdr.src_port =
740 udp_mask->hdr.src_port;
742 if (udp_mask->hdr.dst_port) {
743 list[t].h_u.l4_hdr.dst_port =
744 udp_spec->hdr.dst_port;
745 list[t].m_u.l4_hdr.dst_port =
746 udp_mask->hdr.dst_port;
752 case RTE_FLOW_ITEM_TYPE_TCP:
753 tcp_spec = item->spec;
754 tcp_mask = item->mask;
755 if (tcp_spec && tcp_mask) {
756 /* Check TCP mask and update input set */
757 if (tcp_mask->hdr.sent_seq ||
758 tcp_mask->hdr.recv_ack ||
759 tcp_mask->hdr.data_off ||
760 tcp_mask->hdr.tcp_flags ||
761 tcp_mask->hdr.rx_win ||
762 tcp_mask->hdr.cksum ||
763 tcp_mask->hdr.tcp_urp) {
764 rte_flow_error_set(error, EINVAL,
765 RTE_FLOW_ERROR_TYPE_ITEM,
772 if (tcp_mask->hdr.src_port)
774 ICE_INSET_TUN_TCP_SRC_PORT;
775 if (tcp_mask->hdr.dst_port)
777 ICE_INSET_TUN_TCP_DST_PORT;
779 if (tcp_mask->hdr.src_port)
781 ICE_INSET_TCP_SRC_PORT;
782 if (tcp_mask->hdr.dst_port)
784 ICE_INSET_TCP_DST_PORT;
786 list[t].type = ICE_TCP_IL;
787 if (tcp_mask->hdr.src_port) {
788 list[t].h_u.l4_hdr.src_port =
789 tcp_spec->hdr.src_port;
790 list[t].m_u.l4_hdr.src_port =
791 tcp_mask->hdr.src_port;
793 if (tcp_mask->hdr.dst_port) {
794 list[t].h_u.l4_hdr.dst_port =
795 tcp_spec->hdr.dst_port;
796 list[t].m_u.l4_hdr.dst_port =
797 tcp_mask->hdr.dst_port;
803 case RTE_FLOW_ITEM_TYPE_SCTP:
804 sctp_spec = item->spec;
805 sctp_mask = item->mask;
806 if (sctp_spec && sctp_mask) {
807 /* Check SCTP mask and update input set */
808 if (sctp_mask->hdr.cksum) {
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_ITEM,
812 "Invalid SCTP mask");
817 if (sctp_mask->hdr.src_port)
819 ICE_INSET_TUN_SCTP_SRC_PORT;
820 if (sctp_mask->hdr.dst_port)
822 ICE_INSET_TUN_SCTP_DST_PORT;
824 if (sctp_mask->hdr.src_port)
826 ICE_INSET_SCTP_SRC_PORT;
827 if (sctp_mask->hdr.dst_port)
829 ICE_INSET_SCTP_DST_PORT;
831 list[t].type = ICE_SCTP_IL;
832 if (sctp_mask->hdr.src_port) {
833 list[t].h_u.sctp_hdr.src_port =
834 sctp_spec->hdr.src_port;
835 list[t].m_u.sctp_hdr.src_port =
836 sctp_mask->hdr.src_port;
838 if (sctp_mask->hdr.dst_port) {
839 list[t].h_u.sctp_hdr.dst_port =
840 sctp_spec->hdr.dst_port;
841 list[t].m_u.sctp_hdr.dst_port =
842 sctp_mask->hdr.dst_port;
848 case RTE_FLOW_ITEM_TYPE_VXLAN:
849 vxlan_spec = item->spec;
850 vxlan_mask = item->mask;
851 /* Check if VXLAN item is used to describe protocol.
852 * If yes, both spec and mask should be NULL.
853 * If no, both spec and mask shouldn't be NULL.
855 if ((!vxlan_spec && vxlan_mask) ||
856 (vxlan_spec && !vxlan_mask)) {
857 rte_flow_error_set(error, EINVAL,
858 RTE_FLOW_ERROR_TYPE_ITEM,
860 "Invalid VXLAN item");
865 if (vxlan_spec && vxlan_mask) {
866 list[t].type = ICE_VXLAN;
867 if (vxlan_mask->vni[0] ||
868 vxlan_mask->vni[1] ||
869 vxlan_mask->vni[2]) {
870 list[t].h_u.tnl_hdr.vni =
871 (vxlan_spec->vni[2] << 16) |
872 (vxlan_spec->vni[1] << 8) |
874 list[t].m_u.tnl_hdr.vni =
875 (vxlan_mask->vni[2] << 16) |
876 (vxlan_mask->vni[1] << 8) |
879 ICE_INSET_TUN_VXLAN_VNI;
885 case RTE_FLOW_ITEM_TYPE_NVGRE:
886 nvgre_spec = item->spec;
887 nvgre_mask = item->mask;
888 /* Check if NVGRE item is used to describe protocol.
889 * If yes, both spec and mask should be NULL.
890 * If no, both spec and mask shouldn't be NULL.
892 if ((!nvgre_spec && nvgre_mask) ||
893 (nvgre_spec && !nvgre_mask)) {
894 rte_flow_error_set(error, EINVAL,
895 RTE_FLOW_ERROR_TYPE_ITEM,
897 "Invalid NVGRE item");
901 if (nvgre_spec && nvgre_mask) {
902 list[t].type = ICE_NVGRE;
903 if (nvgre_mask->tni[0] ||
904 nvgre_mask->tni[1] ||
905 nvgre_mask->tni[2]) {
906 list[t].h_u.nvgre_hdr.tni_flow =
907 (nvgre_spec->tni[2] << 16) |
908 (nvgre_spec->tni[1] << 8) |
910 list[t].m_u.nvgre_hdr.tni_flow =
911 (nvgre_mask->tni[2] << 16) |
912 (nvgre_mask->tni[1] << 8) |
915 ICE_INSET_TUN_NVGRE_TNI;
921 case RTE_FLOW_ITEM_TYPE_VLAN:
922 vlan_spec = item->spec;
923 vlan_mask = item->mask;
924 /* Check if VLAN item is used to describe protocol.
925 * If yes, both spec and mask should be NULL.
926 * If no, both spec and mask shouldn't be NULL.
928 if ((!vlan_spec && vlan_mask) ||
929 (vlan_spec && !vlan_mask)) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM,
933 "Invalid VLAN item");
936 if (vlan_spec && vlan_mask) {
937 list[t].type = ICE_VLAN_OFOS;
938 if (vlan_mask->tci) {
939 list[t].h_u.vlan_hdr.vlan =
941 list[t].m_u.vlan_hdr.vlan =
943 input_set |= ICE_INSET_VLAN_OUTER;
945 if (vlan_mask->inner_type) {
946 list[t].h_u.vlan_hdr.type =
947 vlan_spec->inner_type;
948 list[t].m_u.vlan_hdr.type =
949 vlan_mask->inner_type;
950 input_set |= ICE_INSET_ETHERTYPE;
956 case RTE_FLOW_ITEM_TYPE_PPPOED:
957 case RTE_FLOW_ITEM_TYPE_PPPOES:
958 pppoe_spec = item->spec;
959 pppoe_mask = item->mask;
960 /* Check if PPPoE item is used to describe protocol.
961 * If yes, both spec and mask should be NULL.
962 * If no, both spec and mask shouldn't be NULL.
964 if ((!pppoe_spec && pppoe_mask) ||
965 (pppoe_spec && !pppoe_mask)) {
966 rte_flow_error_set(error, EINVAL,
967 RTE_FLOW_ERROR_TYPE_ITEM,
969 "Invalid pppoe item");
972 if (pppoe_spec && pppoe_mask) {
973 /* Check pppoe mask and update input set */
974 if (pppoe_mask->length ||
976 pppoe_mask->version_type) {
977 rte_flow_error_set(error, EINVAL,
978 RTE_FLOW_ERROR_TYPE_ITEM,
980 "Invalid pppoe mask");
983 list[t].type = ICE_PPPOE;
984 if (pppoe_mask->session_id) {
985 list[t].h_u.pppoe_hdr.session_id =
986 pppoe_spec->session_id;
987 list[t].m_u.pppoe_hdr.session_id =
988 pppoe_mask->session_id;
989 input_set |= ICE_INSET_PPPOE_SESSION;
996 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
997 pppoe_proto_spec = item->spec;
998 pppoe_proto_mask = item->mask;
999 /* Check if PPPoE optional proto_id item
1000 * is used to describe protocol.
1001 * If yes, both spec and mask should be NULL.
1002 * If no, both spec and mask shouldn't be NULL.
1004 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1005 (pppoe_proto_spec && !pppoe_proto_mask)) {
1006 rte_flow_error_set(error, EINVAL,
1007 RTE_FLOW_ERROR_TYPE_ITEM,
1009 "Invalid pppoe proto item");
1012 if (pppoe_proto_spec && pppoe_proto_mask) {
1015 list[t].type = ICE_PPPOE;
1016 if (pppoe_proto_mask->proto_id) {
1017 list[t].h_u.pppoe_hdr.ppp_prot_id =
1018 pppoe_proto_spec->proto_id;
1019 list[t].m_u.pppoe_hdr.ppp_prot_id =
1020 pppoe_proto_mask->proto_id;
1021 input_set |= ICE_INSET_PPPOE_PROTO;
1027 case RTE_FLOW_ITEM_TYPE_ESP:
1028 esp_spec = item->spec;
1029 esp_mask = item->mask;
1030 if ((esp_spec && !esp_mask) ||
1031 (!esp_spec && esp_mask)) {
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ITEM,
1035 "Invalid esp item");
1038 /* Check esp mask and update input set */
1039 if (esp_mask && esp_mask->hdr.seq) {
1040 rte_flow_error_set(error, EINVAL,
1041 RTE_FLOW_ERROR_TYPE_ITEM,
1043 "Invalid esp mask");
1047 if (!esp_spec && !esp_mask && !input_set) {
1049 if (ipv6_valiad && udp_valiad)
1051 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1052 else if (ipv6_valiad)
1053 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1054 else if (ipv4_valiad)
1056 } else if (esp_spec && esp_mask &&
1059 list[t].type = ICE_NAT_T;
1061 list[t].type = ICE_ESP;
1062 list[t].h_u.esp_hdr.spi =
1064 list[t].m_u.esp_hdr.spi =
1066 input_set |= ICE_INSET_ESP_SPI;
1070 if (!profile_rule) {
1071 if (ipv6_valiad && udp_valiad)
1072 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1073 else if (ipv4_valiad && udp_valiad)
1074 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1075 else if (ipv6_valiad)
1076 *tun_type = ICE_SW_TUN_IPV6_ESP;
1077 else if (ipv4_valiad)
1078 *tun_type = ICE_SW_TUN_IPV4_ESP;
1082 case RTE_FLOW_ITEM_TYPE_AH:
1083 ah_spec = item->spec;
1084 ah_mask = item->mask;
1085 if ((ah_spec && !ah_mask) ||
1086 (!ah_spec && ah_mask)) {
1087 rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ITEM,
1093 /* Check ah mask and update input set */
1095 (ah_mask->next_hdr ||
1096 ah_mask->payload_len ||
1098 ah_mask->reserved)) {
1099 rte_flow_error_set(error, EINVAL,
1100 RTE_FLOW_ERROR_TYPE_ITEM,
1106 if (!ah_spec && !ah_mask && !input_set) {
1108 if (ipv6_valiad && udp_valiad)
1110 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1111 else if (ipv6_valiad)
1112 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1113 else if (ipv4_valiad)
1115 } else if (ah_spec && ah_mask &&
1117 list[t].type = ICE_AH;
1118 list[t].h_u.ah_hdr.spi =
1120 list[t].m_u.ah_hdr.spi =
1122 input_set |= ICE_INSET_AH_SPI;
1126 if (!profile_rule) {
1129 else if (ipv6_valiad)
1130 *tun_type = ICE_SW_TUN_IPV6_AH;
1131 else if (ipv4_valiad)
1132 *tun_type = ICE_SW_TUN_IPV4_AH;
1136 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1137 l2tp_spec = item->spec;
1138 l2tp_mask = item->mask;
1139 if ((l2tp_spec && !l2tp_mask) ||
1140 (!l2tp_spec && l2tp_mask)) {
1141 rte_flow_error_set(error, EINVAL,
1142 RTE_FLOW_ERROR_TYPE_ITEM,
1144 "Invalid l2tp item");
1148 if (!l2tp_spec && !l2tp_mask && !input_set) {
1151 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1152 else if (ipv4_valiad)
1154 } else if (l2tp_spec && l2tp_mask &&
1155 l2tp_mask->session_id){
1156 list[t].type = ICE_L2TPV3;
1157 list[t].h_u.l2tpv3_sess_hdr.session_id =
1158 l2tp_spec->session_id;
1159 list[t].m_u.l2tpv3_sess_hdr.session_id =
1160 l2tp_mask->session_id;
1161 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1165 if (!profile_rule) {
1168 ICE_SW_TUN_IPV6_L2TPV3;
1169 else if (ipv4_valiad)
1171 ICE_SW_TUN_IPV4_L2TPV3;
1175 case RTE_FLOW_ITEM_TYPE_PFCP:
1176 pfcp_spec = item->spec;
1177 pfcp_mask = item->mask;
1178 /* Check if PFCP item is used to describe protocol.
1179 * If yes, both spec and mask should be NULL.
1180 * If no, both spec and mask shouldn't be NULL.
1182 if ((!pfcp_spec && pfcp_mask) ||
1183 (pfcp_spec && !pfcp_mask)) {
1184 rte_flow_error_set(error, EINVAL,
1185 RTE_FLOW_ERROR_TYPE_ITEM,
1187 "Invalid PFCP item");
1190 if (pfcp_spec && pfcp_mask) {
1191 /* Check pfcp mask and update input set */
1192 if (pfcp_mask->msg_type ||
1193 pfcp_mask->msg_len ||
1195 rte_flow_error_set(error, EINVAL,
1196 RTE_FLOW_ERROR_TYPE_ITEM,
1198 "Invalid pfcp mask");
1201 if (pfcp_mask->s_field &&
1202 pfcp_spec->s_field == 0x01 &&
1205 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1206 else if (pfcp_mask->s_field &&
1207 pfcp_spec->s_field == 0x01)
1209 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1210 else if (pfcp_mask->s_field &&
1211 !pfcp_spec->s_field &&
1214 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1215 else if (pfcp_mask->s_field &&
1216 !pfcp_spec->s_field)
1218 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1224 case RTE_FLOW_ITEM_TYPE_VOID:
1228 rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1230 "Invalid pattern item.");
1243 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1244 struct rte_flow_error *error,
1245 struct ice_adv_rule_info *rule_info)
1247 const struct rte_flow_action_vf *act_vf;
1248 const struct rte_flow_action *action;
1249 enum rte_flow_action_type action_type;
1251 for (action = actions; action->type !=
1252 RTE_FLOW_ACTION_TYPE_END; action++) {
1253 action_type = action->type;
1254 switch (action_type) {
1255 case RTE_FLOW_ACTION_TYPE_VF:
1256 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1257 act_vf = action->conf;
1258 rule_info->sw_act.vsi_handle = act_vf->id;
1261 rte_flow_error_set(error,
1262 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1264 "Invalid action type or queue number");
1269 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1270 rule_info->sw_act.flag = ICE_FLTR_RX;
1272 rule_info->priority = 5;
1278 ice_switch_parse_action(struct ice_pf *pf,
1279 const struct rte_flow_action *actions,
1280 struct rte_flow_error *error,
1281 struct ice_adv_rule_info *rule_info)
1283 struct ice_vsi *vsi = pf->main_vsi;
1284 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1285 const struct rte_flow_action_queue *act_q;
1286 const struct rte_flow_action_rss *act_qgrop;
1287 uint16_t base_queue, i;
1288 const struct rte_flow_action *action;
1289 enum rte_flow_action_type action_type;
1290 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1291 2, 4, 8, 16, 32, 64, 128};
1293 base_queue = pf->base_queue + vsi->base_queue;
1294 for (action = actions; action->type !=
1295 RTE_FLOW_ACTION_TYPE_END; action++) {
1296 action_type = action->type;
1297 switch (action_type) {
1298 case RTE_FLOW_ACTION_TYPE_RSS:
1299 act_qgrop = action->conf;
1300 if (act_qgrop->queue_num <= 1)
1302 rule_info->sw_act.fltr_act =
1304 rule_info->sw_act.fwd_id.q_id =
1305 base_queue + act_qgrop->queue[0];
1306 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1307 if (act_qgrop->queue_num ==
1308 valid_qgrop_number[i])
1311 if (i == MAX_QGRP_NUM_TYPE)
1313 if ((act_qgrop->queue[0] +
1314 act_qgrop->queue_num) >
1315 dev->data->nb_rx_queues)
1317 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1318 if (act_qgrop->queue[i + 1] !=
1319 act_qgrop->queue[i] + 1)
1321 rule_info->sw_act.qgrp_size =
1322 act_qgrop->queue_num;
1324 case RTE_FLOW_ACTION_TYPE_QUEUE:
1325 act_q = action->conf;
1326 if (act_q->index >= dev->data->nb_rx_queues)
1328 rule_info->sw_act.fltr_act =
1330 rule_info->sw_act.fwd_id.q_id =
1331 base_queue + act_q->index;
1334 case RTE_FLOW_ACTION_TYPE_DROP:
1335 rule_info->sw_act.fltr_act =
1339 case RTE_FLOW_ACTION_TYPE_VOID:
1347 rule_info->sw_act.vsi_handle = vsi->idx;
1349 rule_info->sw_act.src = vsi->idx;
1350 rule_info->priority = 5;
1355 rte_flow_error_set(error,
1356 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1358 "Invalid action type or queue number");
1363 ice_switch_check_action(const struct rte_flow_action *actions,
1364 struct rte_flow_error *error)
1366 const struct rte_flow_action *action;
1367 enum rte_flow_action_type action_type;
1368 uint16_t actions_num = 0;
1370 for (action = actions; action->type !=
1371 RTE_FLOW_ACTION_TYPE_END; action++) {
1372 action_type = action->type;
1373 switch (action_type) {
1374 case RTE_FLOW_ACTION_TYPE_VF:
1375 case RTE_FLOW_ACTION_TYPE_RSS:
1376 case RTE_FLOW_ACTION_TYPE_QUEUE:
1377 case RTE_FLOW_ACTION_TYPE_DROP:
1380 case RTE_FLOW_ACTION_TYPE_VOID:
1383 rte_flow_error_set(error,
1384 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1386 "Invalid action type");
1391 if (actions_num > 1) {
1392 rte_flow_error_set(error,
1393 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1395 "Invalid action number");
1403 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1406 case ICE_SW_TUN_PROFID_IPV6_ESP:
1407 case ICE_SW_TUN_PROFID_IPV6_AH:
1408 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1409 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1410 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1411 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1412 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1413 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1423 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1424 struct ice_pattern_match_item *array,
1426 const struct rte_flow_item pattern[],
1427 const struct rte_flow_action actions[],
1429 struct rte_flow_error *error)
1431 struct ice_pf *pf = &ad->pf;
1432 uint64_t inputset = 0;
1434 struct sw_meta *sw_meta_ptr = NULL;
1435 struct ice_adv_rule_info rule_info;
1436 struct ice_adv_lkup_elem *list = NULL;
1437 uint16_t lkups_num = 0;
1438 const struct rte_flow_item *item = pattern;
1439 uint16_t item_num = 0;
1440 enum ice_sw_tunnel_type tun_type =
1441 ICE_SW_TUN_AND_NON_TUN;
1442 struct ice_pattern_match_item *pattern_match_item = NULL;
1444 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1446 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1447 tun_type = ICE_SW_TUN_VXLAN;
1448 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1449 tun_type = ICE_SW_TUN_NVGRE;
1450 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1451 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1452 tun_type = ICE_SW_TUN_PPPOE;
1453 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1454 const struct rte_flow_item_eth *eth_mask;
1456 eth_mask = item->mask;
1459 if (eth_mask->type == UINT16_MAX)
1460 tun_type = ICE_SW_TUN_AND_NON_TUN;
1462 /* reserve one more memory slot for ETH which may
1463 * consume 2 lookup items.
1465 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1469 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1471 rte_flow_error_set(error, EINVAL,
1472 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1473 "No memory for PMD internal items");
1478 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1480 rte_flow_error_set(error, EINVAL,
1481 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1482 "No memory for sw_pattern_meta_ptr");
1486 pattern_match_item =
1487 ice_search_pattern_match_item(pattern, array, array_len, error);
1488 if (!pattern_match_item) {
1489 rte_flow_error_set(error, EINVAL,
1490 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1491 "Invalid input pattern");
1495 inputset = ice_switch_inset_get
1496 (pattern, error, list, &lkups_num, &tun_type);
1497 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1498 (inputset & ~pattern_match_item->input_set_mask)) {
1499 rte_flow_error_set(error, EINVAL,
1500 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1502 "Invalid input set");
1506 rule_info.tun_type = tun_type;
1508 ret = ice_switch_check_action(actions, error);
1510 rte_flow_error_set(error, EINVAL,
1511 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1512 "Invalid input action number");
1516 if (ad->hw.dcf_enabled)
1517 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1519 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1522 rte_flow_error_set(error, EINVAL,
1523 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1524 "Invalid input action");
1529 *meta = sw_meta_ptr;
1530 ((struct sw_meta *)*meta)->list = list;
1531 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1532 ((struct sw_meta *)*meta)->rule_info = rule_info;
1535 rte_free(sw_meta_ptr);
1538 rte_free(pattern_match_item);
1544 rte_free(sw_meta_ptr);
1545 rte_free(pattern_match_item);
1551 ice_switch_query(struct ice_adapter *ad __rte_unused,
1552 struct rte_flow *flow __rte_unused,
1553 struct rte_flow_query_count *count __rte_unused,
1554 struct rte_flow_error *error)
1556 rte_flow_error_set(error, EINVAL,
1557 RTE_FLOW_ERROR_TYPE_HANDLE,
1559 "count action not supported by switch filter");
1565 ice_switch_redirect(struct ice_adapter *ad,
1566 struct rte_flow *flow,
1567 struct ice_flow_redirect *rd)
1569 struct ice_rule_query_data *rdata = flow->rule;
1570 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1571 struct ice_adv_lkup_elem *lkups_dp = NULL;
1572 struct LIST_HEAD_TYPE *list_head;
1573 struct ice_adv_rule_info rinfo;
1574 struct ice_hw *hw = &ad->hw;
1575 struct ice_switch_info *sw;
1579 sw = hw->switch_info;
1580 if (!sw->recp_list[rdata->rid].recp_created)
1583 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1586 list_head = &sw->recp_list[rdata->rid].filt_rules;
1587 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1589 rinfo = list_itr->rule_info;
1590 if (rinfo.fltr_rule_id == rdata->rule_id &&
1591 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1592 rinfo.sw_act.vsi_handle == rd->vsi_handle) {
1593 lkups_cnt = list_itr->lkups_cnt;
1594 lkups_dp = (struct ice_adv_lkup_elem *)
1595 ice_memdup(hw, list_itr->lkups,
1596 sizeof(*list_itr->lkups) *
1597 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1599 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1610 /* Remove the old rule */
1611 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1614 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1620 /* Update VSI context */
1621 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1623 /* Replay the rule */
1624 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1627 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1632 ice_free(hw, lkups_dp);
1637 ice_switch_init(struct ice_adapter *ad)
1640 struct ice_flow_parser *dist_parser;
1641 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1643 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1644 dist_parser = &ice_switch_dist_parser_comms;
1645 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1646 dist_parser = &ice_switch_dist_parser_os;
1650 if (ad->devargs.pipe_mode_support)
1651 ret = ice_register_parser(perm_parser, ad);
1653 ret = ice_register_parser(dist_parser, ad);
1658 ice_switch_uninit(struct ice_adapter *ad)
1660 struct ice_flow_parser *dist_parser;
1661 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1663 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1664 dist_parser = &ice_switch_dist_parser_comms;
1666 dist_parser = &ice_switch_dist_parser_os;
1668 if (ad->devargs.pipe_mode_support)
1669 ice_unregister_parser(perm_parser, ad);
1671 ice_unregister_parser(dist_parser, ad);
1675 ice_flow_engine ice_switch_engine = {
1676 .init = ice_switch_init,
1677 .uninit = ice_switch_uninit,
1678 .create = ice_switch_create,
1679 .destroy = ice_switch_destroy,
1680 .query_count = ice_switch_query,
1681 .redirect = ice_switch_redirect,
1682 .free = ice_switch_filter_rule_free,
1683 .type = ICE_FLOW_ENGINE_SWITCH,
1687 ice_flow_parser ice_switch_dist_parser_os = {
1688 .engine = &ice_switch_engine,
1689 .array = ice_switch_pattern_dist_os,
1690 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1691 .parse_pattern_action = ice_switch_parse_pattern_action,
1692 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1696 ice_flow_parser ice_switch_dist_parser_comms = {
1697 .engine = &ice_switch_engine,
1698 .array = ice_switch_pattern_dist_comms,
1699 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1700 .parse_pattern_action = ice_switch_parse_pattern_action,
1701 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1705 ice_flow_parser ice_switch_perm_parser = {
1706 .engine = &ice_switch_engine,
1707 .array = ice_switch_pattern_perm,
1708 .array_len = RTE_DIM(ice_switch_pattern_perm),
1709 .parse_pattern_action = ice_switch_parse_pattern_action,
1710 .stage = ICE_FLOW_STAGE_PERMISSION,
1713 RTE_INIT(ice_sw_engine_init)
1715 struct ice_flow_engine *engine = &ice_switch_engine;
1716 ice_register_flow_engine(engine);