1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
/* Number of valid RSS queue-group sizes (see valid_qgrop_number[] in
 * ice_switch_parse_action()).
 */
28 #define MAX_QGRP_NUM_TYPE 7
/* ICE_SW_INSET_* bitmaps enumerate the header fields ("input set") a given
 * pattern is allowed to match on.  They are referenced by the pattern tables
 * below and checked against the input set actually parsed from a flow.
 * Non-tunnel patterns: outer MAC / IPv4 / IPv6 / L4 fields.
 */
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_IPV4 ( \
33 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
34 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
35 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
38 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
39 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
40 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
41 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
42 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
43 #define ICE_SW_INSET_MAC_IPV6 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
45 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
46 ICE_INSET_IPV6_NEXT_HDR)
47 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
49 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
50 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
54 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* "DIST" tunnel input sets: distributor stage matches on both inner tunnel
 * fields (ICE_INSET_TUN_*) and the outer IPv4 destination.
 */
55 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
56 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
57 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
58 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
64 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
65 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
66 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
67 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* "PERM" tunnel input sets: permission stage matches on inner tunnel
 * fields only (no outer-header fields).
 */
77 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
83 ICE_INSET_TUN_IPV4_TOS)
84 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
87 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets: VLAN tags, destination MAC, ethertype and PPPoE
 * session id, optionally plus the PPP protocol id.
 */
88 #define ICE_SW_INSET_MAC_PPPOE ( \
89 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
90 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
91 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
94 ICE_INSET_PPPOE_PROTO)
/* NOTE(review): the enclosing "struct sw_meta { ... };" declaration is not
 * fully visible in this chunk; the two fields below are members of it
 * (casts in ice_switch_create() confirm the member names list, lkups_num
 * and rule_info).  `list` is the lookup-element array built by
 * ice_switch_inset_get(); `rule_info` carries the action/forwarding info.
 */
97 struct ice_adv_lkup_elem *list;
99 struct ice_adv_rule_info rule_info;
/* Forward declarations of the three parsers registered by this engine:
 * distributor-stage parsers for the OS-default and COMMS DDP packages,
 * and the permission-stage parser (definitions at end of file).
 */
102 static struct ice_flow_parser ice_switch_dist_parser_os;
103 static struct ice_flow_parser ice_switch_dist_parser_comms;
104 static struct ice_flow_parser ice_switch_perm_parser;
/* Patterns supported in the distributor stage with the COMMS DDP package.
 * Each entry: {pattern, input_set_mask, meta}; input_set_mask bounds which
 * fields a flow using that pattern may match on (meta unused here).
 */
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
109 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
111 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
112 {pattern_eth_ipv4_udp,
113 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
114 {pattern_eth_ipv4_tcp,
115 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
117 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
118 {pattern_eth_ipv6_udp,
119 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
120 {pattern_eth_ipv6_tcp,
121 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
122 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
123 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
124 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
125 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
126 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
127 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
128 {pattern_eth_ipv4_nvgre_eth_ipv4,
129 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
130 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
131 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
132 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
133 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE patterns (discovery and session, with/without VLAN) are only
 * available with the COMMS package -- absent from the OS table below.
 */
135 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
136 {pattern_eth_vlan_pppoed,
137 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
139 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
140 {pattern_eth_vlan_pppoes,
141 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
142 {pattern_eth_pppoes_proto,
143 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
144 {pattern_eth_vlan_pppoes_proto,
145 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
/* Patterns supported in the distributor stage with the OS-default DDP
 * package: same L2/L3/L4 and tunnel patterns as the COMMS table, minus
 * the PPPoE entries.
 */
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
151 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
153 ICE_INSET_NONE, ICE_INSET_NONE},
155 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
156 {pattern_eth_ipv4_udp,
157 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
158 {pattern_eth_ipv4_tcp,
159 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
161 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
162 {pattern_eth_ipv6_udp,
163 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
164 {pattern_eth_ipv6_tcp,
165 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
166 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
167 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
168 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
169 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
170 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
171 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
172 {pattern_eth_ipv4_nvgre_eth_ipv4,
173 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
174 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
175 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
176 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
177 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Patterns supported in the permission stage (pipeline mode): tunnel
 * patterns use the PERM input sets (inner fields only, no outer header).
 */
ice_pattern_match_item ice_switch_pattern_perm[] = {
183 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
184 {pattern_eth_ipv4_udp,
185 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
186 {pattern_eth_ipv4_tcp,
187 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
189 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
190 {pattern_eth_ipv6_udp,
191 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
192 {pattern_eth_ipv6_tcp,
193 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
194 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
195 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
196 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
197 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
198 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
199 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
200 {pattern_eth_ipv4_nvgre_eth_ipv4,
201 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
202 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
203 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
204 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
205 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Engine .create hook: program the rule parsed earlier (see
 * ice_switch_parse_pattern_action(), which fills `meta` with a sw_meta of
 * lookup list + rule info) into hardware via ice_add_adv_rule(), then
 * stash the returned rule id in flow->rule so destroy can find it.
 * Returns 0 on success, negative and sets rte_flow_error on failure.
 * NOTE(review): several lines (error-path gotos/returns, the `meta`
 * parameter and `ret` declaration) are elided in this extraction --
 * confirm against the full source before relying on the control flow.
 */
209 ice_switch_create(struct ice_adapter *ad,
210 struct rte_flow *flow,
212 struct rte_flow_error *error)
215 struct ice_pf *pf = &ad->pf;
216 struct ice_hw *hw = ICE_PF_TO_HW(pf);
217 struct ice_rule_query_data rule_added = {0};
218 struct ice_rule_query_data *filter_ptr;
219 struct ice_adv_lkup_elem *list =
220 ((struct sw_meta *)meta)->list;
222 ((struct sw_meta *)meta)->lkups_num;
223 struct ice_adv_rule_info *rule_info =
224 &((struct sw_meta *)meta)->rule_info;
/* Hardware limit: a rule may not use more lookup words than the chain
 * supports.
 */
226 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
227 rte_flow_error_set(error, EINVAL,
228 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
229 "item number too large for rule");
233 rte_flow_error_set(error, EINVAL,
234 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
235 "lookup list should not be NULL");
238 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* On success, persist the rule id so ice_switch_destroy() can remove it;
 * filter_ptr ownership transfers to flow->rule (freed by .free hook).
 */
240 filter_ptr = rte_zmalloc("ice_switch_filter",
241 sizeof(struct ice_rule_query_data), 0);
243 rte_flow_error_set(error, EINVAL,
244 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
245 "No memory for ice_switch_filter");
248 flow->rule = filter_ptr;
249 rte_memcpy(filter_ptr,
251 sizeof(struct ice_rule_query_data))
253 rte_flow_error_set(error, EINVAL,
254 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
255 "switch filter create flow fail");
/* Engine .destroy hook: remove the hardware rule identified by the
 * rule-query data saved in flow->rule by ice_switch_create(), then free
 * that allocation.  Sets rte_flow_error and fails if no rule data is
 * attached or hardware removal fails.
 */
271 ice_switch_destroy(struct ice_adapter *ad,
272 struct rte_flow *flow,
273 struct rte_flow_error *error)
275 struct ice_hw *hw = &ad->hw;
277 struct ice_rule_query_data *filter_ptr;
279 filter_ptr = (struct ice_rule_query_data *)
/* flow->rule == NULL means this flow was not created by the switch
 * engine (or creation never completed).
 */
283 rte_flow_error_set(error, EINVAL,
284 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
286 " create by switch filter");
290 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
292 rte_flow_error_set(error, EINVAL,
293 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
294 "fail to destroy switch filter rule");
298 rte_free(filter_ptr);
/* Engine .free hook: release the per-flow rule-query data allocated in
 * ice_switch_create() (rte_free(NULL) is a no-op).
 */
303 ice_switch_filter_rule_free(struct rte_flow *flow)
305 rte_free(flow->rule);
/* Walk the rte_flow pattern items and build the ice_adv_lkup_elem array
 * consumed by ice_add_adv_rule():
 *  - for every recognized item, fill list[t].type plus matching header
 *    (h_u) and mask (m_u) fields for each fully-masked field;
 *  - accumulate the corresponding ICE_INSET_* bit into input_set (the
 *    return value), which the caller validates against the pattern table;
 *  - fields before a tunnel item use the outer (non-TUN) INSET bits and
 *    *_OFOS lookup types, fields after it the TUN/_IL variants
 *    (tunnel_valid appears to flip on the tunnel item -- the assignment
 *    itself is elided in this extraction, confirm in full source).
 * `lkups_num` (elided parameter) receives the number of entries written.
 * Returns ICE_INSET_NONE / 0 on any validation error (after setting
 * rte_flow_error).
 * NOTE(review): many interior lines (braces, `else` arms, t++ counters,
 * returns) are elided here; comments below describe only what is visible.
 */
309 ice_switch_inset_get(const struct rte_flow_item pattern[],
310 struct rte_flow_error *error,
311 struct ice_adv_lkup_elem *list,
313 enum ice_sw_tunnel_type tun_type)
315 const struct rte_flow_item *item = pattern;
316 enum rte_flow_item_type item_type;
317 const struct rte_flow_item_eth *eth_spec, *eth_mask;
318 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
319 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
320 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
321 const struct rte_flow_item_udp *udp_spec, *udp_mask;
322 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
323 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
324 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
325 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
326 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
327 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
/* All-ones IPv6 address, used as the "fully masked" reference for
 * memcmp() checks on src/dst address masks.
 */
329 uint8_t ipv6_addr_mask[16] = {
330 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
331 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
332 uint64_t input_set = ICE_INSET_NONE;
334 uint16_t tunnel_valid = 0;
335 uint16_t pppoe_valid = 0;
/* Range matching (item->last) is not supported by the switch engine. */
338 for (item = pattern; item->type !=
339 RTE_FLOW_ITEM_TYPE_END; item++) {
341 rte_flow_error_set(error, EINVAL,
342 RTE_FLOW_ERROR_TYPE_ITEM,
344 "Not support range");
347 item_type = item->type;
/* ETH: src/dst MAC when fully masked (TUN_* variants once inside a
 * tunnel); a fully-masked ethertype additionally emits a separate
 * ICE_ETYPE_OL lookup element.
 */
350 case RTE_FLOW_ITEM_TYPE_ETH:
351 eth_spec = item->spec;
352 eth_mask = item->mask;
353 if (eth_spec && eth_mask) {
355 rte_is_broadcast_ether_addr(&eth_mask->src))
356 input_set |= ICE_INSET_TUN_SMAC;
358 rte_is_broadcast_ether_addr(&eth_mask->src))
359 input_set |= ICE_INSET_SMAC;
361 rte_is_broadcast_ether_addr(&eth_mask->dst))
362 input_set |= ICE_INSET_TUN_DMAC;
364 rte_is_broadcast_ether_addr(&eth_mask->dst))
365 input_set |= ICE_INSET_DMAC;
366 if (eth_mask->type == RTE_BE16(0xffff))
367 input_set |= ICE_INSET_ETHERTYPE;
368 list[t].type = (tunnel_valid == 0) ?
369 ICE_MAC_OFOS : ICE_MAC_IL;
370 struct ice_ether_hdr *h;
371 struct ice_ether_hdr *m;
373 h = &list[t].h_u.eth_hdr;
374 m = &list[t].m_u.eth_hdr;
/* Copy MAC addresses byte-wise, only for bytes whose mask is 0xff. */
375 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
376 if (eth_mask->src.addr_bytes[j] ==
379 eth_spec->src.addr_bytes[j];
381 eth_mask->src.addr_bytes[j];
384 if (eth_mask->dst.addr_bytes[j] ==
387 eth_spec->dst.addr_bytes[j];
389 eth_mask->dst.addr_bytes[j];
395 if (eth_mask->type == UINT16_MAX) {
396 list[t].type = ICE_ETYPE_OL;
397 list[t].h_u.ethertype.ethtype_id =
399 list[t].m_u.ethertype.ethtype_id =
403 } else if (!eth_spec && !eth_mask) {
404 list[t].type = (tun_type == ICE_NON_TUN) ?
405 ICE_MAC_OFOS : ICE_MAC_IL;
/* IPV4: src/dst, TTL, protocol, TOS when fully masked; version/ihl,
 * total length, packet id and checksum masks are rejected.
 */
409 case RTE_FLOW_ITEM_TYPE_IPV4:
410 ipv4_spec = item->spec;
411 ipv4_mask = item->mask;
412 if (ipv4_spec && ipv4_mask) {
413 /* Check IPv4 mask and update input set */
414 if (ipv4_mask->hdr.version_ihl ||
415 ipv4_mask->hdr.total_length ||
416 ipv4_mask->hdr.packet_id ||
417 ipv4_mask->hdr.hdr_checksum) {
418 rte_flow_error_set(error, EINVAL,
419 RTE_FLOW_ERROR_TYPE_ITEM,
421 "Invalid IPv4 mask.");
426 if (ipv4_mask->hdr.type_of_service ==
429 ICE_INSET_TUN_IPV4_TOS;
430 if (ipv4_mask->hdr.src_addr ==
433 ICE_INSET_TUN_IPV4_SRC;
434 if (ipv4_mask->hdr.dst_addr ==
437 ICE_INSET_TUN_IPV4_DST;
438 if (ipv4_mask->hdr.time_to_live ==
441 ICE_INSET_TUN_IPV4_TTL;
442 if (ipv4_mask->hdr.next_proto_id ==
445 ICE_INSET_TUN_IPV4_PROTO;
447 if (ipv4_mask->hdr.src_addr ==
449 input_set |= ICE_INSET_IPV4_SRC;
450 if (ipv4_mask->hdr.dst_addr ==
452 input_set |= ICE_INSET_IPV4_DST;
453 if (ipv4_mask->hdr.time_to_live ==
455 input_set |= ICE_INSET_IPV4_TTL;
456 if (ipv4_mask->hdr.next_proto_id ==
459 ICE_INSET_IPV4_PROTO;
460 if (ipv4_mask->hdr.type_of_service ==
465 list[t].type = (tunnel_valid == 0) ?
466 ICE_IPV4_OFOS : ICE_IPV4_IL;
467 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
468 list[t].h_u.ipv4_hdr.src_addr =
469 ipv4_spec->hdr.src_addr;
470 list[t].m_u.ipv4_hdr.src_addr =
473 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
474 list[t].h_u.ipv4_hdr.dst_addr =
475 ipv4_spec->hdr.dst_addr;
476 list[t].m_u.ipv4_hdr.dst_addr =
479 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
480 list[t].h_u.ipv4_hdr.time_to_live =
481 ipv4_spec->hdr.time_to_live;
482 list[t].m_u.ipv4_hdr.time_to_live =
485 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
486 list[t].h_u.ipv4_hdr.protocol =
487 ipv4_spec->hdr.next_proto_id;
488 list[t].m_u.ipv4_hdr.protocol =
491 if (ipv4_mask->hdr.type_of_service ==
493 list[t].h_u.ipv4_hdr.tos =
494 ipv4_spec->hdr.type_of_service;
495 list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
498 } else if (!ipv4_spec && !ipv4_mask) {
499 list[t].type = (tunnel_valid == 0) ?
500 ICE_IPV4_OFOS : ICE_IPV4_IL;
/* IPV6: src/dst, next header, hop limit, traffic class when fully
 * masked; a payload_len mask is rejected.
 */
504 case RTE_FLOW_ITEM_TYPE_IPV6:
505 ipv6_spec = item->spec;
506 ipv6_mask = item->mask;
507 if (ipv6_spec && ipv6_mask) {
508 if (ipv6_mask->hdr.payload_len) {
509 rte_flow_error_set(error, EINVAL,
510 RTE_FLOW_ERROR_TYPE_ITEM,
512 "Invalid IPv6 mask");
517 if (!memcmp(ipv6_mask->hdr.src_addr,
519 RTE_DIM(ipv6_mask->hdr.src_addr)))
521 ICE_INSET_TUN_IPV6_SRC;
522 if (!memcmp(ipv6_mask->hdr.dst_addr,
524 RTE_DIM(ipv6_mask->hdr.dst_addr)))
526 ICE_INSET_TUN_IPV6_DST;
527 if (ipv6_mask->hdr.proto == UINT8_MAX)
529 ICE_INSET_TUN_IPV6_NEXT_HDR;
530 if (ipv6_mask->hdr.hop_limits ==
533 ICE_INSET_TUN_IPV6_HOP_LIMIT;
534 if ((ipv6_mask->hdr.vtc_flow &
536 (RTE_IPV6_HDR_TC_MASK))
538 (RTE_IPV6_HDR_TC_MASK))
540 ICE_INSET_TUN_IPV6_TC;
542 if (!memcmp(ipv6_mask->hdr.src_addr,
544 RTE_DIM(ipv6_mask->hdr.src_addr)))
545 input_set |= ICE_INSET_IPV6_SRC;
546 if (!memcmp(ipv6_mask->hdr.dst_addr,
548 RTE_DIM(ipv6_mask->hdr.dst_addr)))
549 input_set |= ICE_INSET_IPV6_DST;
550 if (ipv6_mask->hdr.proto == UINT8_MAX)
552 ICE_INSET_IPV6_NEXT_HDR;
553 if (ipv6_mask->hdr.hop_limits ==
556 ICE_INSET_IPV6_HOP_LIMIT;
557 if ((ipv6_mask->hdr.vtc_flow &
559 (RTE_IPV6_HDR_TC_MASK))
561 (RTE_IPV6_HDR_TC_MASK))
562 input_set |= ICE_INSET_IPV6_TC;
564 list[t].type = (tunnel_valid == 0) ?
565 ICE_IPV6_OFOS : ICE_IPV6_IL;
566 struct ice_ipv6_hdr *f;
567 struct ice_ipv6_hdr *s;
568 f = &list[t].h_u.ipv6_hdr;
569 s = &list[t].m_u.ipv6_hdr;
/* Byte-wise copy of IPv6 addresses for fully-masked bytes only. */
570 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
571 if (ipv6_mask->hdr.src_addr[j] ==
574 ipv6_spec->hdr.src_addr[j];
576 ipv6_mask->hdr.src_addr[j];
578 if (ipv6_mask->hdr.dst_addr[j] ==
581 ipv6_spec->hdr.dst_addr[j];
583 ipv6_mask->hdr.dst_addr[j];
586 if (ipv6_mask->hdr.proto == UINT8_MAX) {
588 ipv6_spec->hdr.proto;
589 s->next_hdr = UINT8_MAX;
591 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
593 ipv6_spec->hdr.hop_limits;
594 s->hop_limit = UINT8_MAX;
/* Traffic class lives inside the big-endian ver/tc/flow word; extract
 * the TC bits, rebuild the word with version and flow label zeroed.
 */
596 if ((ipv6_mask->hdr.vtc_flow &
598 (RTE_IPV6_HDR_TC_MASK))
600 (RTE_IPV6_HDR_TC_MASK)) {
601 struct ice_le_ver_tc_flow vtf;
602 vtf.u.fld.version = 0;
603 vtf.u.fld.flow_label = 0;
604 vtf.u.fld.tc = (rte_be_to_cpu_32
605 (ipv6_spec->hdr.vtc_flow) &
606 RTE_IPV6_HDR_TC_MASK) >>
607 RTE_IPV6_HDR_TC_SHIFT;
608 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
609 vtf.u.fld.tc = UINT8_MAX;
610 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
613 } else if (!ipv6_spec && !ipv6_mask) {
/* NOTE(review): suspected bug -- this is the IPv6 item, yet the lookup
 * type is set to ICE_IPV4_OFOS/ICE_IPV4_IL; expected
 * ICE_IPV6_OFOS/ICE_IPV6_IL (compare the spec+mask branch above).
 * Confirm against upstream DPDK fix history before changing.
 */
614 list[t].type = (tun_type == ICE_NON_TUN) ?
615 ICE_IPV4_OFOS : ICE_IPV4_IL;
/* UDP: src/dst ports when fully masked; length/checksum masks rejected.
 * The outer UDP of a VXLAN pattern uses lookup type ICE_UDP_OF,
 * everything else ICE_UDP_ILOS.
 */
619 case RTE_FLOW_ITEM_TYPE_UDP:
620 udp_spec = item->spec;
621 udp_mask = item->mask;
622 if (udp_spec && udp_mask) {
623 /* Check UDP mask and update input set*/
624 if (udp_mask->hdr.dgram_len ||
625 udp_mask->hdr.dgram_cksum) {
626 rte_flow_error_set(error, EINVAL,
627 RTE_FLOW_ERROR_TYPE_ITEM,
634 if (udp_mask->hdr.src_port ==
637 ICE_INSET_TUN_UDP_SRC_PORT;
638 if (udp_mask->hdr.dst_port ==
641 ICE_INSET_TUN_UDP_DST_PORT;
643 if (udp_mask->hdr.src_port ==
646 ICE_INSET_UDP_SRC_PORT;
647 if (udp_mask->hdr.dst_port ==
650 ICE_INSET_UDP_DST_PORT;
652 if (tun_type == ICE_SW_TUN_VXLAN &&
654 list[t].type = ICE_UDP_OF;
656 list[t].type = ICE_UDP_ILOS;
657 if (udp_mask->hdr.src_port == UINT16_MAX) {
658 list[t].h_u.l4_hdr.src_port =
659 udp_spec->hdr.src_port;
660 list[t].m_u.l4_hdr.src_port =
661 udp_mask->hdr.src_port;
663 if (udp_mask->hdr.dst_port == UINT16_MAX) {
664 list[t].h_u.l4_hdr.dst_port =
665 udp_spec->hdr.dst_port;
666 list[t].m_u.l4_hdr.dst_port =
667 udp_mask->hdr.dst_port;
670 } else if (!udp_spec && !udp_mask) {
671 list[t].type = ICE_UDP_ILOS;
/* TCP: src/dst ports when fully masked; all other TCP header field
 * masks (seq/ack/flags/window/etc.) are rejected.
 */
675 case RTE_FLOW_ITEM_TYPE_TCP:
676 tcp_spec = item->spec;
677 tcp_mask = item->mask;
678 if (tcp_spec && tcp_mask) {
679 /* Check TCP mask and update input set */
680 if (tcp_mask->hdr.sent_seq ||
681 tcp_mask->hdr.recv_ack ||
682 tcp_mask->hdr.data_off ||
683 tcp_mask->hdr.tcp_flags ||
684 tcp_mask->hdr.rx_win ||
685 tcp_mask->hdr.cksum ||
686 tcp_mask->hdr.tcp_urp) {
687 rte_flow_error_set(error, EINVAL,
688 RTE_FLOW_ERROR_TYPE_ITEM,
695 if (tcp_mask->hdr.src_port ==
698 ICE_INSET_TUN_TCP_SRC_PORT;
699 if (tcp_mask->hdr.dst_port ==
702 ICE_INSET_TUN_TCP_DST_PORT;
704 if (tcp_mask->hdr.src_port ==
707 ICE_INSET_TCP_SRC_PORT;
708 if (tcp_mask->hdr.dst_port ==
711 ICE_INSET_TCP_DST_PORT;
713 list[t].type = ICE_TCP_IL;
714 if (tcp_mask->hdr.src_port == UINT16_MAX) {
715 list[t].h_u.l4_hdr.src_port =
716 tcp_spec->hdr.src_port;
717 list[t].m_u.l4_hdr.src_port =
718 tcp_mask->hdr.src_port;
720 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
721 list[t].h_u.l4_hdr.dst_port =
722 tcp_spec->hdr.dst_port;
723 list[t].m_u.l4_hdr.dst_port =
724 tcp_mask->hdr.dst_port;
727 } else if (!tcp_spec && !tcp_mask) {
728 list[t].type = ICE_TCP_IL;
/* SCTP: src/dst ports when fully masked; checksum mask rejected. */
732 case RTE_FLOW_ITEM_TYPE_SCTP:
733 sctp_spec = item->spec;
734 sctp_mask = item->mask;
735 if (sctp_spec && sctp_mask) {
736 /* Check SCTP mask and update input set */
737 if (sctp_mask->hdr.cksum) {
738 rte_flow_error_set(error, EINVAL,
739 RTE_FLOW_ERROR_TYPE_ITEM,
741 "Invalid SCTP mask");
746 if (sctp_mask->hdr.src_port ==
749 ICE_INSET_TUN_SCTP_SRC_PORT;
750 if (sctp_mask->hdr.dst_port ==
753 ICE_INSET_TUN_SCTP_DST_PORT;
755 if (sctp_mask->hdr.src_port ==
758 ICE_INSET_SCTP_SRC_PORT;
759 if (sctp_mask->hdr.dst_port ==
762 ICE_INSET_SCTP_DST_PORT;
764 list[t].type = ICE_SCTP_IL;
765 if (sctp_mask->hdr.src_port == UINT16_MAX) {
766 list[t].h_u.sctp_hdr.src_port =
767 sctp_spec->hdr.src_port;
768 list[t].m_u.sctp_hdr.src_port =
769 sctp_mask->hdr.src_port;
771 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
772 list[t].h_u.sctp_hdr.dst_port =
773 sctp_spec->hdr.dst_port;
774 list[t].m_u.sctp_hdr.dst_port =
775 sctp_mask->hdr.dst_port;
778 } else if (!sctp_spec && !sctp_mask) {
779 list[t].type = ICE_SCTP_IL;
/* VXLAN: spec and mask must be both NULL (protocol marker) or both
 * non-NULL; a fully-masked 24-bit VNI is packed into tnl_hdr.vni.
 */
783 case RTE_FLOW_ITEM_TYPE_VXLAN:
784 vxlan_spec = item->spec;
785 vxlan_mask = item->mask;
786 /* Check if VXLAN item is used to describe protocol.
787 * If yes, both spec and mask should be NULL.
788 * If no, both spec and mask shouldn't be NULL.
790 if ((!vxlan_spec && vxlan_mask) ||
791 (vxlan_spec && !vxlan_mask)) {
792 rte_flow_error_set(error, EINVAL,
793 RTE_FLOW_ERROR_TYPE_ITEM,
795 "Invalid VXLAN item");
800 if (vxlan_spec && vxlan_mask) {
801 list[t].type = ICE_VXLAN;
802 if (vxlan_mask->vni[0] == UINT8_MAX &&
803 vxlan_mask->vni[1] == UINT8_MAX &&
804 vxlan_mask->vni[2] == UINT8_MAX) {
805 list[t].h_u.tnl_hdr.vni =
806 (vxlan_spec->vni[2] << 16) |
807 (vxlan_spec->vni[1] << 8) |
809 list[t].m_u.tnl_hdr.vni =
812 ICE_INSET_TUN_VXLAN_VNI;
815 } else if (!vxlan_spec && !vxlan_mask) {
816 list[t].type = ICE_VXLAN;
/* NVGRE: same NULL-pairing rule; fully-masked 24-bit TNI packed into
 * nvgre_hdr.tni_flow.
 */
820 case RTE_FLOW_ITEM_TYPE_NVGRE:
821 nvgre_spec = item->spec;
822 nvgre_mask = item->mask;
823 /* Check if NVGRE item is used to describe protocol.
824 * If yes, both spec and mask should be NULL.
825 * If no, both spec and mask shouldn't be NULL.
827 if ((!nvgre_spec && nvgre_mask) ||
828 (nvgre_spec && !nvgre_mask)) {
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ITEM,
832 "Invalid NVGRE item");
836 if (nvgre_spec && nvgre_mask) {
837 list[t].type = ICE_NVGRE;
838 if (nvgre_mask->tni[0] == UINT8_MAX &&
839 nvgre_mask->tni[1] == UINT8_MAX &&
840 nvgre_mask->tni[2] == UINT8_MAX) {
841 list[t].h_u.nvgre_hdr.tni_flow =
842 (nvgre_spec->tni[2] << 16) |
843 (nvgre_spec->tni[1] << 8) |
845 list[t].m_u.nvgre_hdr.tni_flow =
848 ICE_INSET_TUN_NVGRE_TNI;
851 } else if (!nvgre_spec && !nvgre_mask) {
852 list[t].type = ICE_NVGRE;
/* VLAN: same NULL-pairing rule; fully-masked TCI and inner ethertype
 * both map to the outer-VLAN input-set bit.
 */
856 case RTE_FLOW_ITEM_TYPE_VLAN:
857 vlan_spec = item->spec;
858 vlan_mask = item->mask;
859 /* Check if VLAN item is used to describe protocol.
860 * If yes, both spec and mask should be NULL.
861 * If no, both spec and mask shouldn't be NULL.
863 if ((!vlan_spec && vlan_mask) ||
864 (vlan_spec && !vlan_mask)) {
865 rte_flow_error_set(error, EINVAL,
866 RTE_FLOW_ERROR_TYPE_ITEM,
868 "Invalid VLAN item");
871 if (vlan_spec && vlan_mask) {
872 list[t].type = ICE_VLAN_OFOS;
873 if (vlan_mask->tci == UINT16_MAX) {
874 list[t].h_u.vlan_hdr.vlan =
876 list[t].m_u.vlan_hdr.vlan =
878 input_set |= ICE_INSET_VLAN_OUTER;
880 if (vlan_mask->inner_type == UINT16_MAX) {
881 list[t].h_u.vlan_hdr.type =
882 vlan_spec->inner_type;
883 list[t].m_u.vlan_hdr.type =
885 input_set |= ICE_INSET_VLAN_OUTER;
888 } else if (!vlan_spec && !vlan_mask) {
889 list[t].type = ICE_VLAN_OFOS;
/* PPPOED/PPPOES: only the session id may be masked; length/code/
 * version_type masks rejected.
 */
893 case RTE_FLOW_ITEM_TYPE_PPPOED:
894 case RTE_FLOW_ITEM_TYPE_PPPOES:
895 pppoe_spec = item->spec;
896 pppoe_mask = item->mask;
897 /* Check if PPPoE item is used to describe protocol.
898 * If yes, both spec and mask should be NULL.
899 * If no, both spec and mask shouldn't be NULL.
901 if ((!pppoe_spec && pppoe_mask) ||
902 (pppoe_spec && !pppoe_mask)) {
903 rte_flow_error_set(error, EINVAL,
904 RTE_FLOW_ERROR_TYPE_ITEM,
906 "Invalid pppoe item");
909 if (pppoe_spec && pppoe_mask) {
910 /* Check pppoe mask and update input set */
911 if (pppoe_mask->length ||
913 pppoe_mask->version_type) {
914 rte_flow_error_set(error, EINVAL,
915 RTE_FLOW_ERROR_TYPE_ITEM,
917 "Invalid pppoe mask");
920 list[t].type = ICE_PPPOE;
921 if (pppoe_mask->session_id == UINT16_MAX) {
922 list[t].h_u.pppoe_hdr.session_id =
923 pppoe_spec->session_id;
924 list[t].m_u.pppoe_hdr.session_id =
926 input_set |= ICE_INSET_PPPOE_SESSION;
930 } else if (!pppoe_spec && !pppoe_mask) {
931 list[t].type = ICE_PPPOE;
/* PPPOE_PROTO_ID: optional PPP protocol id; written into the same
 * ICE_PPPOE lookup element (pppoe_valid -- elided here -- appears to
 * track whether a PPPoE element already exists).
 */
936 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
937 pppoe_proto_spec = item->spec;
938 pppoe_proto_mask = item->mask;
939 /* Check if PPPoE optional proto_id item
940 * is used to describe protocol.
941 * If yes, both spec and mask should be NULL.
942 * If no, both spec and mask shouldn't be NULL.
944 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
945 (pppoe_proto_spec && !pppoe_proto_mask)) {
946 rte_flow_error_set(error, EINVAL,
947 RTE_FLOW_ERROR_TYPE_ITEM,
949 "Invalid pppoe proto item");
952 if (pppoe_proto_spec && pppoe_proto_mask) {
955 list[t].type = ICE_PPPOE;
956 if (pppoe_proto_mask->proto_id == UINT16_MAX) {
957 list[t].h_u.pppoe_hdr.ppp_prot_id =
958 pppoe_proto_spec->proto_id;
959 list[t].m_u.pppoe_hdr.ppp_prot_id =
961 input_set |= ICE_INSET_PPPOE_PROTO;
964 } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
965 list[t].type = ICE_PPPOE;
/* VOID items are ignored; any other item type is an error. */
970 case RTE_FLOW_ITEM_TYPE_VOID:
974 rte_flow_error_set(error, EINVAL,
975 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
976 "Invalid pattern item.");
/* Parse flow actions in DCF (device config function) mode: only the VF
 * action is accepted, mapped to forward-to-VSI with the VF id used as the
 * VSI handle.  Also sets the rule source and a fixed priority of 5.
 * Returns 0 on success, sets rte_flow_error on any other action type.
 */
989 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
990 struct rte_flow_error *error,
991 struct ice_adv_rule_info *rule_info)
993 const struct rte_flow_action_vf *act_vf;
994 const struct rte_flow_action *action;
995 enum rte_flow_action_type action_type;
997 for (action = actions; action->type !=
998 RTE_FLOW_ACTION_TYPE_END; action++) {
999 action_type = action->type;
1000 switch (action_type) {
1001 case RTE_FLOW_ACTION_TYPE_VF:
1002 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1003 act_vf = action->conf;
1004 rule_info->sw_act.vsi_handle = act_vf->id;
1007 rte_flow_error_set(error,
1008 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1010 "Invalid action type or queue number");
/* Rule originates from the VSI it forwards to (DCF forwards on behalf
 * of the target VF).
 */
1015 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1017 rule_info->priority = 5;
/* Parse flow actions in normal (PF) mode and fill rule_info->sw_act.
 * Supported actions: RSS (treated as forward-to-queue-group: queue count
 * must be one of valid_qgrop_number[] and the queues contiguous), QUEUE
 * (forward to a single Rx queue, offset by the PF/VSI base queue), DROP,
 * and VOID.  Anything else sets rte_flow_error "Invalid action type or
 * queue number".
 * NOTE(review): the goto target for the validation failures (the
 * error-set block at the end) is elided in this extraction.
 */
1023 ice_switch_parse_action(struct ice_pf *pf,
1024 const struct rte_flow_action *actions,
1025 struct rte_flow_error *error,
1026 struct ice_adv_rule_info *rule_info)
1028 struct ice_vsi *vsi = pf->main_vsi;
1029 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1030 const struct rte_flow_action_queue *act_q;
1031 const struct rte_flow_action_rss *act_qgrop;
1032 uint16_t base_queue, i;
1033 const struct rte_flow_action *action;
1034 enum rte_flow_action_type action_type;
/* Hardware queue-group sizes supported by the switch filter. */
1035 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1036 2, 4, 8, 16, 32, 64, 128};
1038 base_queue = pf->base_queue + vsi->base_queue;
1039 for (action = actions; action->type !=
1040 RTE_FLOW_ACTION_TYPE_END; action++) {
1041 action_type = action->type;
1042 switch (action_type) {
1043 case RTE_FLOW_ACTION_TYPE_RSS:
1044 act_qgrop = action->conf;
1045 rule_info->sw_act.fltr_act =
1047 rule_info->sw_act.fwd_id.q_id =
1048 base_queue + act_qgrop->queue[0]
/* Queue-group size must be one of the supported powers of two. */
1049 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1050 if (act_qgrop->queue_num ==
1051 valid_qgrop_number[i])
1054 if (i == MAX_QGRP_NUM_TYPE)
1056 if ((act_qgrop->queue[0] +
1057 act_qgrop->queue_num) >
1058 dev->data->nb_rx_queues)
/* Queues in the group must be contiguous and ascending. */
1060 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1061 if (act_qgrop->queue[i + 1] !=
1062 act_qgrop->queue[i] + 1)
1064 rule_info->sw_act.qgrp_size =
1065 act_qgrop->queue_num;
1067 case RTE_FLOW_ACTION_TYPE_QUEUE:
1068 act_q = action->conf;
1069 if (act_q->index >= dev->data->nb_rx_queues)
1071 rule_info->sw_act.fltr_act =
1073 rule_info->sw_act.fwd_id.q_id =
1074 base_queue + act_q->index;
1077 case RTE_FLOW_ACTION_TYPE_DROP:
1078 rule_info->sw_act.fltr_act =
1082 case RTE_FLOW_ACTION_TYPE_VOID:
1090 rule_info->sw_act.vsi_handle = vsi->idx;
1092 rule_info->sw_act.src = vsi->idx;
1093 rule_info->priority = 5;
1098 rte_flow_error_set(error,
1099 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1101 "Invalid action type or queue number");
/* Engine .parse_pattern_action hook.  Steps:
 *  1. Scan the pattern once to detect the tunnel type (VXLAN / NVGRE /
 *     PPPoE; a fully-masked ethertype on ETH selects TUN_AND_NON_TUN)
 *     and count items (ETH reserves one extra slot because it may expand
 *     into two lookup elements -- MAC + ETYPE_OL).
 *  2. Allocate the lookup list and sw_meta, match the pattern against
 *     the stage's pattern table, parse the input set via
 *     ice_switch_inset_get() and validate it against the table's mask.
 *  3. Parse actions (DCF or normal path) and hand the filled sw_meta
 *     back to the framework through *meta; ownership passes to
 *     ice_switch_create() on success.
 * NOTE(review): cleanup labels (the rte_free error paths at the end) are
 * partially elided in this extraction.
 */
1106 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1107 struct ice_pattern_match_item *array,
1109 const struct rte_flow_item pattern[],
1110 const struct rte_flow_action actions[],
1112 struct rte_flow_error *error)
1114 struct ice_pf *pf = &ad->pf;
1115 uint64_t inputset = 0;
1117 struct sw_meta *sw_meta_ptr = NULL;
1118 struct ice_adv_rule_info rule_info;
1119 struct ice_adv_lkup_elem *list = NULL;
1120 uint16_t lkups_num = 0;
1121 const struct rte_flow_item *item = pattern;
1122 uint16_t item_num = 0;
1123 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1124 struct ice_pattern_match_item *pattern_match_item = NULL;
1126 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1128 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1129 tun_type = ICE_SW_TUN_VXLAN;
1130 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1131 tun_type = ICE_SW_TUN_NVGRE;
1132 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1133 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1134 tun_type = ICE_SW_TUN_PPPOE;
1135 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1136 const struct rte_flow_item_eth *eth_mask;
1138 eth_mask = item->mask;
1141 if (eth_mask->type == UINT16_MAX)
1142 tun_type = ICE_SW_TUN_AND_NON_TUN;
1144 /* reserve one more memory slot for ETH which may
1145 * consume 2 lookup items.
1147 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1151 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1153 rte_flow_error_set(error, EINVAL,
1154 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1155 "No memory for PMD internal items");
1159 rule_info.tun_type = tun_type;
1162 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1164 rte_flow_error_set(error, EINVAL,
1165 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1166 "No memory for sw_pattern_meta_ptr");
1170 pattern_match_item =
1171 ice_search_pattern_match_item(pattern, array, array_len, error);
1172 if (!pattern_match_item) {
1173 rte_flow_error_set(error, EINVAL,
1174 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1175 "Invalid input pattern");
/* Reject both an empty input set and any field outside the pattern
 * table's allowed mask.
 */
1179 inputset = ice_switch_inset_get
1180 (pattern, error, list, &lkups_num, tun_type);
1181 if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
1182 rte_flow_error_set(error, EINVAL,
1183 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1185 "Invalid input set");
1189 if (ad->hw.dcf_enabled)
1190 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1192 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1195 rte_flow_error_set(error, EINVAL,
1196 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1197 "Invalid input action");
1202 *meta = sw_meta_ptr;
1203 ((struct sw_meta *)*meta)->list = list;
1204 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1205 ((struct sw_meta *)*meta)->rule_info = rule_info;
1208 rte_free(sw_meta_ptr);
1211 rte_free(pattern_match_item);
1217 rte_free(sw_meta_ptr);
1218 rte_free(pattern_match_item);
/* Engine .query_count hook: the switch filter has no flow counters, so
 * this unconditionally reports "not supported" via rte_flow_error.
 */
1224 ice_switch_query(struct ice_adapter *ad __rte_unused,
1225 struct rte_flow *flow __rte_unused,
1226 struct rte_flow_query_count *count __rte_unused,
1227 struct rte_flow_error *error)
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_HANDLE,
1232 "count action not supported by switch filter");
/* Engine .init hook: pick the distributor parser matching the loaded DDP
 * package (COMMS vs OS-default), then register either the permission
 * parser (when devarg pipeline mode is on) or the distributor parser.
 */
1238 ice_switch_init(struct ice_adapter *ad)
1241 struct ice_flow_parser *dist_parser;
1242 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1244 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1245 dist_parser = &ice_switch_dist_parser_comms;
1246 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1247 dist_parser = &ice_switch_dist_parser_os;
1251 if (ad->devargs.pipe_mode_support)
1252 ret = ice_register_parser(perm_parser, ad);
1254 ret = ice_register_parser(dist_parser, ad);
/* Engine .uninit hook: mirror of ice_switch_init() -- unregister the
 * parser that was registered for this package type / pipeline mode.
 * NOTE(review): unlike init, the package-type selection here falls back
 * to the OS parser for any non-COMMS package (no explicit OS check);
 * visible lines suggest a plain if/else.
 */
1259 ice_switch_uninit(struct ice_adapter *ad)
1261 struct ice_flow_parser *dist_parser;
1262 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1264 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1265 dist_parser = &ice_switch_dist_parser_comms;
1267 dist_parser = &ice_switch_dist_parser_os;
1269 if (ad->devargs.pipe_mode_support)
1270 ice_unregister_parser(perm_parser, ad);
1272 ice_unregister_parser(dist_parser, ad);
/* Flow-engine descriptor wiring the hooks above into the generic flow
 * framework (ice_generic_flow.c).
 */
1276 ice_flow_engine ice_switch_engine = {
1277 .init = ice_switch_init,
1278 .uninit = ice_switch_uninit,
1279 .create = ice_switch_create,
1280 .destroy = ice_switch_destroy,
1281 .query_count = ice_switch_query,
1282 .free = ice_switch_filter_rule_free,
1283 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser for the OS-default DDP package. */
1287 ice_flow_parser ice_switch_dist_parser_os = {
1288 .engine = &ice_switch_engine,
1289 .array = ice_switch_pattern_dist_os,
1290 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1291 .parse_pattern_action = ice_switch_parse_pattern_action,
1292 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser for the COMMS DDP package (adds PPPoE). */
1296 ice_flow_parser ice_switch_dist_parser_comms = {
1297 .engine = &ice_switch_engine,
1298 .array = ice_switch_pattern_dist_comms,
1299 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1300 .parse_pattern_action = ice_switch_parse_pattern_action,
1301 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser used when devarg pipeline mode is enabled. */
1305 ice_flow_parser ice_switch_perm_parser = {
1306 .engine = &ice_switch_engine,
1307 .array = ice_switch_pattern_perm,
1308 .array_len = RTE_DIM(ice_switch_pattern_perm),
1309 .parse_pattern_action = ice_switch_parse_pattern_action,
1310 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Register the switch engine with the flow framework at DPDK init. */
1313 RTE_INIT(ice_sw_engine_init)
1315 struct ice_flow_engine *engine = &ice_switch_engine;
1316 ice_register_flow_engine(engine);