1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
29 #define MAX_QGRP_NUM_TYPE 7
30 #define MAX_INPUT_SET_BYTE 32
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
34 #define ICE_SW_PRI_BASE 6
36 #define ICE_SW_INSET_ETHER ( \
37 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
38 #define ICE_SW_INSET_MAC_VLAN ( \
39 ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
40 #define ICE_SW_INSET_MAC_QINQ ( \
41 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
49 ICE_SW_INSET_MAC_QINQ_IPV4 | \
50 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
52 ICE_SW_INSET_MAC_QINQ_IPV4 | \
53 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
56 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
57 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
58 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
59 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
60 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
61 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
62 #define ICE_SW_INSET_MAC_IPV6 ( \
63 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
65 ICE_INSET_IPV6_NEXT_HDR)
66 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
67 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
68 #define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
69 ICE_SW_INSET_MAC_QINQ_IPV6 | \
70 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
71 #define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
72 ICE_SW_INSET_MAC_QINQ_IPV6 | \
73 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
74 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
75 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
76 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
77 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
78 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
79 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
80 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
81 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
82 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
83 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
85 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
86 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
88 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
89 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
90 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
91 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
92 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
93 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
94 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
95 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
96 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
97 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
98 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
99 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
100 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
101 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
102 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
103 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
104 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
105 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
106 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
107 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
108 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
109 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
111 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
112 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
113 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
115 #define ICE_SW_INSET_MAC_PPPOE ( \
116 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
117 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
118 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
119 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
120 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
121 ICE_INSET_PPPOE_PROTO)
122 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
123 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
124 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
125 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
126 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
127 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
128 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
129 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
130 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
131 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
132 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
133 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
134 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
135 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
136 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
137 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
138 #define ICE_SW_INSET_MAC_IPV4_AH ( \
139 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
140 #define ICE_SW_INSET_MAC_IPV6_AH ( \
141 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
142 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
143 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
144 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
145 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
146 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
147 ICE_SW_INSET_MAC_IPV4 | \
148 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
149 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
150 ICE_SW_INSET_MAC_IPV6 | \
151 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
152 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
153 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
154 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
155 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
156 #define ICE_SW_INSET_MAC_GTPU_OUTER ( \
157 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
158 #define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
159 ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
160 #define ICE_SW_INSET_GTPU_IPV4 ( \
161 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
162 #define ICE_SW_INSET_GTPU_IPV6 ( \
163 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
164 #define ICE_SW_INSET_GTPU_IPV4_UDP ( \
165 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
166 ICE_INSET_UDP_DST_PORT)
167 #define ICE_SW_INSET_GTPU_IPV4_TCP ( \
168 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
169 ICE_INSET_TCP_DST_PORT)
170 #define ICE_SW_INSET_GTPU_IPV6_UDP ( \
171 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
172 ICE_INSET_UDP_DST_PORT)
173 #define ICE_SW_INSET_GTPU_IPV6_TCP ( \
174 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
175 ICE_INSET_TCP_DST_PORT)
/* NOTE(review): the two field lines below belong to the parser meta
 * struct (struct sw_meta) whose opening/closing lines are not visible
 * in this chunk -- confirm against the full file. 'list' is the
 * lookup-element array handed to ice_add_adv_rule(); 'rule_info'
 * carries the matching rule attributes. */
178 struct ice_adv_lkup_elem *list;
180 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two parser instances: one registered for
 * "distributor" mode, one for "permission" mode. */
183 static struct ice_flow_parser ice_switch_dist_parser;
184 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Supported pattern -> input-set table for switch filters in
 * "distributor" mode.  Each row pairs a flow pattern with up to three
 * input-set masks; exact column semantics are defined by
 * struct ice_pattern_match_item (presumably outer/inner input sets --
 * confirm against ice_generic_flow.h).
 * NOTE(review): the array's storage-class line and closing brace are
 * not visible in this chunk.
 */
187 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
/* Plain L2 / L3 / L4 patterns. */
188 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
190 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
191 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
192 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
193 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
194 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
195 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
196 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
197 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnels: outer IPv4 dst plus inner DIST input sets. */
198 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
199 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
200 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
201 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
202 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
203 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE session patterns (optionally VLAN-tagged). */
204 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
205 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
206 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
207 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
208 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
209 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
218 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* IPsec (ESP/AH), L2TPv3 and PFCP patterns. */
220 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
222 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
224 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
/* QinQ (double VLAN) patterns. */
231 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* GTP-U tunnels: plain and with extension header (EH / QFI). */
241 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
243 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
244 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
245 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
246 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
247 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
248 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
249 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
250 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
251 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
252 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
253 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
254 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
255 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
256 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
257 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
258 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
259 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
260 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
261 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
262 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
263 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
264 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
265 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
266 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
/*
 * Supported pattern -> input-set table for switch filters in
 * "permission" mode.  Mirrors ice_switch_pattern_dist_list except for
 * the tunnel rows, which use the PERM tunnel input sets (inner-only,
 * no outer IPv4 dst).  NOTE(review): the array's storage-class line
 * and closing brace are not visible in this chunk.
 */
270 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
/* Plain L2 / L3 / L4 patterns. */
271 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
272 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
273 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
274 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
275 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
276 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
277 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
278 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
279 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
280 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnels: permission mode matches inner fields only. */
281 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
282 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
283 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
284 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
285 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
286 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE session patterns (optionally VLAN-tagged). */
287 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
288 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
290 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
291 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
292 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
293 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
294 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
295 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
296 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
297 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
298 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
299 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
300 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
301 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
302 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* IPsec (ESP/AH), L2TPv3 and PFCP patterns. */
303 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
304 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
305 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
306 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
307 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
308 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
309 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
310 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
311 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
312 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
313 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
/* QinQ (double VLAN) patterns. */
314 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
315 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
316 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
317 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
318 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
319 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
320 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
321 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
322 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
323 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* GTP-U tunnels: plain and with extension header (EH / QFI). */
324 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
325 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
326 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
327 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
328 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
329 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
330 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
331 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
332 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
333 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
334 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
335 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
336 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
337 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
338 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
339 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
340 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
341 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
342 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
343 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
344 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
345 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
346 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
347 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
348 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
349 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
/*
 * Program one parsed switch rule into hardware and attach its handle
 * to the rte_flow.
 *
 * The parsed representation (lookup-element list, lookup count, rule
 * attributes) is carried in the opaque meta produced by the parse
 * stage.  On success the rule ids returned by ice_add_adv_rule() are
 * copied into a heap-allocated ice_rule_query_data stored in
 * flow->rule, so ice_switch_destroy() can remove the rule later.
 * On any failure rte_flow_error is filled in.
 *
 * NOTE(review): several lines of this function (return type, the
 * 'meta' parameter, braces, goto labels and error-path returns) are
 * not visible in this chunk -- the comments below describe only what
 * the visible code shows; confirm the error paths against the full
 * file.
 */
353 ice_switch_create(struct ice_adapter *ad,
354 struct rte_flow *flow,
356 struct rte_flow_error *error)
359 struct ice_pf *pf = &ad->pf;
360 struct ice_hw *hw = ICE_PF_TO_HW(pf);
361 struct ice_rule_query_data rule_added = {0};
362 struct ice_rule_query_data *filter_ptr;
/* Unpack the parse-stage output from the opaque meta pointer. */
363 struct ice_adv_lkup_elem *list =
364 ((struct sw_meta *)meta)->list;
366 ((struct sw_meta *)meta)->lkups_num;
367 struct ice_adv_rule_info *rule_info =
368 &((struct sw_meta *)meta)->rule_info;
/* Reject rules with more lookup words than hardware chaining allows. */
370 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
371 rte_flow_error_set(error, EINVAL,
372 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
373 "item number too large for rule");
/* A rule with no lookup list cannot be programmed. */
377 rte_flow_error_set(error, EINVAL,
378 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
379 "lookup list should not be NULL");
/* Hand the rule to the shared code; rule_added receives the hw ids. */
382 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the returned rule ids so destroy can find the rule. */
384 filter_ptr = rte_zmalloc("ice_switch_filter",
385 sizeof(struct ice_rule_query_data), 0);
387 rte_flow_error_set(error, EINVAL,
388 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
389 "No memory for ice_switch_filter");
/* Ownership of filter_ptr transfers to the flow; freed in
 * ice_switch_filter_rule_free()/ice_switch_destroy(). */
392 flow->rule = filter_ptr;
393 rte_memcpy(filter_ptr,
395 sizeof(struct ice_rule_query_data));
397 rte_flow_error_set(error, EINVAL,
398 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
399 "switch filter create flow fail");
/*
 * Remove a previously created switch rule from hardware and free the
 * rule handle stored in flow->rule.  Fills rte_flow_error on failure.
 * NOTE(review): several lines (return type, braces, the flow->rule
 * dereference, NULL-check and returns) are not visible in this chunk;
 * comments describe only the visible code.
 */
415 ice_switch_destroy(struct ice_adapter *ad,
416 struct rte_flow *flow,
417 struct rte_flow_error *error)
419 struct ice_hw *hw = &ad->hw;
421 struct ice_rule_query_data *filter_ptr;
/* The rule handle was stored by ice_switch_create(). */
423 filter_ptr = (struct ice_rule_query_data *)
/* Presumably reached when flow->rule is NULL, i.e. the flow was not
 * created by the switch filter -- confirm against the full file. */
427 rte_flow_error_set(error, EINVAL,
428 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
430 " create by switch filter");
/* Ask the shared code to remove the rule identified by the saved ids. */
434 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
436 rte_flow_error_set(error, EINVAL,
437 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
438 "fail to destroy switch filter rule");
/* Release the handle allocated in ice_switch_create(). */
442 rte_free(filter_ptr);
/*
 * Free the rule handle attached to a flow without touching hardware
 * (used when the hw rule is already gone or was never programmed).
 * rte_free(NULL) is a no-op, so a flow with no rule is safe here.
 */
447 ice_switch_filter_rule_free(struct rte_flow *flow)
449 rte_free(flow->rule);
453 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
454 struct rte_flow_error *error,
455 struct ice_adv_lkup_elem *list,
457 enum ice_sw_tunnel_type *tun_type,
458 const struct ice_pattern_match_item *pattern_match_item)
460 const struct rte_flow_item *item = pattern;
461 enum rte_flow_item_type item_type;
462 const struct rte_flow_item_eth *eth_spec, *eth_mask;
463 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
464 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
465 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
466 const struct rte_flow_item_udp *udp_spec, *udp_mask;
467 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
468 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
469 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
470 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
471 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
472 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
474 const struct rte_flow_item_esp *esp_spec, *esp_mask;
475 const struct rte_flow_item_ah *ah_spec, *ah_mask;
476 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
477 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
478 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
479 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
480 uint64_t outer_input_set = ICE_INSET_NONE;
481 uint64_t inner_input_set = ICE_INSET_NONE;
482 uint64_t *input = NULL;
483 uint16_t input_set_byte = 0;
484 bool pppoe_elem_valid = 0;
485 bool pppoe_patt_valid = 0;
486 bool pppoe_prot_valid = 0;
487 bool inner_vlan_valid = 0;
488 bool outer_vlan_valid = 0;
489 bool tunnel_valid = 0;
490 bool profile_rule = 0;
491 bool nvgre_valid = 0;
492 bool vxlan_valid = 0;
499 bool gtpu_psc_valid = 0;
500 bool inner_ipv4_valid = 0;
501 bool inner_ipv6_valid = 0;
502 bool inner_tcp_valid = 0;
503 bool inner_udp_valid = 0;
504 uint16_t j, k, t = 0;
506 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
507 *tun_type == ICE_NON_TUN_QINQ)
510 for (item = pattern; item->type !=
511 RTE_FLOW_ITEM_TYPE_END; item++) {
513 rte_flow_error_set(error, EINVAL,
514 RTE_FLOW_ERROR_TYPE_ITEM,
516 "Not support range");
519 item_type = item->type;
522 case RTE_FLOW_ITEM_TYPE_ETH:
523 eth_spec = item->spec;
524 eth_mask = item->mask;
525 if (eth_spec && eth_mask) {
526 const uint8_t *a = eth_mask->src.addr_bytes;
527 const uint8_t *b = eth_mask->dst.addr_bytes;
529 input = &inner_input_set;
531 input = &outer_input_set;
532 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
534 *input |= ICE_INSET_SMAC;
538 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
540 *input |= ICE_INSET_DMAC;
545 *input |= ICE_INSET_ETHERTYPE;
546 list[t].type = (tunnel_valid == 0) ?
547 ICE_MAC_OFOS : ICE_MAC_IL;
548 struct ice_ether_hdr *h;
549 struct ice_ether_hdr *m;
551 h = &list[t].h_u.eth_hdr;
552 m = &list[t].m_u.eth_hdr;
553 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
554 if (eth_mask->src.addr_bytes[j]) {
556 eth_spec->src.addr_bytes[j];
558 eth_mask->src.addr_bytes[j];
562 if (eth_mask->dst.addr_bytes[j]) {
564 eth_spec->dst.addr_bytes[j];
566 eth_mask->dst.addr_bytes[j];
573 if (eth_mask->type) {
574 list[t].type = ICE_ETYPE_OL;
575 list[t].h_u.ethertype.ethtype_id =
577 list[t].m_u.ethertype.ethtype_id =
585 case RTE_FLOW_ITEM_TYPE_IPV4:
586 ipv4_spec = item->spec;
587 ipv4_mask = item->mask;
589 inner_ipv4_valid = 1;
590 input = &inner_input_set;
593 input = &outer_input_set;
596 if (ipv4_spec && ipv4_mask) {
597 /* Check IPv4 mask and update input set */
598 if (ipv4_mask->hdr.version_ihl ||
599 ipv4_mask->hdr.total_length ||
600 ipv4_mask->hdr.packet_id ||
601 ipv4_mask->hdr.hdr_checksum) {
602 rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_ITEM,
605 "Invalid IPv4 mask.");
609 if (ipv4_mask->hdr.src_addr)
610 *input |= ICE_INSET_IPV4_SRC;
611 if (ipv4_mask->hdr.dst_addr)
612 *input |= ICE_INSET_IPV4_DST;
613 if (ipv4_mask->hdr.time_to_live)
614 *input |= ICE_INSET_IPV4_TTL;
615 if (ipv4_mask->hdr.next_proto_id)
616 *input |= ICE_INSET_IPV4_PROTO;
617 if (ipv4_mask->hdr.type_of_service)
618 *input |= ICE_INSET_IPV4_TOS;
620 list[t].type = (tunnel_valid == 0) ?
621 ICE_IPV4_OFOS : ICE_IPV4_IL;
622 if (ipv4_mask->hdr.src_addr) {
623 list[t].h_u.ipv4_hdr.src_addr =
624 ipv4_spec->hdr.src_addr;
625 list[t].m_u.ipv4_hdr.src_addr =
626 ipv4_mask->hdr.src_addr;
629 if (ipv4_mask->hdr.dst_addr) {
630 list[t].h_u.ipv4_hdr.dst_addr =
631 ipv4_spec->hdr.dst_addr;
632 list[t].m_u.ipv4_hdr.dst_addr =
633 ipv4_mask->hdr.dst_addr;
636 if (ipv4_mask->hdr.time_to_live) {
637 list[t].h_u.ipv4_hdr.time_to_live =
638 ipv4_spec->hdr.time_to_live;
639 list[t].m_u.ipv4_hdr.time_to_live =
640 ipv4_mask->hdr.time_to_live;
643 if (ipv4_mask->hdr.next_proto_id) {
644 list[t].h_u.ipv4_hdr.protocol =
645 ipv4_spec->hdr.next_proto_id;
646 list[t].m_u.ipv4_hdr.protocol =
647 ipv4_mask->hdr.next_proto_id;
650 if ((ipv4_spec->hdr.next_proto_id &
651 ipv4_mask->hdr.next_proto_id) ==
652 ICE_IPV4_PROTO_NVGRE)
653 *tun_type = ICE_SW_TUN_AND_NON_TUN;
654 if (ipv4_mask->hdr.type_of_service) {
655 list[t].h_u.ipv4_hdr.tos =
656 ipv4_spec->hdr.type_of_service;
657 list[t].m_u.ipv4_hdr.tos =
658 ipv4_mask->hdr.type_of_service;
665 case RTE_FLOW_ITEM_TYPE_IPV6:
666 ipv6_spec = item->spec;
667 ipv6_mask = item->mask;
669 inner_ipv6_valid = 1;
670 input = &inner_input_set;
673 input = &outer_input_set;
676 if (ipv6_spec && ipv6_mask) {
677 if (ipv6_mask->hdr.payload_len) {
678 rte_flow_error_set(error, EINVAL,
679 RTE_FLOW_ERROR_TYPE_ITEM,
681 "Invalid IPv6 mask");
685 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
686 if (ipv6_mask->hdr.src_addr[j]) {
687 *input |= ICE_INSET_IPV6_SRC;
691 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
692 if (ipv6_mask->hdr.dst_addr[j]) {
693 *input |= ICE_INSET_IPV6_DST;
697 if (ipv6_mask->hdr.proto)
698 *input |= ICE_INSET_IPV6_NEXT_HDR;
699 if (ipv6_mask->hdr.hop_limits)
700 *input |= ICE_INSET_IPV6_HOP_LIMIT;
701 if (ipv6_mask->hdr.vtc_flow &
702 rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
703 *input |= ICE_INSET_IPV6_TC;
705 list[t].type = (tunnel_valid == 0) ?
706 ICE_IPV6_OFOS : ICE_IPV6_IL;
707 struct ice_ipv6_hdr *f;
708 struct ice_ipv6_hdr *s;
709 f = &list[t].h_u.ipv6_hdr;
710 s = &list[t].m_u.ipv6_hdr;
711 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
712 if (ipv6_mask->hdr.src_addr[j]) {
714 ipv6_spec->hdr.src_addr[j];
716 ipv6_mask->hdr.src_addr[j];
719 if (ipv6_mask->hdr.dst_addr[j]) {
721 ipv6_spec->hdr.dst_addr[j];
723 ipv6_mask->hdr.dst_addr[j];
727 if (ipv6_mask->hdr.proto) {
729 ipv6_spec->hdr.proto;
731 ipv6_mask->hdr.proto;
734 if (ipv6_mask->hdr.hop_limits) {
736 ipv6_spec->hdr.hop_limits;
738 ipv6_mask->hdr.hop_limits;
741 if (ipv6_mask->hdr.vtc_flow &
743 (RTE_IPV6_HDR_TC_MASK)) {
744 struct ice_le_ver_tc_flow vtf;
745 vtf.u.fld.version = 0;
746 vtf.u.fld.flow_label = 0;
747 vtf.u.fld.tc = (rte_be_to_cpu_32
748 (ipv6_spec->hdr.vtc_flow) &
749 RTE_IPV6_HDR_TC_MASK) >>
750 RTE_IPV6_HDR_TC_SHIFT;
751 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
752 vtf.u.fld.tc = (rte_be_to_cpu_32
753 (ipv6_mask->hdr.vtc_flow) &
754 RTE_IPV6_HDR_TC_MASK) >>
755 RTE_IPV6_HDR_TC_SHIFT;
756 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
763 case RTE_FLOW_ITEM_TYPE_UDP:
764 udp_spec = item->spec;
765 udp_mask = item->mask;
768 input = &inner_input_set;
771 input = &outer_input_set;
774 if (udp_spec && udp_mask) {
775 /* Check UDP mask and update input set*/
776 if (udp_mask->hdr.dgram_len ||
777 udp_mask->hdr.dgram_cksum) {
778 rte_flow_error_set(error, EINVAL,
779 RTE_FLOW_ERROR_TYPE_ITEM,
785 if (udp_mask->hdr.src_port)
786 *input |= ICE_INSET_UDP_SRC_PORT;
787 if (udp_mask->hdr.dst_port)
788 *input |= ICE_INSET_UDP_DST_PORT;
790 if (*tun_type == ICE_SW_TUN_VXLAN &&
792 list[t].type = ICE_UDP_OF;
794 list[t].type = ICE_UDP_ILOS;
795 if (udp_mask->hdr.src_port) {
796 list[t].h_u.l4_hdr.src_port =
797 udp_spec->hdr.src_port;
798 list[t].m_u.l4_hdr.src_port =
799 udp_mask->hdr.src_port;
802 if (udp_mask->hdr.dst_port) {
803 list[t].h_u.l4_hdr.dst_port =
804 udp_spec->hdr.dst_port;
805 list[t].m_u.l4_hdr.dst_port =
806 udp_mask->hdr.dst_port;
813 case RTE_FLOW_ITEM_TYPE_TCP:
814 tcp_spec = item->spec;
815 tcp_mask = item->mask;
818 input = &inner_input_set;
821 input = &outer_input_set;
824 if (tcp_spec && tcp_mask) {
825 /* Check TCP mask and update input set */
826 if (tcp_mask->hdr.sent_seq ||
827 tcp_mask->hdr.recv_ack ||
828 tcp_mask->hdr.data_off ||
829 tcp_mask->hdr.tcp_flags ||
830 tcp_mask->hdr.rx_win ||
831 tcp_mask->hdr.cksum ||
832 tcp_mask->hdr.tcp_urp) {
833 rte_flow_error_set(error, EINVAL,
834 RTE_FLOW_ERROR_TYPE_ITEM,
840 if (tcp_mask->hdr.src_port)
841 *input |= ICE_INSET_TCP_SRC_PORT;
842 if (tcp_mask->hdr.dst_port)
843 *input |= ICE_INSET_TCP_DST_PORT;
844 list[t].type = ICE_TCP_IL;
845 if (tcp_mask->hdr.src_port) {
846 list[t].h_u.l4_hdr.src_port =
847 tcp_spec->hdr.src_port;
848 list[t].m_u.l4_hdr.src_port =
849 tcp_mask->hdr.src_port;
852 if (tcp_mask->hdr.dst_port) {
853 list[t].h_u.l4_hdr.dst_port =
854 tcp_spec->hdr.dst_port;
855 list[t].m_u.l4_hdr.dst_port =
856 tcp_mask->hdr.dst_port;
863 case RTE_FLOW_ITEM_TYPE_SCTP:
864 sctp_spec = item->spec;
865 sctp_mask = item->mask;
866 if (sctp_spec && sctp_mask) {
867 /* Check SCTP mask and update input set */
868 if (sctp_mask->hdr.cksum) {
869 rte_flow_error_set(error, EINVAL,
870 RTE_FLOW_ERROR_TYPE_ITEM,
872 "Invalid SCTP mask");
876 input = &inner_input_set;
878 input = &outer_input_set;
880 if (sctp_mask->hdr.src_port)
881 *input |= ICE_INSET_SCTP_SRC_PORT;
882 if (sctp_mask->hdr.dst_port)
883 *input |= ICE_INSET_SCTP_DST_PORT;
885 list[t].type = ICE_SCTP_IL;
886 if (sctp_mask->hdr.src_port) {
887 list[t].h_u.sctp_hdr.src_port =
888 sctp_spec->hdr.src_port;
889 list[t].m_u.sctp_hdr.src_port =
890 sctp_mask->hdr.src_port;
893 if (sctp_mask->hdr.dst_port) {
894 list[t].h_u.sctp_hdr.dst_port =
895 sctp_spec->hdr.dst_port;
896 list[t].m_u.sctp_hdr.dst_port =
897 sctp_mask->hdr.dst_port;
904 case RTE_FLOW_ITEM_TYPE_VXLAN:
905 vxlan_spec = item->spec;
906 vxlan_mask = item->mask;
907 /* Check if VXLAN item is used to describe protocol.
908 * If yes, both spec and mask should be NULL.
909 * If no, both spec and mask shouldn't be NULL.
911 if ((!vxlan_spec && vxlan_mask) ||
912 (vxlan_spec && !vxlan_mask)) {
913 rte_flow_error_set(error, EINVAL,
914 RTE_FLOW_ERROR_TYPE_ITEM,
916 "Invalid VXLAN item");
921 input = &inner_input_set;
922 if (vxlan_spec && vxlan_mask) {
923 list[t].type = ICE_VXLAN;
924 if (vxlan_mask->vni[0] ||
925 vxlan_mask->vni[1] ||
926 vxlan_mask->vni[2]) {
927 list[t].h_u.tnl_hdr.vni =
928 (vxlan_spec->vni[2] << 16) |
929 (vxlan_spec->vni[1] << 8) |
931 list[t].m_u.tnl_hdr.vni =
932 (vxlan_mask->vni[2] << 16) |
933 (vxlan_mask->vni[1] << 8) |
935 *input |= ICE_INSET_VXLAN_VNI;
942 case RTE_FLOW_ITEM_TYPE_NVGRE:
943 nvgre_spec = item->spec;
944 nvgre_mask = item->mask;
945 /* Check if NVGRE item is used to describe protocol.
946 * If yes, both spec and mask should be NULL.
947 * If no, both spec and mask shouldn't be NULL.
949 if ((!nvgre_spec && nvgre_mask) ||
950 (nvgre_spec && !nvgre_mask)) {
951 rte_flow_error_set(error, EINVAL,
952 RTE_FLOW_ERROR_TYPE_ITEM,
954 "Invalid NVGRE item");
959 input = &inner_input_set;
960 if (nvgre_spec && nvgre_mask) {
961 list[t].type = ICE_NVGRE;
962 if (nvgre_mask->tni[0] ||
963 nvgre_mask->tni[1] ||
964 nvgre_mask->tni[2]) {
965 list[t].h_u.nvgre_hdr.tni_flow =
966 (nvgre_spec->tni[2] << 16) |
967 (nvgre_spec->tni[1] << 8) |
969 list[t].m_u.nvgre_hdr.tni_flow =
970 (nvgre_mask->tni[2] << 16) |
971 (nvgre_mask->tni[1] << 8) |
973 *input |= ICE_INSET_NVGRE_TNI;
980 case RTE_FLOW_ITEM_TYPE_VLAN:
981 vlan_spec = item->spec;
982 vlan_mask = item->mask;
983 /* Check if VLAN item is used to describe protocol.
984 * If yes, both spec and mask should be NULL.
985 * If no, both spec and mask shouldn't be NULL.
987 if ((!vlan_spec && vlan_mask) ||
988 (vlan_spec && !vlan_mask)) {
989 rte_flow_error_set(error, EINVAL,
990 RTE_FLOW_ERROR_TYPE_ITEM,
992 "Invalid VLAN item");
997 if (!outer_vlan_valid)
998 outer_vlan_valid = 1;
1000 inner_vlan_valid = 1;
1003 input = &outer_input_set;
1005 if (vlan_spec && vlan_mask) {
1007 if (!inner_vlan_valid) {
1008 list[t].type = ICE_VLAN_EX;
1010 ICE_INSET_VLAN_OUTER;
1012 list[t].type = ICE_VLAN_IN;
1014 ICE_INSET_VLAN_INNER;
1017 list[t].type = ICE_VLAN_OFOS;
1018 *input |= ICE_INSET_VLAN_INNER;
1021 if (vlan_mask->tci) {
1022 list[t].h_u.vlan_hdr.vlan =
1024 list[t].m_u.vlan_hdr.vlan =
1026 input_set_byte += 2;
1028 if (vlan_mask->inner_type) {
1029 rte_flow_error_set(error, EINVAL,
1030 RTE_FLOW_ERROR_TYPE_ITEM,
1032 "Invalid VLAN input set.");
1039 case RTE_FLOW_ITEM_TYPE_PPPOED:
1040 case RTE_FLOW_ITEM_TYPE_PPPOES:
1041 pppoe_spec = item->spec;
1042 pppoe_mask = item->mask;
1043 /* Check if PPPoE item is used to describe protocol.
1044 * If yes, both spec and mask should be NULL.
1045 * If no, both spec and mask shouldn't be NULL.
1047 if ((!pppoe_spec && pppoe_mask) ||
1048 (pppoe_spec && !pppoe_mask)) {
1049 rte_flow_error_set(error, EINVAL,
1050 RTE_FLOW_ERROR_TYPE_ITEM,
1052 "Invalid pppoe item");
1055 pppoe_patt_valid = 1;
1056 input = &outer_input_set;
1057 if (pppoe_spec && pppoe_mask) {
1058 /* Check pppoe mask and update input set */
1059 if (pppoe_mask->length ||
1061 pppoe_mask->version_type) {
1062 rte_flow_error_set(error, EINVAL,
1063 RTE_FLOW_ERROR_TYPE_ITEM,
1065 "Invalid pppoe mask");
1068 list[t].type = ICE_PPPOE;
1069 if (pppoe_mask->session_id) {
1070 list[t].h_u.pppoe_hdr.session_id =
1071 pppoe_spec->session_id;
1072 list[t].m_u.pppoe_hdr.session_id =
1073 pppoe_mask->session_id;
1074 *input |= ICE_INSET_PPPOE_SESSION;
1075 input_set_byte += 2;
1078 pppoe_elem_valid = 1;
1082 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1083 pppoe_proto_spec = item->spec;
1084 pppoe_proto_mask = item->mask;
1085 /* Check if PPPoE optional proto_id item
1086 * is used to describe protocol.
1087 * If yes, both spec and mask should be NULL.
1088 * If no, both spec and mask shouldn't be NULL.
1090 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1091 (pppoe_proto_spec && !pppoe_proto_mask)) {
1092 rte_flow_error_set(error, EINVAL,
1093 RTE_FLOW_ERROR_TYPE_ITEM,
1095 "Invalid pppoe proto item");
1098 input = &outer_input_set;
1099 if (pppoe_proto_spec && pppoe_proto_mask) {
1100 if (pppoe_elem_valid)
1102 list[t].type = ICE_PPPOE;
1103 if (pppoe_proto_mask->proto_id) {
1104 list[t].h_u.pppoe_hdr.ppp_prot_id =
1105 pppoe_proto_spec->proto_id;
1106 list[t].m_u.pppoe_hdr.ppp_prot_id =
1107 pppoe_proto_mask->proto_id;
1108 *input |= ICE_INSET_PPPOE_PROTO;
1109 input_set_byte += 2;
1110 pppoe_prot_valid = 1;
1112 if ((pppoe_proto_mask->proto_id &
1113 pppoe_proto_spec->proto_id) !=
1114 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1115 (pppoe_proto_mask->proto_id &
1116 pppoe_proto_spec->proto_id) !=
1117 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1118 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1120 *tun_type = ICE_SW_TUN_PPPOE;
1126 case RTE_FLOW_ITEM_TYPE_ESP:
1127 esp_spec = item->spec;
1128 esp_mask = item->mask;
1129 if ((esp_spec && !esp_mask) ||
1130 (!esp_spec && esp_mask)) {
1131 rte_flow_error_set(error, EINVAL,
1132 RTE_FLOW_ERROR_TYPE_ITEM,
1134 "Invalid esp item");
1137 /* Check esp mask and update input set */
1138 if (esp_mask && esp_mask->hdr.seq) {
1139 rte_flow_error_set(error, EINVAL,
1140 RTE_FLOW_ERROR_TYPE_ITEM,
1142 "Invalid esp mask");
1145 input = &outer_input_set;
1146 if (!esp_spec && !esp_mask && !(*input)) {
1148 if (ipv6_valid && udp_valid)
1150 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1151 else if (ipv6_valid)
1152 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1153 else if (ipv4_valid)
1155 } else if (esp_spec && esp_mask &&
1158 list[t].type = ICE_NAT_T;
1160 list[t].type = ICE_ESP;
1161 list[t].h_u.esp_hdr.spi =
1163 list[t].m_u.esp_hdr.spi =
1165 *input |= ICE_INSET_ESP_SPI;
1166 input_set_byte += 4;
1170 if (!profile_rule) {
1171 if (ipv6_valid && udp_valid)
1172 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1173 else if (ipv4_valid && udp_valid)
1174 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1175 else if (ipv6_valid)
1176 *tun_type = ICE_SW_TUN_IPV6_ESP;
1177 else if (ipv4_valid)
1178 *tun_type = ICE_SW_TUN_IPV4_ESP;
1182 case RTE_FLOW_ITEM_TYPE_AH:
1183 ah_spec = item->spec;
1184 ah_mask = item->mask;
1185 if ((ah_spec && !ah_mask) ||
1186 (!ah_spec && ah_mask)) {
1187 rte_flow_error_set(error, EINVAL,
1188 RTE_FLOW_ERROR_TYPE_ITEM,
1193 /* Check ah mask and update input set */
1195 (ah_mask->next_hdr ||
1196 ah_mask->payload_len ||
1198 ah_mask->reserved)) {
1199 rte_flow_error_set(error, EINVAL,
1200 RTE_FLOW_ERROR_TYPE_ITEM,
1206 input = &outer_input_set;
1207 if (!ah_spec && !ah_mask && !(*input)) {
1209 if (ipv6_valid && udp_valid)
1211 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1212 else if (ipv6_valid)
1213 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1214 else if (ipv4_valid)
1216 } else if (ah_spec && ah_mask &&
1218 list[t].type = ICE_AH;
1219 list[t].h_u.ah_hdr.spi =
1221 list[t].m_u.ah_hdr.spi =
1223 *input |= ICE_INSET_AH_SPI;
1224 input_set_byte += 4;
1228 if (!profile_rule) {
1231 else if (ipv6_valid)
1232 *tun_type = ICE_SW_TUN_IPV6_AH;
1233 else if (ipv4_valid)
1234 *tun_type = ICE_SW_TUN_IPV4_AH;
1238 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1239 l2tp_spec = item->spec;
1240 l2tp_mask = item->mask;
1241 if ((l2tp_spec && !l2tp_mask) ||
1242 (!l2tp_spec && l2tp_mask)) {
1243 rte_flow_error_set(error, EINVAL,
1244 RTE_FLOW_ERROR_TYPE_ITEM,
1246 "Invalid l2tp item");
1250 input = &outer_input_set;
1251 if (!l2tp_spec && !l2tp_mask && !(*input)) {
1254 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1255 else if (ipv4_valid)
1257 } else if (l2tp_spec && l2tp_mask &&
1258 l2tp_mask->session_id){
1259 list[t].type = ICE_L2TPV3;
1260 list[t].h_u.l2tpv3_sess_hdr.session_id =
1261 l2tp_spec->session_id;
1262 list[t].m_u.l2tpv3_sess_hdr.session_id =
1263 l2tp_mask->session_id;
1264 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1265 input_set_byte += 4;
1269 if (!profile_rule) {
1272 ICE_SW_TUN_IPV6_L2TPV3;
1273 else if (ipv4_valid)
1275 ICE_SW_TUN_IPV4_L2TPV3;
1279 case RTE_FLOW_ITEM_TYPE_PFCP:
1280 pfcp_spec = item->spec;
1281 pfcp_mask = item->mask;
1282 /* Check if PFCP item is used to describe protocol.
1283 * If yes, both spec and mask should be NULL.
1284 * If no, both spec and mask shouldn't be NULL.
1286 if ((!pfcp_spec && pfcp_mask) ||
1287 (pfcp_spec && !pfcp_mask)) {
1288 rte_flow_error_set(error, EINVAL,
1289 RTE_FLOW_ERROR_TYPE_ITEM,
1291 "Invalid PFCP item");
1294 if (pfcp_spec && pfcp_mask) {
1295 /* Check pfcp mask and update input set */
1296 if (pfcp_mask->msg_type ||
1297 pfcp_mask->msg_len ||
1299 rte_flow_error_set(error, EINVAL,
1300 RTE_FLOW_ERROR_TYPE_ITEM,
1302 "Invalid pfcp mask");
1305 if (pfcp_mask->s_field &&
1306 pfcp_spec->s_field == 0x01 &&
1309 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1310 else if (pfcp_mask->s_field &&
1311 pfcp_spec->s_field == 0x01)
1313 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1314 else if (pfcp_mask->s_field &&
1315 !pfcp_spec->s_field &&
1318 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1319 else if (pfcp_mask->s_field &&
1320 !pfcp_spec->s_field)
1322 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1328 case RTE_FLOW_ITEM_TYPE_GTPU:
1329 gtp_spec = item->spec;
1330 gtp_mask = item->mask;
1331 if (gtp_spec && !gtp_mask) {
1332 rte_flow_error_set(error, EINVAL,
1333 RTE_FLOW_ERROR_TYPE_ITEM,
1335 "Invalid GTP item");
1338 if (gtp_spec && gtp_mask) {
1339 if (gtp_mask->v_pt_rsv_flags ||
1340 gtp_mask->msg_type ||
1341 gtp_mask->msg_len) {
1342 rte_flow_error_set(error, EINVAL,
1343 RTE_FLOW_ERROR_TYPE_ITEM,
1345 "Invalid GTP mask");
1348 input = &outer_input_set;
1350 *input |= ICE_INSET_GTPU_TEID;
1351 list[t].type = ICE_GTP;
1352 list[t].h_u.gtp_hdr.teid =
1354 list[t].m_u.gtp_hdr.teid =
1356 input_set_byte += 4;
1363 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1364 gtp_psc_spec = item->spec;
1365 gtp_psc_mask = item->mask;
1366 if (gtp_psc_spec && !gtp_psc_mask) {
1367 rte_flow_error_set(error, EINVAL,
1368 RTE_FLOW_ERROR_TYPE_ITEM,
1370 "Invalid GTPU_EH item");
1373 if (gtp_psc_spec && gtp_psc_mask) {
1374 if (gtp_psc_mask->pdu_type) {
1375 rte_flow_error_set(error, EINVAL,
1376 RTE_FLOW_ERROR_TYPE_ITEM,
1378 "Invalid GTPU_EH mask");
1381 input = &outer_input_set;
1382 if (gtp_psc_mask->qfi)
1383 *input |= ICE_INSET_GTPU_QFI;
1384 list[t].type = ICE_GTP;
1385 list[t].h_u.gtp_hdr.qfi =
1387 list[t].m_u.gtp_hdr.qfi =
1389 input_set_byte += 1;
1395 case RTE_FLOW_ITEM_TYPE_VOID:
1399 rte_flow_error_set(error, EINVAL,
1400 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1401 "Invalid pattern item.");
1406 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1407 inner_vlan_valid && outer_vlan_valid)
1408 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1409 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1410 inner_vlan_valid && outer_vlan_valid)
1411 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1412 else if (*tun_type == ICE_NON_TUN &&
1413 inner_vlan_valid && outer_vlan_valid)
1414 *tun_type = ICE_NON_TUN_QINQ;
1415 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1416 inner_vlan_valid && outer_vlan_valid)
1417 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1419 if (pppoe_patt_valid && !pppoe_prot_valid) {
1420 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1421 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1422 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1423 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1424 else if (inner_vlan_valid && outer_vlan_valid)
1425 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1426 else if (ipv6_valid && udp_valid)
1427 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1428 else if (ipv6_valid && tcp_valid)
1429 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1430 else if (ipv4_valid && udp_valid)
1431 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1432 else if (ipv4_valid && tcp_valid)
1433 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1434 else if (ipv6_valid)
1435 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1436 else if (ipv4_valid)
1437 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1439 *tun_type = ICE_SW_TUN_PPPOE;
1442 if (gtpu_valid && gtpu_psc_valid) {
1443 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1444 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1445 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1446 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1447 else if (ipv4_valid && inner_ipv4_valid)
1448 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1449 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1450 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1451 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1452 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1453 else if (ipv4_valid && inner_ipv6_valid)
1454 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1455 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1456 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1457 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1458 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1459 else if (ipv6_valid && inner_ipv4_valid)
1460 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1461 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1462 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1463 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1464 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1465 else if (ipv6_valid && inner_ipv6_valid)
1466 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1467 else if (ipv4_valid)
1468 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1469 else if (ipv6_valid)
1470 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1471 } else if (gtpu_valid) {
1472 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1473 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1474 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1475 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1476 else if (ipv4_valid && inner_ipv4_valid)
1477 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1478 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1479 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1480 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1481 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1482 else if (ipv4_valid && inner_ipv6_valid)
1483 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1484 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1485 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1486 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1487 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1488 else if (ipv6_valid && inner_ipv4_valid)
1489 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1490 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1491 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1492 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1493 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1494 else if (ipv6_valid && inner_ipv6_valid)
1495 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1496 else if (ipv4_valid)
1497 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1498 else if (ipv6_valid)
1499 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1502 if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1503 *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1504 for (k = 0; k < t; k++) {
1505 if (list[k].type == ICE_GTP)
1506 list[k].type = ICE_GTP_NO_PAY;
1510 if (*tun_type == ICE_NON_TUN) {
1512 *tun_type = ICE_SW_TUN_VXLAN;
1513 else if (nvgre_valid)
1514 *tun_type = ICE_SW_TUN_NVGRE;
1515 else if (ipv4_valid && tcp_valid)
1516 *tun_type = ICE_SW_IPV4_TCP;
1517 else if (ipv4_valid && udp_valid)
1518 *tun_type = ICE_SW_IPV4_UDP;
1519 else if (ipv6_valid && tcp_valid)
1520 *tun_type = ICE_SW_IPV6_TCP;
1521 else if (ipv6_valid && udp_valid)
1522 *tun_type = ICE_SW_IPV6_UDP;
1525 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1526 rte_flow_error_set(error, EINVAL,
1527 RTE_FLOW_ERROR_TYPE_ITEM,
1529 "too much input set");
1536 if ((!outer_input_set && !inner_input_set &&
1537 !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1538 ~pattern_match_item->input_set_mask_o) ||
1539 (inner_input_set & ~pattern_match_item->input_set_mask_i))
/*
 * Parse flow actions when running as DCF (Device Config Function).
 * Only the VF redirect action and DROP are accepted; the resulting
 * switch action (fltr_act, vsi_handle, src, flag, priority) is
 * written into @rule_info.
 * NOTE(review): several lines (return type, 'priority' parameter,
 * braces/returns) are elided in this view of the file.
 */
1546 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1547 const struct rte_flow_action *actions,
1549 struct rte_flow_error *error,
1550 struct ice_adv_rule_info *rule_info)
1552 const struct rte_flow_action_vf *act_vf;
1553 const struct rte_flow_action *action;
1554 enum rte_flow_action_type action_type;
1556 for (action = actions; action->type !=
1557 RTE_FLOW_ACTION_TYPE_END; action++) {
1558 action_type = action->type;
1559 switch (action_type) {
1560 case RTE_FLOW_ACTION_TYPE_VF:
1561 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1562 act_vf = action->conf;
/* A VF id beyond the number of VFs is only valid when
 * 'original' is set (forward to the DCF itself).
 */
1564 if (act_vf->id >= ad->real_hw.num_vfs &&
1565 !act_vf->original) {
1566 rte_flow_error_set(error,
1567 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1573 if (act_vf->original)
1574 rule_info->sw_act.vsi_handle =
1575 ad->real_hw.avf.bus.func;
1577 rule_info->sw_act.vsi_handle = act_vf->id;
1580 case RTE_FLOW_ACTION_TYPE_DROP:
1581 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
/* Any other action type is rejected in DCF mode. */
1585 rte_flow_error_set(error,
1586 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1588 "Invalid action type");
/* Rule matches RX traffic sourced from the chosen VSI. */
1593 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1594 rule_info->sw_act.flag = ICE_FLTR_RX;
1596 /* 0 denotes lowest priority of recipe and highest priority
1597 * of rte_flow. Change rte_flow priority into recipe priority.
1599 rule_info->priority = ICE_SW_PRI_BASE - priority;
/*
 * Parse flow actions in normal (PF) mode: RSS-style queue group,
 * single QUEUE, DROP, or VOID.  Queue indices are offset by the PF/VSI
 * base queue.  On success the switch action in @rule_info is filled.
 * NOTE(review): return type, 'priority' parameter and several
 * statements (breaks, gotos, returns) are elided in this view.
 */
1605 ice_switch_parse_action(struct ice_pf *pf,
1606 const struct rte_flow_action *actions,
1608 struct rte_flow_error *error,
1609 struct ice_adv_rule_info *rule_info)
1611 struct ice_vsi *vsi = pf->main_vsi;
1612 struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1613 const struct rte_flow_action_queue *act_q;
1614 const struct rte_flow_action_rss *act_qgrop;
1615 uint16_t base_queue, i;
1616 const struct rte_flow_action *action;
1617 enum rte_flow_action_type action_type;
/* Queue group sizes supported by the hardware. */
1618 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1619 2, 4, 8, 16, 32, 64, 128};
1621 base_queue = pf->base_queue + vsi->base_queue;
1622 for (action = actions; action->type !=
1623 RTE_FLOW_ACTION_TYPE_END; action++) {
1624 action_type = action->type;
1625 switch (action_type) {
1626 case RTE_FLOW_ACTION_TYPE_RSS:
/* RSS action here means "forward to a queue group". */
1627 act_qgrop = action->conf;
1628 if (act_qgrop->queue_num <= 1)
1630 rule_info->sw_act.fltr_act =
1632 rule_info->sw_act.fwd_id.q_id =
1633 base_queue + act_qgrop->queue[0];
/* Group size must be one of valid_qgrop_number[]. */
1634 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1635 if (act_qgrop->queue_num ==
1636 valid_qgrop_number[i])
1639 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured RX queues. */
1641 if ((act_qgrop->queue[0] +
1642 act_qgrop->queue_num) >
1643 dev_data->nb_rx_queues)
/* Queues in the group must be contiguous. */
1645 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1646 if (act_qgrop->queue[i + 1] !=
1647 act_qgrop->queue[i] + 1)
1649 rule_info->sw_act.qgrp_size =
1650 act_qgrop->queue_num;
1652 case RTE_FLOW_ACTION_TYPE_QUEUE:
1653 act_q = action->conf;
1654 if (act_q->index >= dev_data->nb_rx_queues)
1656 rule_info->sw_act.fltr_act =
1658 rule_info->sw_act.fwd_id.q_id =
1659 base_queue + act_q->index;
1662 case RTE_FLOW_ACTION_TYPE_DROP:
1663 rule_info->sw_act.fltr_act =
1667 case RTE_FLOW_ACTION_TYPE_VOID:
1675 rule_info->sw_act.vsi_handle = vsi->idx;
1677 rule_info->sw_act.src = vsi->idx;
1678 /* 0 denotes lowest priority of recipe and highest priority
1679 * of rte_flow. Change rte_flow priority into recipe priority.
1681 rule_info->priority = ICE_SW_PRI_BASE - priority;
/* Error labels: each sets a distinct rte_flow error message. */
1686 rte_flow_error_set(error,
1687 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1689 "Invalid action type or queue number");
1693 rte_flow_error_set(error,
1694 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1696 "Invalid queue region indexes");
1700 rte_flow_error_set(error,
1701 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1703 "Discontinuous queue region");
/*
 * Validate that the action list contains exactly one "fate" action
 * (VF, RSS, QUEUE or DROP); VOID actions are ignored, anything else
 * is rejected.
 * NOTE(review): counter increment / return statements are elided in
 * this view.
 */
1708 ice_switch_check_action(const struct rte_flow_action *actions,
1709 struct rte_flow_error *error)
1711 const struct rte_flow_action *action;
1712 enum rte_flow_action_type action_type;
1713 uint16_t actions_num = 0;
1715 for (action = actions; action->type !=
1716 RTE_FLOW_ACTION_TYPE_END; action++) {
1717 action_type = action->type;
1718 switch (action_type) {
1719 case RTE_FLOW_ACTION_TYPE_VF:
1720 case RTE_FLOW_ACTION_TYPE_RSS:
1721 case RTE_FLOW_ACTION_TYPE_QUEUE:
1722 case RTE_FLOW_ACTION_TYPE_DROP:
1725 case RTE_FLOW_ACTION_TYPE_VOID:
1728 rte_flow_error_set(error,
1729 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1731 "Invalid action type");
/* Exactly one fate action is required per rule. */
1736 if (actions_num != 1) {
1737 rte_flow_error_set(error,
1738 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1740 "Invalid action number");
/*
 * Top-level parser entry: pre-scan the pattern (count items, detect
 * QinQ and tunnel/non-tunnel ETH), allocate the lookup list and the
 * sw_meta carrier, match the pattern against the supported array,
 * parse pattern and actions, and hand the result back through *meta.
 * On failure the allocated resources are freed via the error paths.
 * NOTE(review): 'array_len', 'priority', 'meta' parameters and many
 * statements (gotos, returns, braces) are elided in this view.
 */
1748 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1749 struct ice_pattern_match_item *array,
1751 const struct rte_flow_item pattern[],
1752 const struct rte_flow_action actions[],
1755 struct rte_flow_error *error)
1757 struct ice_pf *pf = &ad->pf;
1759 struct sw_meta *sw_meta_ptr = NULL;
1760 struct ice_adv_rule_info rule_info;
1761 struct ice_adv_lkup_elem *list = NULL;
1762 uint16_t lkups_num = 0;
1763 const struct rte_flow_item *item = pattern;
1764 uint16_t item_num = 0;
1765 uint16_t vlan_num = 0;
1766 enum ice_sw_tunnel_type tun_type =
1768 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: count items, VLANs, and detect fully-masked ethertype. */
1770 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1772 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1773 const struct rte_flow_item_eth *eth_mask;
1775 eth_mask = item->mask;
/* Fully-masked ethertype means the rule must match both
 * tunneled and non-tunneled traffic.
 */
1778 if (eth_mask->type == UINT16_MAX)
1779 tun_type = ICE_SW_TUN_AND_NON_TUN;
1782 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1785 /* reserve one more memory slot for ETH which may
1786 * consume 2 lookup items.
1788 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN items indicate a QinQ pattern. */
1792 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1793 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1794 else if (vlan_num == 2)
1795 tun_type = ICE_NON_TUN_QINQ;
1797 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1799 rte_flow_error_set(error, EINVAL,
1800 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1801 "No memory for PMD internal items");
1806 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1808 rte_flow_error_set(error, EINVAL,
1809 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1810 "No memory for sw_pattern_meta_ptr");
1814 pattern_match_item =
1815 ice_search_pattern_match_item(ad, pattern, array, array_len,
1817 if (!pattern_match_item) {
1818 rte_flow_error_set(error, EINVAL,
1819 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1820 "Invalid input pattern");
1824 if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
1825 &tun_type, pattern_match_item)) {
1826 rte_flow_error_set(error, EINVAL,
1827 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1829 "Invalid input set");
1833 memset(&rule_info, 0, sizeof(rule_info));
1834 rule_info.tun_type = tun_type;
1836 ret = ice_switch_check_action(actions, error);
/* DCF mode uses the restricted action parser. */
1840 if (ad->hw.dcf_enabled)
1841 ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
1844 ret = ice_switch_parse_action(pf, actions, priority, error,
/* Success: transfer ownership of list/rule_info to the caller. */
1851 *meta = sw_meta_ptr;
1852 ((struct sw_meta *)*meta)->list = list;
1853 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1854 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup paths. */
1857 rte_free(sw_meta_ptr);
1860 rte_free(pattern_match_item);
1866 rte_free(sw_meta_ptr);
1867 rte_free(pattern_match_item);
/*
 * Flow query callback: counting is not supported by the switch
 * filter engine, so always report an error.
 */
1873 ice_switch_query(struct ice_adapter *ad __rte_unused,
1874 struct rte_flow *flow __rte_unused,
1875 struct rte_flow_query_count *count __rte_unused,
1876 struct rte_flow_error *error)
1878 rte_flow_error_set(error, EINVAL,
1879 RTE_FLOW_ERROR_TYPE_HANDLE,
1881 "count action not supported by switch filter");
/*
 * Redirect an existing switch rule to a new VSI: locate the matching
 * filter entry in the recipe's rule list, duplicate its lookups,
 * remove the old rule, update the VSI context with the new VSI
 * number, then replay the rule with the updated action.
 * NOTE(review): 'lkups_cnt'/'ret' declarations and several statements
 * (breaks, returns, braces) are elided in this view.
 */
1887 ice_switch_redirect(struct ice_adapter *ad,
1888 struct rte_flow *flow,
1889 struct ice_flow_redirect *rd)
1891 struct ice_rule_query_data *rdata = flow->rule;
1892 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1893 struct ice_adv_lkup_elem *lkups_dp = NULL;
1894 struct LIST_HEAD_TYPE *list_head;
1895 struct ice_adv_rule_info rinfo;
1896 struct ice_hw *hw = &ad->hw;
1897 struct ice_switch_info *sw;
/* Only rules on the redirected VSI are affected. */
1901 if (rdata->vsi_handle != rd->vsi_handle)
1904 sw = hw->switch_info;
1905 if (!sw->recp_list[rdata->rid].recp_created)
/* Only VSI-type redirects are handled here. */
1908 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1911 list_head = &sw->recp_list[rdata->rid].filt_rules;
1912 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1914 rinfo = list_itr->rule_info;
/* Match by rule id: either a direct FWD_TO_VSI on this VSI,
 * or a FWD_TO_VSI_LIST entry (converted below).
 */
1915 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1916 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1917 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1918 (rinfo.fltr_rule_id == rdata->rule_id &&
1919 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1920 lkups_cnt = list_itr->lkups_cnt;
/* Duplicate the lookups: the originals are freed when the
 * old rule is removed below.
 */
1921 lkups_dp = (struct ice_adv_lkup_elem *)
1922 ice_memdup(hw, list_itr->lkups,
1923 sizeof(*list_itr->lkups) *
1924 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1927 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* VSI-list action is rewritten to a plain FWD_TO_VSI. */
1931 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1932 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1933 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1942 /* Remove the old rule */
1943 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1946 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1952 /* Update VSI context */
1953 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1955 /* Replay the rule */
1956 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1959 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1964 ice_free(hw, lkups_dp);
/*
 * Engine init: register the permission parser when pipe-mode is
 * enabled via devargs, otherwise the distributor parser.
 */
1969 ice_switch_init(struct ice_adapter *ad)
1972 struct ice_flow_parser *dist_parser;
1973 struct ice_flow_parser *perm_parser;
1975 if (ad->devargs.pipe_mode_support) {
1976 perm_parser = &ice_switch_perm_parser;
1977 ret = ice_register_parser(perm_parser, ad);
1979 dist_parser = &ice_switch_dist_parser;
1980 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine teardown: unregister whichever parser ice_switch_init()
 * registered, mirroring its pipe_mode_support branch.
 */
1986 ice_switch_uninit(struct ice_adapter *ad)
1988 struct ice_flow_parser *dist_parser;
1989 struct ice_flow_parser *perm_parser;
1991 if (ad->devargs.pipe_mode_support) {
1992 perm_parser = &ice_switch_perm_parser;
1993 ice_unregister_parser(perm_parser, ad);
1995 dist_parser = &ice_switch_dist_parser;
1996 ice_unregister_parser(dist_parser, ad);
/* Switch filter engine ops table plugged into the generic flow
 * framework; note the query callback only rejects count queries.
 */
2001 ice_flow_engine ice_switch_engine = {
2002 .init = ice_switch_init,
2003 .uninit = ice_switch_uninit,
2004 .create = ice_switch_create,
2005 .destroy = ice_switch_destroy,
2006 .query_count = ice_switch_query,
2007 .redirect = ice_switch_redirect,
2008 .free = ice_switch_filter_rule_free,
2009 .type = ICE_FLOW_ENGINE_SWITCH,
/* Parser used at the distributor stage (default, non-pipe mode). */
2013 ice_flow_parser ice_switch_dist_parser = {
2014 .engine = &ice_switch_engine,
2015 .array = ice_switch_pattern_dist_list,
2016 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
2017 .parse_pattern_action = ice_switch_parse_pattern_action,
2018 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Parser used at the permission stage (pipe-mode devarg set). */
2022 ice_flow_parser ice_switch_perm_parser = {
2023 .engine = &ice_switch_engine,
2024 .array = ice_switch_pattern_perm_list,
2025 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
2026 .parse_pattern_action = ice_switch_parse_pattern_action,
2027 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the flow framework at
 * shared-object load time.
 */
2030 RTE_INIT(ice_sw_engine_init)
2032 struct ice_flow_engine *engine = &ice_switch_engine;
2033 ice_register_flow_engine(engine);