1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
29 #define MAX_QGRP_NUM_TYPE 7
/* Appears to cap the total number of bytes of matched header fields in one
 * rule's input set — TODO confirm against where input_set_byte is checked.
 */
30 #define MAX_INPUT_SET_BYTE 32
/* Standard PPP protocol-field values: 0x0021 = IPv4, 0x0057 = IPv6 payload. */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 protocol number 47 (GRE); compared against the IPv4 next_proto_id in
 * the parser to flag NVGRE-capable flows.
 */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Input-set bitmaps: each ICE_SW_INSET_* macro ORs together the ICE_INSET_*
 * field flags that a given flow pattern is allowed to match on.
 * Plain Ethernet: both MACs plus EtherType.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
/* Ethernet + single (inner) VLAN tag. */
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
39 #define ICE_SW_INSET_MAC_QINQ ( \
40 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
42 #define ICE_SW_INSET_MAC_IPV4 ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
/* QinQ (double VLAN) + IPv4, optionally with TCP/UDP ports. */
45 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
46 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
47 #define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
48 ICE_SW_INSET_MAC_QINQ_IPV4 | \
49 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
50 #define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
51 ICE_SW_INSET_MAC_QINQ_IPV4 | \
52 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* MAC + IPv4 + L4 ports. Note IPV4_PROTO is deliberately absent here,
 * presumably because the TCP/UDP pattern item already fixes the protocol —
 * TODO confirm.
 */
53 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
54 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
55 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
56 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
57 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
58 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
59 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
60 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* MAC + IPv6: addresses, traffic class, hop limit, next header. */
61 #define ICE_SW_INSET_MAC_IPV6 ( \
62 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
63 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
64 ICE_INSET_IPV6_NEXT_HDR)
/* QinQ + IPv6, optionally with TCP/UDP ports. */
65 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
66 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
67 #define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
68 ICE_SW_INSET_MAC_QINQ_IPV6 | \
69 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
70 #define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
71 ICE_SW_INSET_MAC_QINQ_IPV6 | \
72 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* MAC + IPv6 + L4 ports (NEXT_HDR omitted, as with the IPv4 variants). */
73 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
74 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
75 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
76 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
77 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
78 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
79 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
80 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
81 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
82 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
85 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
87 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
88 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
89 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
90 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
/* Distributor-mode NVGRE: inner IPv4 addresses + UDP ports + inner DMAC,
 * plus the NVGRE tenant network ID.
 */
91 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
92 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
93 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
94 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
/* Distributor-mode VXLAN equivalents, keyed on the VXLAN VNI instead. */
95 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
96 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
97 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
98 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
99 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
100 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
101 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
102 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
/* Perm(-stage) tunnel rules match only the inner IPv4 header fields; used by
 * the *_perm pattern list — note no DMAC/VNI/TNI, unlike the DIST variants.
 */
103 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
104 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
105 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
106 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
107 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
108 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
110 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
111 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
112 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
114 #define ICE_SW_INSET_MAC_PPPOE ( \
115 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
116 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
/* PPPoE with the PPP protocol field additionally matchable. */
117 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
118 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
119 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
120 ICE_INSET_PPPOE_PROTO)
/* PPPoE session + inner IPv4/IPv6 (and TCP/UDP) field combinations. */
121 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
122 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
123 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
124 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
125 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
126 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
127 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
128 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
129 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
130 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
131 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
132 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Security / session protocols on top of MAC+IP: ESP and AH SPIs,
 * L2TPv3-over-IP session ID, PFCP S-field + SEID.
 */
133 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
134 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
135 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
136 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
137 #define ICE_SW_INSET_MAC_IPV4_AH ( \
138 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
139 #define ICE_SW_INSET_MAC_IPV6_AH ( \
140 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
141 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
142 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
143 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
144 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
145 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
146 ICE_SW_INSET_MAC_IPV4 | \
147 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
148 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
149 ICE_SW_INSET_MAC_IPV6 | \
150 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* GTP-U: outer match is MAC+IP+TEID (plus QFI when the PDU session
 * extension header "EH" is present); inner match is the encapsulated
 * IP addresses and, optionally, TCP/UDP ports.
 */
151 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
152 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
153 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
154 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
155 #define ICE_SW_INSET_MAC_GTPU_OUTER ( \
156 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
157 #define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
158 ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
159 #define ICE_SW_INSET_GTPU_IPV4 ( \
160 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
161 #define ICE_SW_INSET_GTPU_IPV6 ( \
162 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
163 #define ICE_SW_INSET_GTPU_IPV4_UDP ( \
164 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
165 ICE_INSET_UDP_DST_PORT)
166 #define ICE_SW_INSET_GTPU_IPV4_TCP ( \
167 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
168 ICE_INSET_TCP_DST_PORT)
169 #define ICE_SW_INSET_GTPU_IPV6_UDP ( \
170 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
171 ICE_INSET_UDP_DST_PORT)
172 #define ICE_SW_INSET_GTPU_IPV6_TCP ( \
173 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
174 ICE_INSET_TCP_DST_PORT)
177 struct ice_adv_lkup_elem *list;
179 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two switch-engine flow parsers — one per
 * pattern list (distributor vs. permission stage). NOTE(review): their
 * definitions are presumably later in this file; not visible in this chunk.
 */
182 static struct ice_flow_parser ice_switch_dist_parser;
183 static struct ice_flow_parser ice_switch_perm_parser;
186 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
187 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
188 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
190 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
191 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
192 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
193 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
194 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
195 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
196 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
197 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
198 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
199 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
200 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
201 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
202 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
203 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
204 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
205 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
206 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
207 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
208 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
209 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
218 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
220 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
222 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
224 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
243 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
244 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
245 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
246 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
247 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
248 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
249 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
250 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
251 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
252 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
253 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
254 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
255 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
256 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
257 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
258 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
259 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
260 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
261 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
262 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
263 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
264 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
265 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
269 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
270 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
271 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
272 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
273 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
274 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
275 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
276 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
277 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
278 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
279 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
280 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
281 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
282 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
283 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
284 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
285 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
286 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
287 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
288 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
290 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
291 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
292 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
293 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
294 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
295 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
296 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
297 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
298 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
299 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
300 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
301 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
302 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
303 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
304 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
305 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
306 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
307 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
308 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
309 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
310 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
311 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
312 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
313 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
314 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
315 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
316 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
317 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
318 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
319 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
320 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
321 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
322 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
323 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
324 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
325 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
326 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
327 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
328 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
329 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
330 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
331 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
332 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
333 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
334 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
335 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
336 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
337 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
338 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
339 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
340 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
341 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
342 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
343 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
344 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
345 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
346 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
347 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
348 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
352 ice_switch_create(struct ice_adapter *ad,
353 struct rte_flow *flow,
355 struct rte_flow_error *error)
358 struct ice_pf *pf = &ad->pf;
359 struct ice_hw *hw = ICE_PF_TO_HW(pf);
360 struct ice_rule_query_data rule_added = {0};
361 struct ice_rule_query_data *filter_ptr;
362 struct ice_adv_lkup_elem *list =
363 ((struct sw_meta *)meta)->list;
365 ((struct sw_meta *)meta)->lkups_num;
366 struct ice_adv_rule_info *rule_info =
367 &((struct sw_meta *)meta)->rule_info;
369 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
370 rte_flow_error_set(error, EINVAL,
371 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
372 "item number too large for rule");
376 rte_flow_error_set(error, EINVAL,
377 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
378 "lookup list should not be NULL");
381 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
383 filter_ptr = rte_zmalloc("ice_switch_filter",
384 sizeof(struct ice_rule_query_data), 0);
386 rte_flow_error_set(error, EINVAL,
387 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
388 "No memory for ice_switch_filter");
391 flow->rule = filter_ptr;
392 rte_memcpy(filter_ptr,
394 sizeof(struct ice_rule_query_data));
396 rte_flow_error_set(error, EINVAL,
397 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
398 "switch filter create flow fail");
414 ice_switch_destroy(struct ice_adapter *ad,
415 struct rte_flow *flow,
416 struct rte_flow_error *error)
418 struct ice_hw *hw = &ad->hw;
420 struct ice_rule_query_data *filter_ptr;
422 filter_ptr = (struct ice_rule_query_data *)
426 rte_flow_error_set(error, EINVAL,
427 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
429 " create by switch filter");
433 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
435 rte_flow_error_set(error, EINVAL,
436 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
437 "fail to destroy switch filter rule");
441 rte_free(filter_ptr);
446 ice_switch_filter_rule_free(struct rte_flow *flow)
448 rte_free(flow->rule);
452 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
453 struct rte_flow_error *error,
454 struct ice_adv_lkup_elem *list,
456 enum ice_sw_tunnel_type *tun_type,
457 const struct ice_pattern_match_item *pattern_match_item)
459 const struct rte_flow_item *item = pattern;
460 enum rte_flow_item_type item_type;
461 const struct rte_flow_item_eth *eth_spec, *eth_mask;
462 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
463 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
464 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
465 const struct rte_flow_item_udp *udp_spec, *udp_mask;
466 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
467 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
468 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
469 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
470 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
471 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
473 const struct rte_flow_item_esp *esp_spec, *esp_mask;
474 const struct rte_flow_item_ah *ah_spec, *ah_mask;
475 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
476 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
477 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
478 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
479 uint64_t outer_input_set = ICE_INSET_NONE;
480 uint64_t inner_input_set = ICE_INSET_NONE;
481 uint64_t *input = NULL;
482 uint16_t input_set_byte = 0;
483 bool pppoe_elem_valid = 0;
484 bool pppoe_patt_valid = 0;
485 bool pppoe_prot_valid = 0;
486 bool inner_vlan_valid = 0;
487 bool outer_vlan_valid = 0;
488 bool tunnel_valid = 0;
489 bool profile_rule = 0;
490 bool nvgre_valid = 0;
491 bool vxlan_valid = 0;
498 bool gtpu_psc_valid = 0;
499 bool inner_ipv4_valid = 0;
500 bool inner_ipv6_valid = 0;
501 bool inner_tcp_valid = 0;
502 bool inner_udp_valid = 0;
503 uint16_t j, k, t = 0;
505 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
506 *tun_type == ICE_NON_TUN_QINQ)
509 for (item = pattern; item->type !=
510 RTE_FLOW_ITEM_TYPE_END; item++) {
512 rte_flow_error_set(error, EINVAL,
513 RTE_FLOW_ERROR_TYPE_ITEM,
515 "Not support range");
518 item_type = item->type;
521 case RTE_FLOW_ITEM_TYPE_ETH:
522 eth_spec = item->spec;
523 eth_mask = item->mask;
524 if (eth_spec && eth_mask) {
525 const uint8_t *a = eth_mask->src.addr_bytes;
526 const uint8_t *b = eth_mask->dst.addr_bytes;
528 input = &inner_input_set;
530 input = &outer_input_set;
531 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
533 *input |= ICE_INSET_SMAC;
537 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
539 *input |= ICE_INSET_DMAC;
544 *input |= ICE_INSET_ETHERTYPE;
545 list[t].type = (tunnel_valid == 0) ?
546 ICE_MAC_OFOS : ICE_MAC_IL;
547 struct ice_ether_hdr *h;
548 struct ice_ether_hdr *m;
550 h = &list[t].h_u.eth_hdr;
551 m = &list[t].m_u.eth_hdr;
552 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
553 if (eth_mask->src.addr_bytes[j]) {
555 eth_spec->src.addr_bytes[j];
557 eth_mask->src.addr_bytes[j];
561 if (eth_mask->dst.addr_bytes[j]) {
563 eth_spec->dst.addr_bytes[j];
565 eth_mask->dst.addr_bytes[j];
572 if (eth_mask->type) {
573 list[t].type = ICE_ETYPE_OL;
574 list[t].h_u.ethertype.ethtype_id =
576 list[t].m_u.ethertype.ethtype_id =
584 case RTE_FLOW_ITEM_TYPE_IPV4:
585 ipv4_spec = item->spec;
586 ipv4_mask = item->mask;
588 inner_ipv4_valid = 1;
589 input = &inner_input_set;
592 input = &outer_input_set;
595 if (ipv4_spec && ipv4_mask) {
596 /* Check IPv4 mask and update input set */
597 if (ipv4_mask->hdr.version_ihl ||
598 ipv4_mask->hdr.total_length ||
599 ipv4_mask->hdr.packet_id ||
600 ipv4_mask->hdr.hdr_checksum) {
601 rte_flow_error_set(error, EINVAL,
602 RTE_FLOW_ERROR_TYPE_ITEM,
604 "Invalid IPv4 mask.");
608 if (ipv4_mask->hdr.src_addr)
609 *input |= ICE_INSET_IPV4_SRC;
610 if (ipv4_mask->hdr.dst_addr)
611 *input |= ICE_INSET_IPV4_DST;
612 if (ipv4_mask->hdr.time_to_live)
613 *input |= ICE_INSET_IPV4_TTL;
614 if (ipv4_mask->hdr.next_proto_id)
615 *input |= ICE_INSET_IPV4_PROTO;
616 if (ipv4_mask->hdr.type_of_service)
617 *input |= ICE_INSET_IPV4_TOS;
619 list[t].type = (tunnel_valid == 0) ?
620 ICE_IPV4_OFOS : ICE_IPV4_IL;
621 if (ipv4_mask->hdr.src_addr) {
622 list[t].h_u.ipv4_hdr.src_addr =
623 ipv4_spec->hdr.src_addr;
624 list[t].m_u.ipv4_hdr.src_addr =
625 ipv4_mask->hdr.src_addr;
628 if (ipv4_mask->hdr.dst_addr) {
629 list[t].h_u.ipv4_hdr.dst_addr =
630 ipv4_spec->hdr.dst_addr;
631 list[t].m_u.ipv4_hdr.dst_addr =
632 ipv4_mask->hdr.dst_addr;
635 if (ipv4_mask->hdr.time_to_live) {
636 list[t].h_u.ipv4_hdr.time_to_live =
637 ipv4_spec->hdr.time_to_live;
638 list[t].m_u.ipv4_hdr.time_to_live =
639 ipv4_mask->hdr.time_to_live;
642 if (ipv4_mask->hdr.next_proto_id) {
643 list[t].h_u.ipv4_hdr.protocol =
644 ipv4_spec->hdr.next_proto_id;
645 list[t].m_u.ipv4_hdr.protocol =
646 ipv4_mask->hdr.next_proto_id;
649 if ((ipv4_spec->hdr.next_proto_id &
650 ipv4_mask->hdr.next_proto_id) ==
651 ICE_IPV4_PROTO_NVGRE)
652 *tun_type = ICE_SW_TUN_AND_NON_TUN;
653 if (ipv4_mask->hdr.type_of_service) {
654 list[t].h_u.ipv4_hdr.tos =
655 ipv4_spec->hdr.type_of_service;
656 list[t].m_u.ipv4_hdr.tos =
657 ipv4_mask->hdr.type_of_service;
664 case RTE_FLOW_ITEM_TYPE_IPV6:
665 ipv6_spec = item->spec;
666 ipv6_mask = item->mask;
668 inner_ipv6_valid = 1;
669 input = &inner_input_set;
672 input = &outer_input_set;
675 if (ipv6_spec && ipv6_mask) {
676 if (ipv6_mask->hdr.payload_len) {
677 rte_flow_error_set(error, EINVAL,
678 RTE_FLOW_ERROR_TYPE_ITEM,
680 "Invalid IPv6 mask");
684 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
685 if (ipv6_mask->hdr.src_addr[j]) {
686 *input |= ICE_INSET_IPV6_SRC;
690 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
691 if (ipv6_mask->hdr.dst_addr[j]) {
692 *input |= ICE_INSET_IPV6_DST;
696 if (ipv6_mask->hdr.proto)
697 *input |= ICE_INSET_IPV6_NEXT_HDR;
698 if (ipv6_mask->hdr.hop_limits)
699 *input |= ICE_INSET_IPV6_HOP_LIMIT;
700 if (ipv6_mask->hdr.vtc_flow &
701 rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
702 *input |= ICE_INSET_IPV6_TC;
704 list[t].type = (tunnel_valid == 0) ?
705 ICE_IPV6_OFOS : ICE_IPV6_IL;
706 struct ice_ipv6_hdr *f;
707 struct ice_ipv6_hdr *s;
708 f = &list[t].h_u.ipv6_hdr;
709 s = &list[t].m_u.ipv6_hdr;
710 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
711 if (ipv6_mask->hdr.src_addr[j]) {
713 ipv6_spec->hdr.src_addr[j];
715 ipv6_mask->hdr.src_addr[j];
718 if (ipv6_mask->hdr.dst_addr[j]) {
720 ipv6_spec->hdr.dst_addr[j];
722 ipv6_mask->hdr.dst_addr[j];
726 if (ipv6_mask->hdr.proto) {
728 ipv6_spec->hdr.proto;
730 ipv6_mask->hdr.proto;
733 if (ipv6_mask->hdr.hop_limits) {
735 ipv6_spec->hdr.hop_limits;
737 ipv6_mask->hdr.hop_limits;
740 if (ipv6_mask->hdr.vtc_flow &
742 (RTE_IPV6_HDR_TC_MASK)) {
743 struct ice_le_ver_tc_flow vtf;
744 vtf.u.fld.version = 0;
745 vtf.u.fld.flow_label = 0;
746 vtf.u.fld.tc = (rte_be_to_cpu_32
747 (ipv6_spec->hdr.vtc_flow) &
748 RTE_IPV6_HDR_TC_MASK) >>
749 RTE_IPV6_HDR_TC_SHIFT;
750 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
751 vtf.u.fld.tc = (rte_be_to_cpu_32
752 (ipv6_mask->hdr.vtc_flow) &
753 RTE_IPV6_HDR_TC_MASK) >>
754 RTE_IPV6_HDR_TC_SHIFT;
755 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
762 case RTE_FLOW_ITEM_TYPE_UDP:
763 udp_spec = item->spec;
764 udp_mask = item->mask;
767 input = &inner_input_set;
770 input = &outer_input_set;
773 if (udp_spec && udp_mask) {
774 /* Check UDP mask and update input set*/
775 if (udp_mask->hdr.dgram_len ||
776 udp_mask->hdr.dgram_cksum) {
777 rte_flow_error_set(error, EINVAL,
778 RTE_FLOW_ERROR_TYPE_ITEM,
784 if (udp_mask->hdr.src_port)
785 *input |= ICE_INSET_UDP_SRC_PORT;
786 if (udp_mask->hdr.dst_port)
787 *input |= ICE_INSET_UDP_DST_PORT;
789 if (*tun_type == ICE_SW_TUN_VXLAN &&
791 list[t].type = ICE_UDP_OF;
793 list[t].type = ICE_UDP_ILOS;
794 if (udp_mask->hdr.src_port) {
795 list[t].h_u.l4_hdr.src_port =
796 udp_spec->hdr.src_port;
797 list[t].m_u.l4_hdr.src_port =
798 udp_mask->hdr.src_port;
801 if (udp_mask->hdr.dst_port) {
802 list[t].h_u.l4_hdr.dst_port =
803 udp_spec->hdr.dst_port;
804 list[t].m_u.l4_hdr.dst_port =
805 udp_mask->hdr.dst_port;
812 case RTE_FLOW_ITEM_TYPE_TCP:
813 tcp_spec = item->spec;
814 tcp_mask = item->mask;
817 input = &inner_input_set;
820 input = &outer_input_set;
823 if (tcp_spec && tcp_mask) {
824 /* Check TCP mask and update input set */
825 if (tcp_mask->hdr.sent_seq ||
826 tcp_mask->hdr.recv_ack ||
827 tcp_mask->hdr.data_off ||
828 tcp_mask->hdr.tcp_flags ||
829 tcp_mask->hdr.rx_win ||
830 tcp_mask->hdr.cksum ||
831 tcp_mask->hdr.tcp_urp) {
832 rte_flow_error_set(error, EINVAL,
833 RTE_FLOW_ERROR_TYPE_ITEM,
839 if (tcp_mask->hdr.src_port)
840 *input |= ICE_INSET_TCP_SRC_PORT;
841 if (tcp_mask->hdr.dst_port)
842 *input |= ICE_INSET_TCP_DST_PORT;
843 list[t].type = ICE_TCP_IL;
844 if (tcp_mask->hdr.src_port) {
845 list[t].h_u.l4_hdr.src_port =
846 tcp_spec->hdr.src_port;
847 list[t].m_u.l4_hdr.src_port =
848 tcp_mask->hdr.src_port;
851 if (tcp_mask->hdr.dst_port) {
852 list[t].h_u.l4_hdr.dst_port =
853 tcp_spec->hdr.dst_port;
854 list[t].m_u.l4_hdr.dst_port =
855 tcp_mask->hdr.dst_port;
862 case RTE_FLOW_ITEM_TYPE_SCTP:
863 sctp_spec = item->spec;
864 sctp_mask = item->mask;
865 if (sctp_spec && sctp_mask) {
866 /* Check SCTP mask and update input set */
867 if (sctp_mask->hdr.cksum) {
868 rte_flow_error_set(error, EINVAL,
869 RTE_FLOW_ERROR_TYPE_ITEM,
871 "Invalid SCTP mask");
875 input = &inner_input_set;
877 input = &outer_input_set;
879 if (sctp_mask->hdr.src_port)
880 *input |= ICE_INSET_SCTP_SRC_PORT;
881 if (sctp_mask->hdr.dst_port)
882 *input |= ICE_INSET_SCTP_DST_PORT;
884 list[t].type = ICE_SCTP_IL;
885 if (sctp_mask->hdr.src_port) {
886 list[t].h_u.sctp_hdr.src_port =
887 sctp_spec->hdr.src_port;
888 list[t].m_u.sctp_hdr.src_port =
889 sctp_mask->hdr.src_port;
892 if (sctp_mask->hdr.dst_port) {
893 list[t].h_u.sctp_hdr.dst_port =
894 sctp_spec->hdr.dst_port;
895 list[t].m_u.sctp_hdr.dst_port =
896 sctp_mask->hdr.dst_port;
903 case RTE_FLOW_ITEM_TYPE_VXLAN:
904 vxlan_spec = item->spec;
905 vxlan_mask = item->mask;
906 /* Check if VXLAN item is used to describe protocol.
907 * If yes, both spec and mask should be NULL.
908 * If no, both spec and mask shouldn't be NULL.
910 if ((!vxlan_spec && vxlan_mask) ||
911 (vxlan_spec && !vxlan_mask)) {
912 rte_flow_error_set(error, EINVAL,
913 RTE_FLOW_ERROR_TYPE_ITEM,
915 "Invalid VXLAN item");
920 input = &inner_input_set;
921 if (vxlan_spec && vxlan_mask) {
922 list[t].type = ICE_VXLAN;
923 if (vxlan_mask->vni[0] ||
924 vxlan_mask->vni[1] ||
925 vxlan_mask->vni[2]) {
926 list[t].h_u.tnl_hdr.vni =
927 (vxlan_spec->vni[2] << 16) |
928 (vxlan_spec->vni[1] << 8) |
930 list[t].m_u.tnl_hdr.vni =
931 (vxlan_mask->vni[2] << 16) |
932 (vxlan_mask->vni[1] << 8) |
934 *input |= ICE_INSET_VXLAN_VNI;
941 case RTE_FLOW_ITEM_TYPE_NVGRE:
942 nvgre_spec = item->spec;
943 nvgre_mask = item->mask;
944 /* Check if NVGRE item is used to describe protocol.
945 * If yes, both spec and mask should be NULL.
946 * If no, both spec and mask shouldn't be NULL.
948 if ((!nvgre_spec && nvgre_mask) ||
949 (nvgre_spec && !nvgre_mask)) {
950 rte_flow_error_set(error, EINVAL,
951 RTE_FLOW_ERROR_TYPE_ITEM,
953 "Invalid NVGRE item");
958 input = &inner_input_set;
959 if (nvgre_spec && nvgre_mask) {
960 list[t].type = ICE_NVGRE;
961 if (nvgre_mask->tni[0] ||
962 nvgre_mask->tni[1] ||
963 nvgre_mask->tni[2]) {
964 list[t].h_u.nvgre_hdr.tni_flow =
965 (nvgre_spec->tni[2] << 16) |
966 (nvgre_spec->tni[1] << 8) |
968 list[t].m_u.nvgre_hdr.tni_flow =
969 (nvgre_mask->tni[2] << 16) |
970 (nvgre_mask->tni[1] << 8) |
972 *input |= ICE_INSET_NVGRE_TNI;
979 case RTE_FLOW_ITEM_TYPE_VLAN:
980 vlan_spec = item->spec;
981 vlan_mask = item->mask;
982 /* Check if VLAN item is used to describe protocol.
983 * If yes, both spec and mask should be NULL.
984 * If no, both spec and mask shouldn't be NULL.
986 if ((!vlan_spec && vlan_mask) ||
987 (vlan_spec && !vlan_mask)) {
988 rte_flow_error_set(error, EINVAL,
989 RTE_FLOW_ERROR_TYPE_ITEM,
991 "Invalid VLAN item");
996 if (!outer_vlan_valid)
997 outer_vlan_valid = 1;
999 inner_vlan_valid = 1;
1002 input = &outer_input_set;
1004 if (vlan_spec && vlan_mask) {
1006 if (!inner_vlan_valid) {
1007 list[t].type = ICE_VLAN_EX;
1009 ICE_INSET_VLAN_OUTER;
1011 list[t].type = ICE_VLAN_IN;
1013 ICE_INSET_VLAN_INNER;
1016 list[t].type = ICE_VLAN_OFOS;
1017 *input |= ICE_INSET_VLAN_INNER;
1020 if (vlan_mask->tci) {
1021 list[t].h_u.vlan_hdr.vlan =
1023 list[t].m_u.vlan_hdr.vlan =
1025 input_set_byte += 2;
1027 if (vlan_mask->inner_type) {
1028 rte_flow_error_set(error, EINVAL,
1029 RTE_FLOW_ERROR_TYPE_ITEM,
1031 "Invalid VLAN input set.");
1038 case RTE_FLOW_ITEM_TYPE_PPPOED:
1039 case RTE_FLOW_ITEM_TYPE_PPPOES:
1040 pppoe_spec = item->spec;
1041 pppoe_mask = item->mask;
1042 /* Check if PPPoE item is used to describe protocol.
1043 * If yes, both spec and mask should be NULL.
1044 * If no, both spec and mask shouldn't be NULL.
1046 if ((!pppoe_spec && pppoe_mask) ||
1047 (pppoe_spec && !pppoe_mask)) {
1048 rte_flow_error_set(error, EINVAL,
1049 RTE_FLOW_ERROR_TYPE_ITEM,
1051 "Invalid pppoe item");
1054 pppoe_patt_valid = 1;
1055 input = &outer_input_set;
1056 if (pppoe_spec && pppoe_mask) {
1057 /* Check pppoe mask and update input set */
1058 if (pppoe_mask->length ||
1060 pppoe_mask->version_type) {
1061 rte_flow_error_set(error, EINVAL,
1062 RTE_FLOW_ERROR_TYPE_ITEM,
1064 "Invalid pppoe mask");
1067 list[t].type = ICE_PPPOE;
1068 if (pppoe_mask->session_id) {
1069 list[t].h_u.pppoe_hdr.session_id =
1070 pppoe_spec->session_id;
1071 list[t].m_u.pppoe_hdr.session_id =
1072 pppoe_mask->session_id;
1073 *input |= ICE_INSET_PPPOE_SESSION;
1074 input_set_byte += 2;
1077 pppoe_elem_valid = 1;
1081 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1082 pppoe_proto_spec = item->spec;
1083 pppoe_proto_mask = item->mask;
1084 /* Check if PPPoE optional proto_id item
1085 * is used to describe protocol.
1086 * If yes, both spec and mask should be NULL.
1087 * If no, both spec and mask shouldn't be NULL.
1089 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1090 (pppoe_proto_spec && !pppoe_proto_mask)) {
1091 rte_flow_error_set(error, EINVAL,
1092 RTE_FLOW_ERROR_TYPE_ITEM,
1094 "Invalid pppoe proto item");
1097 input = &outer_input_set;
1098 if (pppoe_proto_spec && pppoe_proto_mask) {
1099 if (pppoe_elem_valid)
1101 list[t].type = ICE_PPPOE;
1102 if (pppoe_proto_mask->proto_id) {
1103 list[t].h_u.pppoe_hdr.ppp_prot_id =
1104 pppoe_proto_spec->proto_id;
1105 list[t].m_u.pppoe_hdr.ppp_prot_id =
1106 pppoe_proto_mask->proto_id;
1107 *input |= ICE_INSET_PPPOE_PROTO;
1108 input_set_byte += 2;
1109 pppoe_prot_valid = 1;
1111 if ((pppoe_proto_mask->proto_id &
1112 pppoe_proto_spec->proto_id) !=
1113 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1114 (pppoe_proto_mask->proto_id &
1115 pppoe_proto_spec->proto_id) !=
1116 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1117 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1119 *tun_type = ICE_SW_TUN_PPPOE;
1125 case RTE_FLOW_ITEM_TYPE_ESP:
1126 esp_spec = item->spec;
1127 esp_mask = item->mask;
1128 if ((esp_spec && !esp_mask) ||
1129 (!esp_spec && esp_mask)) {
1130 rte_flow_error_set(error, EINVAL,
1131 RTE_FLOW_ERROR_TYPE_ITEM,
1133 "Invalid esp item");
1136 /* Check esp mask and update input set */
1137 if (esp_mask && esp_mask->hdr.seq) {
1138 rte_flow_error_set(error, EINVAL,
1139 RTE_FLOW_ERROR_TYPE_ITEM,
1141 "Invalid esp mask");
1144 input = &outer_input_set;
1145 if (!esp_spec && !esp_mask && !(*input)) {
1147 if (ipv6_valid && udp_valid)
1149 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1150 else if (ipv6_valid)
1151 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1152 else if (ipv4_valid)
1154 } else if (esp_spec && esp_mask &&
1157 list[t].type = ICE_NAT_T;
1159 list[t].type = ICE_ESP;
1160 list[t].h_u.esp_hdr.spi =
1162 list[t].m_u.esp_hdr.spi =
1164 *input |= ICE_INSET_ESP_SPI;
1165 input_set_byte += 4;
1169 if (!profile_rule) {
1170 if (ipv6_valid && udp_valid)
1171 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1172 else if (ipv4_valid && udp_valid)
1173 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1174 else if (ipv6_valid)
1175 *tun_type = ICE_SW_TUN_IPV6_ESP;
1176 else if (ipv4_valid)
1177 *tun_type = ICE_SW_TUN_IPV4_ESP;
1181 case RTE_FLOW_ITEM_TYPE_AH:
1182 ah_spec = item->spec;
1183 ah_mask = item->mask;
1184 if ((ah_spec && !ah_mask) ||
1185 (!ah_spec && ah_mask)) {
1186 rte_flow_error_set(error, EINVAL,
1187 RTE_FLOW_ERROR_TYPE_ITEM,
1192 /* Check ah mask and update input set */
1194 (ah_mask->next_hdr ||
1195 ah_mask->payload_len ||
1197 ah_mask->reserved)) {
1198 rte_flow_error_set(error, EINVAL,
1199 RTE_FLOW_ERROR_TYPE_ITEM,
1205 input = &outer_input_set;
1206 if (!ah_spec && !ah_mask && !(*input)) {
1208 if (ipv6_valid && udp_valid)
1210 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1211 else if (ipv6_valid)
1212 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1213 else if (ipv4_valid)
1215 } else if (ah_spec && ah_mask &&
1217 list[t].type = ICE_AH;
1218 list[t].h_u.ah_hdr.spi =
1220 list[t].m_u.ah_hdr.spi =
1222 *input |= ICE_INSET_AH_SPI;
1223 input_set_byte += 4;
1227 if (!profile_rule) {
1230 else if (ipv6_valid)
1231 *tun_type = ICE_SW_TUN_IPV6_AH;
1232 else if (ipv4_valid)
1233 *tun_type = ICE_SW_TUN_IPV4_AH;
1237 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1238 l2tp_spec = item->spec;
1239 l2tp_mask = item->mask;
1240 if ((l2tp_spec && !l2tp_mask) ||
1241 (!l2tp_spec && l2tp_mask)) {
1242 rte_flow_error_set(error, EINVAL,
1243 RTE_FLOW_ERROR_TYPE_ITEM,
1245 "Invalid l2tp item");
1249 input = &outer_input_set;
1250 if (!l2tp_spec && !l2tp_mask && !(*input)) {
1253 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1254 else if (ipv4_valid)
1256 } else if (l2tp_spec && l2tp_mask &&
1257 l2tp_mask->session_id){
1258 list[t].type = ICE_L2TPV3;
1259 list[t].h_u.l2tpv3_sess_hdr.session_id =
1260 l2tp_spec->session_id;
1261 list[t].m_u.l2tpv3_sess_hdr.session_id =
1262 l2tp_mask->session_id;
1263 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1264 input_set_byte += 4;
1268 if (!profile_rule) {
1271 ICE_SW_TUN_IPV6_L2TPV3;
1272 else if (ipv4_valid)
1274 ICE_SW_TUN_IPV4_L2TPV3;
1278 case RTE_FLOW_ITEM_TYPE_PFCP:
1279 pfcp_spec = item->spec;
1280 pfcp_mask = item->mask;
1281 /* Check if PFCP item is used to describe protocol.
1282 * If yes, both spec and mask should be NULL.
1283 * If no, both spec and mask shouldn't be NULL.
1285 if ((!pfcp_spec && pfcp_mask) ||
1286 (pfcp_spec && !pfcp_mask)) {
1287 rte_flow_error_set(error, EINVAL,
1288 RTE_FLOW_ERROR_TYPE_ITEM,
1290 "Invalid PFCP item");
1293 if (pfcp_spec && pfcp_mask) {
1294 /* Check pfcp mask and update input set */
1295 if (pfcp_mask->msg_type ||
1296 pfcp_mask->msg_len ||
1298 rte_flow_error_set(error, EINVAL,
1299 RTE_FLOW_ERROR_TYPE_ITEM,
1301 "Invalid pfcp mask");
1304 if (pfcp_mask->s_field &&
1305 pfcp_spec->s_field == 0x01 &&
1308 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1309 else if (pfcp_mask->s_field &&
1310 pfcp_spec->s_field == 0x01)
1312 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1313 else if (pfcp_mask->s_field &&
1314 !pfcp_spec->s_field &&
1317 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1318 else if (pfcp_mask->s_field &&
1319 !pfcp_spec->s_field)
1321 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1327 case RTE_FLOW_ITEM_TYPE_GTPU:
1328 gtp_spec = item->spec;
1329 gtp_mask = item->mask;
1330 if (gtp_spec && !gtp_mask) {
1331 rte_flow_error_set(error, EINVAL,
1332 RTE_FLOW_ERROR_TYPE_ITEM,
1334 "Invalid GTP item");
1337 if (gtp_spec && gtp_mask) {
1338 if (gtp_mask->v_pt_rsv_flags ||
1339 gtp_mask->msg_type ||
1340 gtp_mask->msg_len) {
1341 rte_flow_error_set(error, EINVAL,
1342 RTE_FLOW_ERROR_TYPE_ITEM,
1344 "Invalid GTP mask");
1347 input = &outer_input_set;
1349 *input |= ICE_INSET_GTPU_TEID;
1350 list[t].type = ICE_GTP;
1351 list[t].h_u.gtp_hdr.teid =
1353 list[t].m_u.gtp_hdr.teid =
1355 input_set_byte += 4;
1362 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1363 gtp_psc_spec = item->spec;
1364 gtp_psc_mask = item->mask;
1365 if (gtp_psc_spec && !gtp_psc_mask) {
1366 rte_flow_error_set(error, EINVAL,
1367 RTE_FLOW_ERROR_TYPE_ITEM,
1369 "Invalid GTPU_EH item");
1372 if (gtp_psc_spec && gtp_psc_mask) {
1373 if (gtp_psc_mask->pdu_type) {
1374 rte_flow_error_set(error, EINVAL,
1375 RTE_FLOW_ERROR_TYPE_ITEM,
1377 "Invalid GTPU_EH mask");
1380 input = &outer_input_set;
1381 if (gtp_psc_mask->qfi)
1382 *input |= ICE_INSET_GTPU_QFI;
1383 list[t].type = ICE_GTP;
1384 list[t].h_u.gtp_hdr.qfi =
1386 list[t].m_u.gtp_hdr.qfi =
1388 input_set_byte += 1;
1394 case RTE_FLOW_ITEM_TYPE_VOID:
1398 rte_flow_error_set(error, EINVAL,
1399 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1400 "Invalid pattern item.");
1405 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1406 inner_vlan_valid && outer_vlan_valid)
1407 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1408 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1409 inner_vlan_valid && outer_vlan_valid)
1410 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1411 else if (*tun_type == ICE_NON_TUN &&
1412 inner_vlan_valid && outer_vlan_valid)
1413 *tun_type = ICE_NON_TUN_QINQ;
1414 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1415 inner_vlan_valid && outer_vlan_valid)
1416 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1418 if (pppoe_patt_valid && !pppoe_prot_valid) {
1419 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1420 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1421 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1422 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1423 else if (inner_vlan_valid && outer_vlan_valid)
1424 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1425 else if (ipv6_valid && udp_valid)
1426 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1427 else if (ipv6_valid && tcp_valid)
1428 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1429 else if (ipv4_valid && udp_valid)
1430 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1431 else if (ipv4_valid && tcp_valid)
1432 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1433 else if (ipv6_valid)
1434 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1435 else if (ipv4_valid)
1436 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1438 *tun_type = ICE_SW_TUN_PPPOE;
1441 if (gtpu_valid && gtpu_psc_valid) {
1442 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1443 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1444 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1445 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1446 else if (ipv4_valid && inner_ipv4_valid)
1447 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1448 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1449 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1450 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1451 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1452 else if (ipv4_valid && inner_ipv6_valid)
1453 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1454 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1455 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1456 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1457 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1458 else if (ipv6_valid && inner_ipv4_valid)
1459 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1460 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1461 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1462 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1463 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1464 else if (ipv6_valid && inner_ipv6_valid)
1465 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1466 else if (ipv4_valid)
1467 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1468 else if (ipv6_valid)
1469 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1470 } else if (gtpu_valid) {
1471 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1472 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1473 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1474 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1475 else if (ipv4_valid && inner_ipv4_valid)
1476 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1477 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1478 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1479 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1480 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1481 else if (ipv4_valid && inner_ipv6_valid)
1482 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1483 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1484 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1485 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1486 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1487 else if (ipv6_valid && inner_ipv4_valid)
1488 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1489 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1490 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1491 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1492 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1493 else if (ipv6_valid && inner_ipv6_valid)
1494 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1495 else if (ipv4_valid)
1496 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1497 else if (ipv6_valid)
1498 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1501 if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1502 *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1503 for (k = 0; k < t; k++) {
1504 if (list[k].type == ICE_GTP)
1505 list[k].type = ICE_GTP_NO_PAY;
1509 if (*tun_type == ICE_NON_TUN) {
1511 *tun_type = ICE_SW_TUN_VXLAN;
1512 else if (nvgre_valid)
1513 *tun_type = ICE_SW_TUN_NVGRE;
1514 else if (ipv4_valid && tcp_valid)
1515 *tun_type = ICE_SW_IPV4_TCP;
1516 else if (ipv4_valid && udp_valid)
1517 *tun_type = ICE_SW_IPV4_UDP;
1518 else if (ipv6_valid && tcp_valid)
1519 *tun_type = ICE_SW_IPV6_TCP;
1520 else if (ipv6_valid && udp_valid)
1521 *tun_type = ICE_SW_IPV6_UDP;
1524 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1525 rte_flow_error_set(error, EINVAL,
1526 RTE_FLOW_ERROR_TYPE_ITEM,
1528 "too much input set");
1535 if ((!outer_input_set && !inner_input_set &&
1536 !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1537 ~pattern_match_item->input_set_mask_o) ||
1538 (inner_input_set & ~pattern_match_item->input_set_mask_i))
/*
 * Parse the flow actions for the DCF (Device Config Function) path and fill
 * @rule_info. Only VF (forward-to-VSI) and DROP actions are accepted; any
 * other action type raises an rte_flow error.
 *
 * NOTE(review): this listing is truncated — the embedded original line
 * numbers skip (e.g. 1547, 1550, 1562, 1567-1571), so parameters (likely a
 * priority argument), braces and returns are missing here. Verify against
 * the full source before editing.
 */
1545 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1546 const struct rte_flow_action *actions,
1548 struct rte_flow_error *error,
1549 struct ice_adv_rule_info *rule_info)
1551 const struct rte_flow_action_vf *act_vf;
1552 const struct rte_flow_action *action;
1553 enum rte_flow_action_type action_type;
1555 for (action = actions; action->type !=
1556 RTE_FLOW_ACTION_TYPE_END; action++) {
1557 action_type = action->type;
1558 switch (action_type) {
1559 case RTE_FLOW_ACTION_TYPE_VF:
1560 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1561 act_vf = action->conf;
/* A non-original VF id must be within the number of VFs known to HW. */
1563 if (act_vf->id >= ad->real_hw.num_vfs &&
1564 !act_vf->original) {
1565 rte_flow_error_set(error,
1566 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* "original" means forward to the DCF's own function (PCI func number);
 * otherwise the VF id is used directly as the VSI handle.
 */
1572 if (act_vf->original)
1573 rule_info->sw_act.vsi_handle =
1574 ad->real_hw.avf.bus.func;
1576 rule_info->sw_act.vsi_handle = act_vf->id;
1579 case RTE_FLOW_ACTION_TYPE_DROP:
1580 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1584 rte_flow_error_set(error,
1585 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1587 "Invalid action type");
/* Source VSI for the RX filter is the forwarding target chosen above. */
1592 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1593 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Map rte_flow priority to HW priority (inverted: 6 - priority). */
1595 rule_info->priority = 6 - priority;
/*
 * Parse the flow actions for the PF path and fill @rule_info.
 * Supported actions: RSS (interpreted as a queue group/region), QUEUE,
 * DROP and VOID. Queue indexes are offset by the PF/VSI base queue.
 *
 * NOTE(review): listing is truncated (embedded line numbers skip); the
 * fltr_act enum values for the RSS/QUEUE/DROP cases, some braces and the
 * returns/goto labels are missing from this view — confirm in full source.
 */
1601 ice_switch_parse_action(struct ice_pf *pf,
1602 const struct rte_flow_action *actions,
1604 struct rte_flow_error *error,
1605 struct ice_adv_rule_info *rule_info)
1607 struct ice_vsi *vsi = pf->main_vsi;
1608 struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1609 const struct rte_flow_action_queue *act_q;
1610 const struct rte_flow_action_rss *act_qgrop;
1611 uint16_t base_queue, i;
1612 const struct rte_flow_action *action;
1613 enum rte_flow_action_type action_type;
/* HW queue-group sizes supported by the switch filter: powers of two. */
1614 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1615 2, 4, 8, 16, 32, 64, 128};
/* Translate relative queue ids to absolute HW queue ids. */
1617 base_queue = pf->base_queue + vsi->base_queue;
1618 for (action = actions; action->type !=
1619 RTE_FLOW_ACTION_TYPE_END; action++) {
1620 action_type = action->type;
1621 switch (action_type) {
1622 case RTE_FLOW_ACTION_TYPE_RSS:
/* RSS action is used to express a queue region (group). */
1623 act_qgrop = action->conf;
1624 if (act_qgrop->queue_num <= 1)
1626 rule_info->sw_act.fltr_act =
1628 rule_info->sw_act.fwd_id.q_id =
1629 base_queue + act_qgrop->queue[0];
/* Group size must be one of the valid power-of-two sizes. */
1630 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1631 if (act_qgrop->queue_num ==
1632 valid_qgrop_number[i])
1635 if (i == MAX_QGRP_NUM_TYPE)
/* Region must fit inside the configured RX queues ... */
1637 if ((act_qgrop->queue[0] +
1638 act_qgrop->queue_num) >
1639 dev_data->nb_rx_queues)
/* ... and be a contiguous run of queue indexes. */
1641 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1642 if (act_qgrop->queue[i + 1] !=
1643 act_qgrop->queue[i] + 1)
1645 rule_info->sw_act.qgrp_size =
1646 act_qgrop->queue_num;
1648 case RTE_FLOW_ACTION_TYPE_QUEUE:
1649 act_q = action->conf;
1650 if (act_q->index >= dev_data->nb_rx_queues)
1652 rule_info->sw_act.fltr_act =
1654 rule_info->sw_act.fwd_id.q_id =
1655 base_queue + act_q->index;
1658 case RTE_FLOW_ACTION_TYPE_DROP:
1659 rule_info->sw_act.fltr_act =
1663 case RTE_FLOW_ACTION_TYPE_VOID:
1671 rule_info->sw_act.vsi_handle = vsi->idx;
1673 rule_info->sw_act.src = vsi->idx;
/* PF path maps rte_flow priority upward: priority + 5. */
1674 rule_info->priority = priority + 5;
/* Error exits (labels truncated in this listing): */
1679 rte_flow_error_set(error,
1680 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1682 "Invalid action type or queue number")
1686 rte_flow_error_set(error,
1687 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1689 "Invalid queue region indexes")
1693 rte_flow_error_set(error,
1694 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1696 "Discontinuous queue region")
/*
 * Validate the action list: count the "real" actions (VF/RSS/QUEUE/DROP,
 * VOID is ignored) and require exactly one of them; reject unknown types.
 *
 * NOTE(review): listing is truncated — the actions_num increment, returns
 * and closing braces are missing here (embedded line numbers skip).
 */
1701 ice_switch_check_action(const struct rte_flow_action *actions,
1702 struct rte_flow_error *error)
1704 const struct rte_flow_action *action;
1705 enum rte_flow_action_type action_type;
1706 uint16_t actions_num = 0;
1708 for (action = actions; action->type !=
1709 RTE_FLOW_ACTION_TYPE_END; action++) {
1710 action_type = action->type;
1711 switch (action_type) {
1712 case RTE_FLOW_ACTION_TYPE_VF:
1713 case RTE_FLOW_ACTION_TYPE_RSS:
1714 case RTE_FLOW_ACTION_TYPE_QUEUE:
1715 case RTE_FLOW_ACTION_TYPE_DROP:
1718 case RTE_FLOW_ACTION_TYPE_VOID:
1721 rte_flow_error_set(error,
1722 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1724 "Invalid action type");
/* The switch filter supports exactly one fate action per rule. */
1729 if (actions_num != 1) {
1730 rte_flow_error_set(error,
1731 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1733 "Invalid action number");
/*
 * Entry point for the switch-filter parser: validate the pattern and the
 * actions, build the advanced lookup list and rule info, and hand them back
 * through @meta as a struct sw_meta for the later rule-create stage.
 *
 * Flow:
 *  1. Pre-scan the pattern: count items, detect wildcard ETH type
 *     (-> ICE_SW_TUN_AND_NON_TUN) and count VLAN items for QinQ tun_types.
 *  2. Allocate the lookup list and sw_meta.
 *  3. Match the pattern against the supported-pattern array, then parse it.
 *  4. Validate/parse actions (DCF or PF path) and fill rule_info.
 *
 * NOTE(review): listing is truncated — allocation NULL-checks, gotos,
 * returns and closing braces are missing (embedded line numbers skip).
 */
1741 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1742 struct ice_pattern_match_item *array,
1744 const struct rte_flow_item pattern[],
1745 const struct rte_flow_action actions[],
1748 struct rte_flow_error *error)
1750 struct ice_pf *pf = &ad->pf;
1752 struct sw_meta *sw_meta_ptr = NULL;
1753 struct ice_adv_rule_info rule_info;
1754 struct ice_adv_lkup_elem *list = NULL;
1755 uint16_t lkups_num = 0;
1756 const struct rte_flow_item *item = pattern;
1757 uint16_t item_num = 0;
1758 uint16_t vlan_num = 0;
1759 enum ice_sw_tunnel_type tun_type =
1761 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: size the lookup list and pick a preliminary tunnel type. */
1763 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1765 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1766 const struct rte_flow_item_eth *eth_mask;
1768 eth_mask = item->mask;
/* Fully-masked ether type: rule should hit tunnel and non-tunnel. */
1771 if (eth_mask->type == UINT16_MAX)
1772 tun_type = ICE_SW_TUN_AND_NON_TUN;
1775 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1778 /* reserve one more memory slot for ETH which may
1779 * consume 2 lookup items.
1781 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN items mean a QinQ pattern. */
1785 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1786 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1787 else if (vlan_num == 2)
1788 tun_type = ICE_NON_TUN_QINQ;
1790 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1792 rte_flow_error_set(error, EINVAL,
1793 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1794 "No memory for PMD internal items");
1799 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1801 rte_flow_error_set(error, EINVAL,
1802 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1803 "No memory for sw_pattern_meta_ptr");
1807 pattern_match_item =
1808 ice_search_pattern_match_item(ad, pattern, array, array_len,
1810 if (!pattern_match_item) {
1811 rte_flow_error_set(error, EINVAL,
1812 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1813 "Invalid input pattern");
/* Parse the pattern into the lookup list; may refine tun_type. */
1817 if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
1818 &tun_type, pattern_match_item)) {
1819 rte_flow_error_set(error, EINVAL,
1820 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1822 "Invalid input set");
1826 memset(&rule_info, 0, sizeof(rule_info));
1827 rule_info.tun_type = tun_type;
1829 ret = ice_switch_check_action(actions, error);
/* Actions are parsed differently on the DCF path vs. the PF path. */
1833 if (ad->hw.dcf_enabled)
1834 ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
1837 ret = ice_switch_parse_action(pf, actions, priority, error,
/* Success: ownership of list/sw_meta transfers to the caller via *meta. */
1844 *meta = sw_meta_ptr;
1845 ((struct sw_meta *)*meta)->list = list;
1846 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1847 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup paths (labels truncated in this listing). */
1850 rte_free(sw_meta_ptr);
1853 rte_free(pattern_match_item);
1859 rte_free(sw_meta_ptr);
1860 rte_free(pattern_match_item);
/*
 * Flow query stub for the switch engine: the count action is not supported,
 * so this always sets an EINVAL rte_flow error.
 */
1866 ice_switch_query(struct ice_adapter *ad __rte_unused,
1867 struct rte_flow *flow __rte_unused,
1868 struct rte_flow_query_count *count __rte_unused,
1869 struct rte_flow_error *error)
1871 rte_flow_error_set(error, EINVAL,
1872 RTE_FLOW_ERROR_TYPE_HANDLE,
1874 "count action not supported by switch filter");
/*
 * Redirect an existing switch rule to a new VSI (used e.g. after a VF
 * reset changes the VSI number): find the installed advanced rule matching
 * this flow, duplicate its lookups, remove the old HW rule, update the VSI
 * context, and replay the rule with the updated forwarding info.
 *
 * NOTE(review): listing is truncated — NULL checks, returns and closing
 * braces are missing here (embedded line numbers skip); verify in full
 * source before modifying.
 */
1880 ice_switch_redirect(struct ice_adapter *ad,
1881 struct rte_flow *flow,
1882 struct ice_flow_redirect *rd)
1884 struct ice_rule_query_data *rdata = flow->rule;
1885 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1886 struct ice_adv_lkup_elem *lkups_dp = NULL;
1887 struct LIST_HEAD_TYPE *list_head;
1888 struct ice_adv_rule_info rinfo;
1889 struct ice_hw *hw = &ad->hw;
1890 struct ice_switch_info *sw;
/* Only rules on the redirected VSI are affected. */
1894 if (rdata->vsi_handle != rd->vsi_handle)
1897 sw = hw->switch_info;
1898 if (!sw->recp_list[rdata->rid].recp_created)
1901 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Locate this flow's entry in the recipe's filter-rule list. */
1904 list_head = &sw->recp_list[rdata->rid].filt_rules;
1905 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1907 rinfo = list_itr->rule_info;
1908 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1909 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1910 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1911 (rinfo.fltr_rule_id == rdata->rule_id &&
1912 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Copy the lookups so we can replay the rule after removal. */
1913 lkups_cnt = list_itr->lkups_cnt;
1914 lkups_dp = (struct ice_adv_lkup_elem *)
1915 ice_memdup(hw, list_itr->lkups,
1916 sizeof(*list_itr->lkups) *
1917 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1920 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* A VSI-list rule collapses to a single-VSI forward on replay. */
1924 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1925 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1926 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1935 /* Remove the old rule */
1936 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1939 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1945 /* Update VSI context */
1946 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1948 /* Replay the rule */
1949 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1952 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1957 ice_free(hw, lkups_dp);
/*
 * Engine init hook: register the switch parser appropriate for the
 * configured mode — the permission-stage parser when the
 * "pipe_mode_support" devarg is set, otherwise the distributor-stage
 * parser. (The else branch's brace/return lines are truncated in this
 * listing.)
 */
1962 ice_switch_init(struct ice_adapter *ad)
1965 struct ice_flow_parser *dist_parser;
1966 struct ice_flow_parser *perm_parser;
1968 if (ad->devargs.pipe_mode_support) {
1969 perm_parser = &ice_switch_perm_parser;
1970 ret = ice_register_parser(perm_parser, ad);
1972 dist_parser = &ice_switch_dist_parser;
1973 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine uninit hook: mirror of ice_switch_init — unregister whichever
 * parser was registered for the configured mode. (The else branch's
 * brace lines are truncated in this listing.)
 */
1979 ice_switch_uninit(struct ice_adapter *ad)
1981 struct ice_flow_parser *dist_parser;
1982 struct ice_flow_parser *perm_parser;
1984 if (ad->devargs.pipe_mode_support) {
1985 perm_parser = &ice_switch_perm_parser;
1986 ice_unregister_parser(perm_parser, ad);
1988 dist_parser = &ice_switch_dist_parser;
1989 ice_unregister_parser(dist_parser, ad);
/* Switch flow engine descriptor: wires the engine callbacks (init/uninit,
 * rule create/destroy, query, redirect, free) into the generic flow
 * framework under the ICE_FLOW_ENGINE_SWITCH type.
 */
1994 ice_flow_engine ice_switch_engine = {
1995 .init = ice_switch_init,
1996 .uninit = ice_switch_uninit,
1997 .create = ice_switch_create,
1998 .destroy = ice_switch_destroy,
1999 .query_count = ice_switch_query,
2000 .redirect = ice_switch_redirect,
2001 .free = ice_switch_filter_rule_free,
2002 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser: default mode, uses the "dist" pattern list. */
2006 ice_flow_parser ice_switch_dist_parser = {
2007 .engine = &ice_switch_engine,
2008 .array = ice_switch_pattern_dist_list,
2009 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
2010 .parse_pattern_action = ice_switch_parse_pattern_action,
2011 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser: used when the pipe_mode_support devarg is set. */
2015 ice_flow_parser ice_switch_perm_parser = {
2016 .engine = &ice_switch_engine,
2017 .array = ice_switch_pattern_perm_list,
2018 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
2019 .parse_pattern_action = ice_switch_parse_pattern_action,
2020 .stage = ICE_FLOW_STAGE_PERMISSION,
2023 RTE_INIT(ice_sw_engine_init)
2025 struct ice_flow_engine *engine = &ice_switch_engine;
2026 ice_register_flow_engine(engine);