1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Switch filter limits and L3 protocol identifiers (PPP payload protos,
 * IPv4 protocol number for GRE/NVGRE).
 */
29 #define MAX_QGRP_NUM_TYPE 7
30 #define MAX_INPUT_SET_BYTE 32
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Input-set bitmaps: the packet fields a given pattern is allowed to
 * match on.  Used by the pattern tables below.
 * NOTE(review): several continuation lines of these macros are missing
 * from this chunk (e.g. after lines 40, 70, 73, 96, 100); the visible
 * tokens are kept byte-identical.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
39 #define ICE_SW_INSET_MAC_QINQ ( \
40 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
42 #define ICE_SW_INSET_MAC_IPV4 ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
45 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
46 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
47 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
53 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
54 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
55 #define ICE_SW_INSET_MAC_IPV6 ( \
56 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
58 ICE_INSET_IPV6_NEXT_HDR)
59 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
60 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
61 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
62 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
63 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
64 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
65 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
66 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
67 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
68 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* VXLAN/NVGRE tunnel input sets referenced by the *_dist pattern list. */
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
70 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
73 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
75 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
76 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
77 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
78 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
79 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
80 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
81 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
82 ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
83 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
84 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
85 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
86 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
87 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
88 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
89 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
90 ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
/* Inner-header tunnel input sets referenced by the *_perm pattern list. */
91 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
92 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
93 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
94 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
95 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
96 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
98 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
99 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
100 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
102 #define ICE_SW_INSET_MAC_PPPOE ( \
103 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
104 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
105 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
106 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
107 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
108 ICE_INSET_PPPOE_PROTO)
/* PPPoE sessions carrying IPv4/IPv6 payloads. */
109 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
110 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
112 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
114 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
115 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
116 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
117 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
118 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
119 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
120 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Session-oriented protocols: ESP/AH SPI, L2TPv3 session id, PFCP SEID. */
121 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
122 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
123 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
124 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
125 #define ICE_SW_INSET_MAC_IPV4_AH ( \
126 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
127 #define ICE_SW_INSET_MAC_IPV6_AH ( \
128 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
129 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
130 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
131 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
132 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
133 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
134 ICE_SW_INSET_MAC_IPV4 | \
135 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
136 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
137 ICE_SW_INSET_MAC_IPV6 | \
138 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* GTP-U outer (TEID, optional QFI from the extension header) and inner
 * (payload 5-tuple) input sets.
 */
139 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
140 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
141 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
142 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
143 #define ICE_SW_INSET_MAC_GTPU_OUTER ( \
144 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
145 #define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
146 ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
147 #define ICE_SW_INSET_GTPU_IPV4 ( \
148 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
149 #define ICE_SW_INSET_GTPU_IPV6 ( \
150 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
151 #define ICE_SW_INSET_GTPU_IPV4_UDP ( \
152 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
153 ICE_INSET_UDP_DST_PORT)
154 #define ICE_SW_INSET_GTPU_IPV4_TCP ( \
155 ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
156 ICE_INSET_TCP_DST_PORT)
157 #define ICE_SW_INSET_GTPU_IPV6_UDP ( \
158 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
159 ICE_INSET_UDP_DST_PORT)
160 #define ICE_SW_INSET_GTPU_IPV6_TCP ( \
161 ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
162 ICE_INSET_TCP_DST_PORT)
/* Members of the parse-stage metadata (struct sw_meta) handed to
 * ice_switch_create(); the struct header and some members are not
 * visible in this chunk.
 */
165 struct ice_adv_lkup_elem *list;
167 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two parser flavors; their definitions are
 * not visible in this chunk.
 */
170 static struct ice_flow_parser ice_switch_dist_parser;
171 static struct ice_flow_parser ice_switch_perm_parser;
/* Flow patterns supported by the "dist" switch parser, each paired with
 * the ICE_SW_INSET_* masks of fields it may match (outer, then inner).
 * NOTE(review): the storage-class line above and the closing brace of
 * this array are not visible in this chunk.
 */
174 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
/* Plain L2/L3/L4 patterns. */
175 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
176 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
177 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
178 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
179 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
180 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
181 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
182 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
183 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
184 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnels: outer IPv4 dst plus inner DIST_* sets. */
185 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
186 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
188 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
189 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
190 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE, with and without VLAN tags. */
191 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
192 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
193 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
194 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
195 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
196 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
197 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
198 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
199 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
200 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
201 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
202 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
203 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
204 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
205 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
206 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* ESP / AH / L2TPv3 / PFCP. */
207 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
208 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
209 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
/* QinQ outer tagging variants. */
218 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
220 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
222 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* GTP-U, with/without the extension header (eh), IPv4/IPv6 payloads. */
224 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
227 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
228 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
229 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
230 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
231 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
232 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
233 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
234 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
235 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
236 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
237 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
238 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
239 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
240 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
241 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
242 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
243 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
244 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
245 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
246 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
247 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
248 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
249 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
/* Flow patterns supported by the "perm" switch parser.  Mirrors the
 * dist list above except for the tunnel rows, which use the PERM_*
 * inner input sets and no outer fields.
 * NOTE(review): the storage-class line above and the closing brace of
 * this array are not visible in this chunk.
 */
253 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
/* Plain L2/L3/L4 patterns. */
254 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
255 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
256 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
257 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
258 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
259 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
260 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
261 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
262 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
263 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnels: inner fields only (PERM_TUNNEL_* sets). */
264 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
265 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
266 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
267 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
268 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
269 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE, with and without VLAN tags. */
270 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
271 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
272 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
273 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
274 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
275 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
276 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
277 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
278 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
279 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
280 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
281 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
282 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
283 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
284 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
285 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* ESP / AH / L2TPv3 / PFCP. */
286 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
287 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
288 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
290 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
291 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
292 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
293 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
294 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
295 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
296 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
/* QinQ outer tagging variants. */
297 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
298 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
299 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
300 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
301 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
302 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* GTP-U, with/without the extension header (eh), IPv4/IPv6 payloads. */
303 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
304 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
305 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
306 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
307 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
308 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
309 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
310 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
311 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
312 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
313 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
314 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
315 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
316 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
317 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
318 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
319 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
320 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
321 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
322 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
323 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
324 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
325 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
326 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
327 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
328 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
/* Install a parsed switch rule into hardware and remember its rule ids
 * on @flow for later destroy.
 * NOTE(review): this chunk is missing several source lines of the
 * function (return type, the 'meta' parameter, declarations of 'ret'
 * and 'lkups_cnt', error gotos and closing braces); comments below
 * describe only the visible code.
 */
332 ice_switch_create(struct ice_adapter *ad,
333 struct rte_flow *flow,
335 struct rte_flow_error *error)
338 struct ice_pf *pf = &ad->pf;
339 struct ice_hw *hw = ICE_PF_TO_HW(pf);
340 struct ice_rule_query_data rule_added = {0};
341 struct ice_rule_query_data *filter_ptr;
/* Unpack the lookup list and rule info produced by the parse stage. */
342 struct ice_adv_lkup_elem *list =
343 ((struct sw_meta *)meta)->list;
345 ((struct sw_meta *)meta)->lkups_num;
346 struct ice_adv_rule_info *rule_info =
347 &((struct sw_meta *)meta)->rule_info;
/* Reject rules with more lookup words than the rule chain supports. */
349 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
352 "item number too large for rule");
356 rte_flow_error_set(error, EINVAL,
357 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
358 "lookup list should not be NULL");
/* Program the rule; rule_added receives the ids assigned by HW. */
361 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist a copy of the rule ids so ice_switch_destroy() can find
 * and remove the rule via flow->rule.
 */
363 filter_ptr = rte_zmalloc("ice_switch_filter",
364 sizeof(struct ice_rule_query_data), 0)#
366 rte_flow_error_set(error, EINVAL,
367 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
368 "No memory for ice_switch_filter");
371 flow->rule = filter_ptr;
372 rte_memcpy(filter_ptr,
374 sizeof(struct ice_rule_query_data));
376 rte_flow_error_set(error, EINVAL,
377 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
378 "switch filter create flow fail");
/* Remove a rule previously installed by ice_switch_create() and free
 * the rule-id bookkeeping attached to @flow.
 * NOTE(review): this chunk is missing several source lines of the
 * function (return type, null checks, gotos and closing braces);
 * comments below describe only the visible code.
 */
394 ice_switch_destroy(struct ice_adapter *ad,
395 struct rte_flow *flow,
396 struct rte_flow_error *error)
398 struct ice_hw *hw = &ad->hw;
400 struct ice_rule_query_data *filter_ptr;
/* Recover the rule ids stored on flow->rule at create time. */
402 filter_ptr = (struct ice_rule_query_data *)
406 rte_flow_error_set(error, EINVAL,
407 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
409 " create by switch filter");
/* Delete the HW rule by its recorded ids. */
413 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
415 rte_flow_error_set(error, EINVAL,
416 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
417 "fail to destroy switch filter rule");
/* Release the per-flow rule-id storage. */
421 rte_free(filter_ptr);
/* Free the per-flow rule data allocated at create time (rte_free(NULL)
 * is a no-op).  NOTE(review): the return-type line and braces of this
 * function are not visible in this chunk.
 */
426 ice_switch_filter_rule_free(struct rte_flow *flow)
428 rte_free(flow->rule);
432 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
433 struct rte_flow_error *error,
434 struct ice_adv_lkup_elem *list,
436 enum ice_sw_tunnel_type *tun_type,
437 const struct ice_pattern_match_item *pattern_match_item)
439 const struct rte_flow_item *item = pattern;
440 enum rte_flow_item_type item_type;
441 const struct rte_flow_item_eth *eth_spec, *eth_mask;
442 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
443 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
444 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
445 const struct rte_flow_item_udp *udp_spec, *udp_mask;
446 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
447 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
448 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
449 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
450 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
451 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
453 const struct rte_flow_item_esp *esp_spec, *esp_mask;
454 const struct rte_flow_item_ah *ah_spec, *ah_mask;
455 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
456 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
457 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
458 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
459 uint64_t outer_input_set = ICE_INSET_NONE;
460 uint64_t inner_input_set = ICE_INSET_NONE;
461 uint64_t *input = NULL;
462 uint16_t input_set_byte = 0;
463 bool pppoe_elem_valid = 0;
464 bool pppoe_patt_valid = 0;
465 bool pppoe_prot_valid = 0;
466 bool inner_vlan_valid = 0;
467 bool outer_vlan_valid = 0;
468 bool tunnel_valid = 0;
469 bool profile_rule = 0;
470 bool nvgre_valid = 0;
471 bool vxlan_valid = 0;
478 bool gtpu_psc_valid = 0;
479 bool inner_ipv4_valid = 0;
480 bool inner_ipv6_valid = 0;
481 bool inner_tcp_valid = 0;
482 bool inner_udp_valid = 0;
483 uint16_t j, k, t = 0;
485 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
486 *tun_type == ICE_NON_TUN_QINQ)
489 for (item = pattern; item->type !=
490 RTE_FLOW_ITEM_TYPE_END; item++) {
492 rte_flow_error_set(error, EINVAL,
493 RTE_FLOW_ERROR_TYPE_ITEM,
495 "Not support range");
498 item_type = item->type;
501 case RTE_FLOW_ITEM_TYPE_ETH:
502 eth_spec = item->spec;
503 eth_mask = item->mask;
504 if (eth_spec && eth_mask) {
505 const uint8_t *a = eth_mask->src.addr_bytes;
506 const uint8_t *b = eth_mask->dst.addr_bytes;
508 input = &inner_input_set;
510 input = &outer_input_set;
511 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
513 *input |= ICE_INSET_SMAC;
517 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
519 *input |= ICE_INSET_DMAC;
524 *input |= ICE_INSET_ETHERTYPE;
525 list[t].type = (tunnel_valid == 0) ?
526 ICE_MAC_OFOS : ICE_MAC_IL;
527 struct ice_ether_hdr *h;
528 struct ice_ether_hdr *m;
530 h = &list[t].h_u.eth_hdr;
531 m = &list[t].m_u.eth_hdr;
532 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
533 if (eth_mask->src.addr_bytes[j]) {
535 eth_spec->src.addr_bytes[j];
537 eth_mask->src.addr_bytes[j];
541 if (eth_mask->dst.addr_bytes[j]) {
543 eth_spec->dst.addr_bytes[j];
545 eth_mask->dst.addr_bytes[j];
552 if (eth_mask->type) {
553 list[t].type = ICE_ETYPE_OL;
554 list[t].h_u.ethertype.ethtype_id =
556 list[t].m_u.ethertype.ethtype_id =
564 case RTE_FLOW_ITEM_TYPE_IPV4:
565 ipv4_spec = item->spec;
566 ipv4_mask = item->mask;
568 inner_ipv4_valid = 1;
569 input = &inner_input_set;
572 input = &outer_input_set;
575 if (ipv4_spec && ipv4_mask) {
576 /* Check IPv4 mask and update input set */
577 if (ipv4_mask->hdr.version_ihl ||
578 ipv4_mask->hdr.total_length ||
579 ipv4_mask->hdr.packet_id ||
580 ipv4_mask->hdr.hdr_checksum) {
581 rte_flow_error_set(error, EINVAL,
582 RTE_FLOW_ERROR_TYPE_ITEM,
584 "Invalid IPv4 mask.");
588 if (ipv4_mask->hdr.src_addr)
589 *input |= ICE_INSET_IPV4_SRC;
590 if (ipv4_mask->hdr.dst_addr)
591 *input |= ICE_INSET_IPV4_DST;
592 if (ipv4_mask->hdr.time_to_live)
593 *input |= ICE_INSET_IPV4_TTL;
594 if (ipv4_mask->hdr.next_proto_id)
595 *input |= ICE_INSET_IPV4_PROTO;
596 if (ipv4_mask->hdr.type_of_service)
597 *input |= ICE_INSET_IPV4_TOS;
599 list[t].type = (tunnel_valid == 0) ?
600 ICE_IPV4_OFOS : ICE_IPV4_IL;
601 if (ipv4_mask->hdr.src_addr) {
602 list[t].h_u.ipv4_hdr.src_addr =
603 ipv4_spec->hdr.src_addr;
604 list[t].m_u.ipv4_hdr.src_addr =
605 ipv4_mask->hdr.src_addr;
608 if (ipv4_mask->hdr.dst_addr) {
609 list[t].h_u.ipv4_hdr.dst_addr =
610 ipv4_spec->hdr.dst_addr;
611 list[t].m_u.ipv4_hdr.dst_addr =
612 ipv4_mask->hdr.dst_addr;
615 if (ipv4_mask->hdr.time_to_live) {
616 list[t].h_u.ipv4_hdr.time_to_live =
617 ipv4_spec->hdr.time_to_live;
618 list[t].m_u.ipv4_hdr.time_to_live =
619 ipv4_mask->hdr.time_to_live;
622 if (ipv4_mask->hdr.next_proto_id) {
623 list[t].h_u.ipv4_hdr.protocol =
624 ipv4_spec->hdr.next_proto_id;
625 list[t].m_u.ipv4_hdr.protocol =
626 ipv4_mask->hdr.next_proto_id;
629 if ((ipv4_spec->hdr.next_proto_id &
630 ipv4_mask->hdr.next_proto_id) ==
631 ICE_IPV4_PROTO_NVGRE)
632 *tun_type = ICE_SW_TUN_AND_NON_TUN;
633 if (ipv4_mask->hdr.type_of_service) {
634 list[t].h_u.ipv4_hdr.tos =
635 ipv4_spec->hdr.type_of_service;
636 list[t].m_u.ipv4_hdr.tos =
637 ipv4_mask->hdr.type_of_service;
644 case RTE_FLOW_ITEM_TYPE_IPV6:
645 ipv6_spec = item->spec;
646 ipv6_mask = item->mask;
648 inner_ipv6_valid = 1;
649 input = &inner_input_set;
652 input = &outer_input_set;
655 if (ipv6_spec && ipv6_mask) {
656 if (ipv6_mask->hdr.payload_len) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ITEM,
660 "Invalid IPv6 mask");
664 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
665 if (ipv6_mask->hdr.src_addr[j]) {
666 *input |= ICE_INSET_IPV6_SRC;
670 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
671 if (ipv6_mask->hdr.dst_addr[j]) {
672 *input |= ICE_INSET_IPV6_DST;
676 if (ipv6_mask->hdr.proto)
677 *input |= ICE_INSET_IPV6_NEXT_HDR;
678 if (ipv6_mask->hdr.hop_limits)
679 *input |= ICE_INSET_IPV6_HOP_LIMIT;
680 if (ipv6_mask->hdr.vtc_flow &
681 rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
682 *input |= ICE_INSET_IPV6_TC;
684 list[t].type = (tunnel_valid == 0) ?
685 ICE_IPV6_OFOS : ICE_IPV6_IL;
686 struct ice_ipv6_hdr *f;
687 struct ice_ipv6_hdr *s;
688 f = &list[t].h_u.ipv6_hdr;
689 s = &list[t].m_u.ipv6_hdr;
690 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
691 if (ipv6_mask->hdr.src_addr[j]) {
693 ipv6_spec->hdr.src_addr[j];
695 ipv6_mask->hdr.src_addr[j];
698 if (ipv6_mask->hdr.dst_addr[j]) {
700 ipv6_spec->hdr.dst_addr[j];
702 ipv6_mask->hdr.dst_addr[j];
706 if (ipv6_mask->hdr.proto) {
708 ipv6_spec->hdr.proto;
710 ipv6_mask->hdr.proto;
713 if (ipv6_mask->hdr.hop_limits) {
715 ipv6_spec->hdr.hop_limits;
717 ipv6_mask->hdr.hop_limits;
720 if (ipv6_mask->hdr.vtc_flow &
722 (RTE_IPV6_HDR_TC_MASK)) {
723 struct ice_le_ver_tc_flow vtf;
724 vtf.u.fld.version = 0;
725 vtf.u.fld.flow_label = 0;
726 vtf.u.fld.tc = (rte_be_to_cpu_32
727 (ipv6_spec->hdr.vtc_flow) &
728 RTE_IPV6_HDR_TC_MASK) >>
729 RTE_IPV6_HDR_TC_SHIFT;
730 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
731 vtf.u.fld.tc = (rte_be_to_cpu_32
732 (ipv6_mask->hdr.vtc_flow) &
733 RTE_IPV6_HDR_TC_MASK) >>
734 RTE_IPV6_HDR_TC_SHIFT;
735 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
742 case RTE_FLOW_ITEM_TYPE_UDP:
743 udp_spec = item->spec;
744 udp_mask = item->mask;
747 input = &inner_input_set;
750 input = &outer_input_set;
753 if (udp_spec && udp_mask) {
754 /* Check UDP mask and update input set*/
755 if (udp_mask->hdr.dgram_len ||
756 udp_mask->hdr.dgram_cksum) {
757 rte_flow_error_set(error, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ITEM,
764 if (udp_mask->hdr.src_port)
765 *input |= ICE_INSET_UDP_SRC_PORT;
766 if (udp_mask->hdr.dst_port)
767 *input |= ICE_INSET_UDP_DST_PORT;
769 if (*tun_type == ICE_SW_TUN_VXLAN &&
771 list[t].type = ICE_UDP_OF;
773 list[t].type = ICE_UDP_ILOS;
774 if (udp_mask->hdr.src_port) {
775 list[t].h_u.l4_hdr.src_port =
776 udp_spec->hdr.src_port;
777 list[t].m_u.l4_hdr.src_port =
778 udp_mask->hdr.src_port;
781 if (udp_mask->hdr.dst_port) {
782 list[t].h_u.l4_hdr.dst_port =
783 udp_spec->hdr.dst_port;
784 list[t].m_u.l4_hdr.dst_port =
785 udp_mask->hdr.dst_port;
792 case RTE_FLOW_ITEM_TYPE_TCP:
793 tcp_spec = item->spec;
794 tcp_mask = item->mask;
797 input = &inner_input_set;
800 input = &outer_input_set;
803 if (tcp_spec && tcp_mask) {
804 /* Check TCP mask and update input set */
805 if (tcp_mask->hdr.sent_seq ||
806 tcp_mask->hdr.recv_ack ||
807 tcp_mask->hdr.data_off ||
808 tcp_mask->hdr.tcp_flags ||
809 tcp_mask->hdr.rx_win ||
810 tcp_mask->hdr.cksum ||
811 tcp_mask->hdr.tcp_urp) {
812 rte_flow_error_set(error, EINVAL,
813 RTE_FLOW_ERROR_TYPE_ITEM,
819 if (tcp_mask->hdr.src_port)
820 *input |= ICE_INSET_TCP_SRC_PORT;
821 if (tcp_mask->hdr.dst_port)
822 *input |= ICE_INSET_TCP_DST_PORT;
823 list[t].type = ICE_TCP_IL;
824 if (tcp_mask->hdr.src_port) {
825 list[t].h_u.l4_hdr.src_port =
826 tcp_spec->hdr.src_port;
827 list[t].m_u.l4_hdr.src_port =
828 tcp_mask->hdr.src_port;
831 if (tcp_mask->hdr.dst_port) {
832 list[t].h_u.l4_hdr.dst_port =
833 tcp_spec->hdr.dst_port;
834 list[t].m_u.l4_hdr.dst_port =
835 tcp_mask->hdr.dst_port;
842 case RTE_FLOW_ITEM_TYPE_SCTP:
843 sctp_spec = item->spec;
844 sctp_mask = item->mask;
845 if (sctp_spec && sctp_mask) {
846 /* Check SCTP mask and update input set */
847 if (sctp_mask->hdr.cksum) {
848 rte_flow_error_set(error, EINVAL,
849 RTE_FLOW_ERROR_TYPE_ITEM,
851 "Invalid SCTP mask");
855 input = &inner_input_set;
857 input = &outer_input_set;
859 if (sctp_mask->hdr.src_port)
860 *input |= ICE_INSET_SCTP_SRC_PORT;
861 if (sctp_mask->hdr.dst_port)
862 *input |= ICE_INSET_SCTP_DST_PORT;
864 list[t].type = ICE_SCTP_IL;
865 if (sctp_mask->hdr.src_port) {
866 list[t].h_u.sctp_hdr.src_port =
867 sctp_spec->hdr.src_port;
868 list[t].m_u.sctp_hdr.src_port =
869 sctp_mask->hdr.src_port;
872 if (sctp_mask->hdr.dst_port) {
873 list[t].h_u.sctp_hdr.dst_port =
874 sctp_spec->hdr.dst_port;
875 list[t].m_u.sctp_hdr.dst_port =
876 sctp_mask->hdr.dst_port;
883 case RTE_FLOW_ITEM_TYPE_VXLAN:
884 vxlan_spec = item->spec;
885 vxlan_mask = item->mask;
886 /* Check if VXLAN item is used to describe protocol.
887 * If yes, both spec and mask should be NULL.
888 * If no, both spec and mask shouldn't be NULL.
890 if ((!vxlan_spec && vxlan_mask) ||
891 (vxlan_spec && !vxlan_mask)) {
892 rte_flow_error_set(error, EINVAL,
893 RTE_FLOW_ERROR_TYPE_ITEM,
895 "Invalid VXLAN item");
900 input = &inner_input_set;
901 if (vxlan_spec && vxlan_mask) {
902 list[t].type = ICE_VXLAN;
903 if (vxlan_mask->vni[0] ||
904 vxlan_mask->vni[1] ||
905 vxlan_mask->vni[2]) {
906 list[t].h_u.tnl_hdr.vni =
907 (vxlan_spec->vni[2] << 16) |
908 (vxlan_spec->vni[1] << 8) |
910 list[t].m_u.tnl_hdr.vni =
911 (vxlan_mask->vni[2] << 16) |
912 (vxlan_mask->vni[1] << 8) |
914 *input |= ICE_INSET_VXLAN_VNI;
921 case RTE_FLOW_ITEM_TYPE_NVGRE:
922 nvgre_spec = item->spec;
923 nvgre_mask = item->mask;
924 /* Check if NVGRE item is used to describe protocol.
925 * If yes, both spec and mask should be NULL.
926 * If no, both spec and mask shouldn't be NULL.
928 if ((!nvgre_spec && nvgre_mask) ||
929 (nvgre_spec && !nvgre_mask)) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM,
933 "Invalid NVGRE item");
938 input = &inner_input_set;
939 if (nvgre_spec && nvgre_mask) {
940 list[t].type = ICE_NVGRE;
941 if (nvgre_mask->tni[0] ||
942 nvgre_mask->tni[1] ||
943 nvgre_mask->tni[2]) {
944 list[t].h_u.nvgre_hdr.tni_flow =
945 (nvgre_spec->tni[2] << 16) |
946 (nvgre_spec->tni[1] << 8) |
948 list[t].m_u.nvgre_hdr.tni_flow =
949 (nvgre_mask->tni[2] << 16) |
950 (nvgre_mask->tni[1] << 8) |
952 *input |= ICE_INSET_NVGRE_TNI;
959 case RTE_FLOW_ITEM_TYPE_VLAN:
960 vlan_spec = item->spec;
961 vlan_mask = item->mask;
962 /* Check if VLAN item is used to describe protocol.
963 * If yes, both spec and mask should be NULL.
964 * If no, both spec and mask shouldn't be NULL.
966 if ((!vlan_spec && vlan_mask) ||
967 (vlan_spec && !vlan_mask)) {
968 rte_flow_error_set(error, EINVAL,
969 RTE_FLOW_ERROR_TYPE_ITEM,
971 "Invalid VLAN item");
976 if (!outer_vlan_valid)
977 outer_vlan_valid = 1;
979 inner_vlan_valid = 1;
982 input = &outer_input_set;
984 if (vlan_spec && vlan_mask) {
986 if (!inner_vlan_valid) {
987 list[t].type = ICE_VLAN_EX;
989 ICE_INSET_VLAN_OUTER;
991 list[t].type = ICE_VLAN_IN;
993 ICE_INSET_VLAN_INNER;
996 list[t].type = ICE_VLAN_OFOS;
997 *input |= ICE_INSET_VLAN_INNER;
1000 if (vlan_mask->tci) {
1001 list[t].h_u.vlan_hdr.vlan =
1003 list[t].m_u.vlan_hdr.vlan =
1005 input_set_byte += 2;
1007 if (vlan_mask->inner_type) {
1008 rte_flow_error_set(error, EINVAL,
1009 RTE_FLOW_ERROR_TYPE_ITEM,
1011 "Invalid VLAN input set.");
1018 case RTE_FLOW_ITEM_TYPE_PPPOED:
1019 case RTE_FLOW_ITEM_TYPE_PPPOES:
1020 pppoe_spec = item->spec;
1021 pppoe_mask = item->mask;
1022 /* Check if PPPoE item is used to describe protocol.
1023 * If yes, both spec and mask should be NULL.
1024 * If no, both spec and mask shouldn't be NULL.
1026 if ((!pppoe_spec && pppoe_mask) ||
1027 (pppoe_spec && !pppoe_mask)) {
1028 rte_flow_error_set(error, EINVAL,
1029 RTE_FLOW_ERROR_TYPE_ITEM,
1031 "Invalid pppoe item");
1034 pppoe_patt_valid = 1;
1035 input = &outer_input_set;
1036 if (pppoe_spec && pppoe_mask) {
1037 /* Check pppoe mask and update input set */
1038 if (pppoe_mask->length ||
1040 pppoe_mask->version_type) {
1041 rte_flow_error_set(error, EINVAL,
1042 RTE_FLOW_ERROR_TYPE_ITEM,
1044 "Invalid pppoe mask");
1047 list[t].type = ICE_PPPOE;
1048 if (pppoe_mask->session_id) {
1049 list[t].h_u.pppoe_hdr.session_id =
1050 pppoe_spec->session_id;
1051 list[t].m_u.pppoe_hdr.session_id =
1052 pppoe_mask->session_id;
1053 *input |= ICE_INSET_PPPOE_SESSION;
1054 input_set_byte += 2;
1057 pppoe_elem_valid = 1;
1061 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1062 pppoe_proto_spec = item->spec;
1063 pppoe_proto_mask = item->mask;
1064 /* Check if PPPoE optional proto_id item
1065 * is used to describe protocol.
1066 * If yes, both spec and mask should be NULL.
1067 * If no, both spec and mask shouldn't be NULL.
1069 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1070 (pppoe_proto_spec && !pppoe_proto_mask)) {
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1074 "Invalid pppoe proto item");
1077 input = &outer_input_set;
1078 if (pppoe_proto_spec && pppoe_proto_mask) {
1079 if (pppoe_elem_valid)
1081 list[t].type = ICE_PPPOE;
1082 if (pppoe_proto_mask->proto_id) {
1083 list[t].h_u.pppoe_hdr.ppp_prot_id =
1084 pppoe_proto_spec->proto_id;
1085 list[t].m_u.pppoe_hdr.ppp_prot_id =
1086 pppoe_proto_mask->proto_id;
1087 *input |= ICE_INSET_PPPOE_PROTO;
1088 input_set_byte += 2;
1089 pppoe_prot_valid = 1;
1091 if ((pppoe_proto_mask->proto_id &
1092 pppoe_proto_spec->proto_id) !=
1093 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1094 (pppoe_proto_mask->proto_id &
1095 pppoe_proto_spec->proto_id) !=
1096 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1097 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1099 *tun_type = ICE_SW_TUN_PPPOE;
1105 case RTE_FLOW_ITEM_TYPE_ESP:
1106 esp_spec = item->spec;
1107 esp_mask = item->mask;
1108 if ((esp_spec && !esp_mask) ||
1109 (!esp_spec && esp_mask)) {
1110 rte_flow_error_set(error, EINVAL,
1111 RTE_FLOW_ERROR_TYPE_ITEM,
1113 "Invalid esp item");
1116 /* Check esp mask and update input set */
1117 if (esp_mask && esp_mask->hdr.seq) {
1118 rte_flow_error_set(error, EINVAL,
1119 RTE_FLOW_ERROR_TYPE_ITEM,
1121 "Invalid esp mask");
1124 input = &outer_input_set;
1125 if (!esp_spec && !esp_mask && !(*input)) {
1127 if (ipv6_valid && udp_valid)
1129 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1130 else if (ipv6_valid)
1131 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1132 else if (ipv4_valid)
1134 } else if (esp_spec && esp_mask &&
1137 list[t].type = ICE_NAT_T;
1139 list[t].type = ICE_ESP;
1140 list[t].h_u.esp_hdr.spi =
1142 list[t].m_u.esp_hdr.spi =
1144 *input |= ICE_INSET_ESP_SPI;
1145 input_set_byte += 4;
1149 if (!profile_rule) {
1150 if (ipv6_valid && udp_valid)
1151 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1152 else if (ipv4_valid && udp_valid)
1153 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1154 else if (ipv6_valid)
1155 *tun_type = ICE_SW_TUN_IPV6_ESP;
1156 else if (ipv4_valid)
1157 *tun_type = ICE_SW_TUN_IPV4_ESP;
1161 case RTE_FLOW_ITEM_TYPE_AH:
1162 ah_spec = item->spec;
1163 ah_mask = item->mask;
1164 if ((ah_spec && !ah_mask) ||
1165 (!ah_spec && ah_mask)) {
1166 rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ITEM,
1172 /* Check ah mask and update input set */
1174 (ah_mask->next_hdr ||
1175 ah_mask->payload_len ||
1177 ah_mask->reserved)) {
1178 rte_flow_error_set(error, EINVAL,
1179 RTE_FLOW_ERROR_TYPE_ITEM,
1185 input = &outer_input_set;
1186 if (!ah_spec && !ah_mask && !(*input)) {
1188 if (ipv6_valid && udp_valid)
1190 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1191 else if (ipv6_valid)
1192 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1193 else if (ipv4_valid)
1195 } else if (ah_spec && ah_mask &&
1197 list[t].type = ICE_AH;
1198 list[t].h_u.ah_hdr.spi =
1200 list[t].m_u.ah_hdr.spi =
1202 *input |= ICE_INSET_AH_SPI;
1203 input_set_byte += 4;
1207 if (!profile_rule) {
1210 else if (ipv6_valid)
1211 *tun_type = ICE_SW_TUN_IPV6_AH;
1212 else if (ipv4_valid)
1213 *tun_type = ICE_SW_TUN_IPV4_AH;
1217 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1218 l2tp_spec = item->spec;
1219 l2tp_mask = item->mask;
1220 if ((l2tp_spec && !l2tp_mask) ||
1221 (!l2tp_spec && l2tp_mask)) {
1222 rte_flow_error_set(error, EINVAL,
1223 RTE_FLOW_ERROR_TYPE_ITEM,
1225 "Invalid l2tp item");
1229 input = &outer_input_set;
1230 if (!l2tp_spec && !l2tp_mask && !(*input)) {
1233 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1234 else if (ipv4_valid)
1236 } else if (l2tp_spec && l2tp_mask &&
1237 l2tp_mask->session_id){
1238 list[t].type = ICE_L2TPV3;
1239 list[t].h_u.l2tpv3_sess_hdr.session_id =
1240 l2tp_spec->session_id;
1241 list[t].m_u.l2tpv3_sess_hdr.session_id =
1242 l2tp_mask->session_id;
1243 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1244 input_set_byte += 4;
1248 if (!profile_rule) {
1251 ICE_SW_TUN_IPV6_L2TPV3;
1252 else if (ipv4_valid)
1254 ICE_SW_TUN_IPV4_L2TPV3;
1258 case RTE_FLOW_ITEM_TYPE_PFCP:
1259 pfcp_spec = item->spec;
1260 pfcp_mask = item->mask;
1261 /* Check if PFCP item is used to describe protocol.
1262 * If yes, both spec and mask should be NULL.
1263 * If no, both spec and mask shouldn't be NULL.
1265 if ((!pfcp_spec && pfcp_mask) ||
1266 (pfcp_spec && !pfcp_mask)) {
1267 rte_flow_error_set(error, EINVAL,
1268 RTE_FLOW_ERROR_TYPE_ITEM,
1270 "Invalid PFCP item");
1273 if (pfcp_spec && pfcp_mask) {
1274 /* Check pfcp mask and update input set */
1275 if (pfcp_mask->msg_type ||
1276 pfcp_mask->msg_len ||
1278 rte_flow_error_set(error, EINVAL,
1279 RTE_FLOW_ERROR_TYPE_ITEM,
1281 "Invalid pfcp mask");
1284 if (pfcp_mask->s_field &&
1285 pfcp_spec->s_field == 0x01 &&
1288 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1289 else if (pfcp_mask->s_field &&
1290 pfcp_spec->s_field == 0x01)
1292 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1293 else if (pfcp_mask->s_field &&
1294 !pfcp_spec->s_field &&
1297 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1298 else if (pfcp_mask->s_field &&
1299 !pfcp_spec->s_field)
1301 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1307 case RTE_FLOW_ITEM_TYPE_GTPU:
1308 gtp_spec = item->spec;
1309 gtp_mask = item->mask;
1310 if (gtp_spec && !gtp_mask) {
1311 rte_flow_error_set(error, EINVAL,
1312 RTE_FLOW_ERROR_TYPE_ITEM,
1314 "Invalid GTP item");
1317 if (gtp_spec && gtp_mask) {
1318 if (gtp_mask->v_pt_rsv_flags ||
1319 gtp_mask->msg_type ||
1320 gtp_mask->msg_len) {
1321 rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ITEM,
1324 "Invalid GTP mask");
1327 input = &outer_input_set;
1329 *input |= ICE_INSET_GTPU_TEID;
1330 list[t].type = ICE_GTP;
1331 list[t].h_u.gtp_hdr.teid =
1333 list[t].m_u.gtp_hdr.teid =
1335 input_set_byte += 4;
1342 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1343 gtp_psc_spec = item->spec;
1344 gtp_psc_mask = item->mask;
1345 if (gtp_psc_spec && !gtp_psc_mask) {
1346 rte_flow_error_set(error, EINVAL,
1347 RTE_FLOW_ERROR_TYPE_ITEM,
1349 "Invalid GTPU_EH item");
1352 if (gtp_psc_spec && gtp_psc_mask) {
1353 if (gtp_psc_mask->pdu_type) {
1354 rte_flow_error_set(error, EINVAL,
1355 RTE_FLOW_ERROR_TYPE_ITEM,
1357 "Invalid GTPU_EH mask");
1360 input = &outer_input_set;
1361 if (gtp_psc_mask->qfi)
1362 *input |= ICE_INSET_GTPU_QFI;
1363 list[t].type = ICE_GTP;
1364 list[t].h_u.gtp_hdr.qfi =
1366 list[t].m_u.gtp_hdr.qfi =
1368 input_set_byte += 1;
1374 case RTE_FLOW_ITEM_TYPE_VOID:
1378 rte_flow_error_set(error, EINVAL,
1379 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1380 "Invalid pattern item.");
1385 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1386 inner_vlan_valid && outer_vlan_valid)
1387 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1388 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1389 inner_vlan_valid && outer_vlan_valid)
1390 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1391 else if (*tun_type == ICE_NON_TUN &&
1392 inner_vlan_valid && outer_vlan_valid)
1393 *tun_type = ICE_NON_TUN_QINQ;
1394 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1395 inner_vlan_valid && outer_vlan_valid)
1396 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1398 if (pppoe_patt_valid && !pppoe_prot_valid) {
1399 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1400 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1401 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1402 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1403 else if (inner_vlan_valid && outer_vlan_valid)
1404 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1405 else if (ipv6_valid && udp_valid)
1406 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1407 else if (ipv6_valid && tcp_valid)
1408 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1409 else if (ipv4_valid && udp_valid)
1410 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1411 else if (ipv4_valid && tcp_valid)
1412 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1413 else if (ipv6_valid)
1414 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1415 else if (ipv4_valid)
1416 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1418 *tun_type = ICE_SW_TUN_PPPOE;
1421 if (gtpu_valid && gtpu_psc_valid) {
1422 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1423 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1424 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1425 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1426 else if (ipv4_valid && inner_ipv4_valid)
1427 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1428 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1429 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1430 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1431 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1432 else if (ipv4_valid && inner_ipv6_valid)
1433 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1434 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1435 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1436 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1437 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1438 else if (ipv6_valid && inner_ipv4_valid)
1439 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1440 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1441 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1442 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1443 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1444 else if (ipv6_valid && inner_ipv6_valid)
1445 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1446 else if (ipv4_valid)
1447 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1448 else if (ipv6_valid)
1449 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1450 } else if (gtpu_valid) {
1451 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1452 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1453 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1454 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1455 else if (ipv4_valid && inner_ipv4_valid)
1456 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1457 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1458 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1459 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1460 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1461 else if (ipv4_valid && inner_ipv6_valid)
1462 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1463 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1464 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1465 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1466 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1467 else if (ipv6_valid && inner_ipv4_valid)
1468 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1470 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1471 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1472 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1473 else if (ipv6_valid && inner_ipv6_valid)
1474 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1475 else if (ipv4_valid)
1476 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1477 else if (ipv6_valid)
1478 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1481 if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1482 *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1483 for (k = 0; k < t; k++) {
1484 if (list[k].type == ICE_GTP)
1485 list[k].type = ICE_GTP_NO_PAY;
1489 if (*tun_type == ICE_NON_TUN) {
1491 *tun_type = ICE_SW_TUN_VXLAN;
1492 else if (nvgre_valid)
1493 *tun_type = ICE_SW_TUN_NVGRE;
1494 else if (ipv4_valid && tcp_valid)
1495 *tun_type = ICE_SW_IPV4_TCP;
1496 else if (ipv4_valid && udp_valid)
1497 *tun_type = ICE_SW_IPV4_UDP;
1498 else if (ipv6_valid && tcp_valid)
1499 *tun_type = ICE_SW_IPV6_TCP;
1500 else if (ipv6_valid && udp_valid)
1501 *tun_type = ICE_SW_IPV6_UDP;
1504 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1505 rte_flow_error_set(error, EINVAL,
1506 RTE_FLOW_ERROR_TYPE_ITEM,
1508 "too much input set");
1515 if ((!outer_input_set && !inner_input_set &&
1516 !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1517 ~pattern_match_item->input_set_mask_o) ||
1518 (inner_input_set & ~pattern_match_item->input_set_mask_i))
/* Parse rte_flow actions when running on top of a DCF (Device Config
 * Function) representor.  Only the VF forward action and DROP are
 * accepted; anything else is rejected with RTE_FLOW_ERROR_TYPE_ACTION.
 * On success the destination VSI, direction flag and rule priority are
 * written into @rule_info->sw_act.
 */
1525 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1526 const struct rte_flow_action *actions,
1528 struct rte_flow_error *error,
1529 struct ice_adv_rule_info *rule_info)
1531 const struct rte_flow_action_vf *act_vf;
1532 const struct rte_flow_action *action;
1533 enum rte_flow_action_type action_type;
1535 for (action = actions; action->type !=
1536 RTE_FLOW_ACTION_TYPE_END; action++) {
1537 action_type = action->type;
1538 switch (action_type) {
1539 case RTE_FLOW_ACTION_TYPE_VF:
/* Forward to the VSI that backs the requested VF. */
1540 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1541 act_vf = action->conf;
/* A non-"original" VF id must be within the number of VFs
 * managed by this DCF, otherwise the action is invalid.
 */
1543 if (act_vf->id >= ad->real_hw.num_vfs &&
1544 !act_vf->original) {
1545 rte_flow_error_set(error,
1546 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* "original" selects the DCF's own function as the target
 * VSI handle instead of an explicit VF id.
 */
1552 if (act_vf->original)
1553 rule_info->sw_act.vsi_handle =
1554 ad->real_hw.avf.bus.func;
1556 rule_info->sw_act.vsi_handle = act_vf->id;
1559 case RTE_FLOW_ACTION_TYPE_DROP:
1560 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1564 rte_flow_error_set(error,
1565 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1567 "Invalid action type");
/* Rules match on the receive path from the selected VSI. */
1572 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1573 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Map the rte_flow priority onto the switch rule priority;
 * NOTE(review): the 'priority' parameter is not visible in this
 * elided signature — presumably passed by the caller; confirm.
 */
1575 rule_info->priority = 6 - priority;
/* Parse rte_flow actions for the PF (non-DCF) path.  Supports:
 *  - RSS used as a queue-group action (queue_num must be one of the
 *    powers of two in valid_qgrop_number[] and the queues contiguous),
 *  - QUEUE (forward to a single Rx queue),
 *  - DROP, and VOID.
 * Fills @rule_info->sw_act with the forward target relative to the
 * PF's base queue; errors go through the labels at the bottom.
 */
1581 ice_switch_parse_action(struct ice_pf *pf,
1582 const struct rte_flow_action *actions,
1584 struct rte_flow_error *error,
1585 struct ice_adv_rule_info *rule_info)
1587 struct ice_vsi *vsi = pf->main_vsi;
1588 struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1589 const struct rte_flow_action_queue *act_q;
1590 const struct rte_flow_action_rss *act_qgrop;
1591 uint16_t base_queue, i;
1592 const struct rte_flow_action *action;
1593 enum rte_flow_action_type action_type;
/* Hardware only accepts these queue-group sizes. */
1594 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1595 2, 4, 8, 16, 32, 64, 128};
/* Queue ids in the rule are absolute; offset by the PF/VSI base. */
1597 base_queue = pf->base_queue + vsi->base_queue;
1598 for (action = actions; action->type !=
1599 RTE_FLOW_ACTION_TYPE_END; action++) {
1600 action_type = action->type;
1601 switch (action_type) {
1602 case RTE_FLOW_ACTION_TYPE_RSS:
1603 act_qgrop = action->conf;
/* A single-queue RSS action is not a valid queue group. */
1604 if (act_qgrop->queue_num <= 1)
1606 rule_info->sw_act.fltr_act =
1608 rule_info->sw_act.fwd_id.q_id =
1609 base_queue + act_qgrop->queue[0]
1610 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1611 if (act_qgrop->queue_num ==
1612 valid_qgrop_number[i])
/* queue_num not in valid_qgrop_number[] -> reject. */
1615 if (i == MAX_QGRP_NUM_TYPE)
/* The whole group must fit inside the device's Rx queues. */
1617 if ((act_qgrop->queue[0] +
1618 act_qgrop->queue_num) >
1619 dev_data->nb_rx_queues)
/* Queues in the group must be consecutive. */
1621 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1622 if (act_qgrop->queue[i + 1] !=
1623 act_qgrop->queue[i] + 1)
1625 rule_info->sw_act.qgrp_size =
1626 act_qgrop->queue_num;
1628 case RTE_FLOW_ACTION_TYPE_QUEUE:
1629 act_q = action->conf;
1630 if (act_q->index >= dev_data->nb_rx_queues)
1632 rule_info->sw_act.fltr_act =
1634 rule_info->sw_act.fwd_id.q_id =
1635 base_queue + act_q->index;
1638 case RTE_FLOW_ACTION_TYPE_DROP:
1639 rule_info->sw_act.fltr_act =
1643 case RTE_FLOW_ACTION_TYPE_VOID:
1651 rule_info->sw_act.vsi_handle = vsi->idx;
1653 rule_info->sw_act.src = vsi->idx;
/* NOTE(review): PF rules use priority + 5 while the DCF path uses
 * 6 - priority; presumably intentional per-path mapping — confirm.
 */
1654 rule_info->priority = priority + 5;
/* Error exits (labels elided in this view). */
1659 rte_flow_error_set(error,
1660 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1662 "Invalid action type or queue number");
1666 rte_flow_error_set(error,
1667 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1669 "Invalid queue region indexes");
1673 rte_flow_error_set(error,
1674 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1676 "Discontinuous queue region");
/* Validate the action list: count the supported fate actions
 * (VF/RSS/QUEUE/DROP, VOID is ignored) and require exactly one.
 * Unknown action types and any count != 1 are flagged through @error.
 */
1681 ice_switch_check_action(const struct rte_flow_action *actions,
1682 struct rte_flow_error *error)
1684 const struct rte_flow_action *action;
1685 enum rte_flow_action_type action_type;
1686 uint16_t actions_num = 0;
1688 for (action = actions; action->type !=
1689 RTE_FLOW_ACTION_TYPE_END; action++) {
1690 action_type = action->type;
1691 switch (action_type) {
1692 case RTE_FLOW_ACTION_TYPE_VF:
1693 case RTE_FLOW_ACTION_TYPE_RSS:
1694 case RTE_FLOW_ACTION_TYPE_QUEUE:
1695 case RTE_FLOW_ACTION_TYPE_DROP:
1698 case RTE_FLOW_ACTION_TYPE_VOID:
1701 rte_flow_error_set(error,
1702 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1704 "Invalid action type");
/* Exactly one fate action is allowed per rule. */
1709 if (actions_num != 1) {
1710 rte_flow_error_set(error,
1711 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1713 "Invalid action number");
/* Top-level parse entry for the switch engine.
 * Pre-scans the pattern to size the lookup list and detect QinQ /
 * tunnel-and-non-tunnel cases, allocates the lookup array and a
 * struct sw_meta, matches the pattern against @array, then parses
 * the pattern and the actions (DCF or PF path).  On success, hands
 * ownership of the filled sw_meta back through *meta; on failure the
 * allocations are released via the error paths at the bottom.
 */
1721 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1722 struct ice_pattern_match_item *array,
1724 const struct rte_flow_item pattern[],
1725 const struct rte_flow_action actions[],
1728 struct rte_flow_error *error)
1730 struct ice_pf *pf = &ad->pf;
1732 struct sw_meta *sw_meta_ptr = NULL;
1733 struct ice_adv_rule_info rule_info;
1734 struct ice_adv_lkup_elem *list = NULL;
1735 uint16_t lkups_num = 0;
1736 const struct rte_flow_item *item = pattern;
1737 uint16_t item_num = 0;
1738 uint16_t vlan_num = 0;
1739 enum ice_sw_tunnel_type tun_type =
1741 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass: count items, VLAN tags, and spot a fully-masked
 * ether type (rule should hit both tunnel and non-tunnel traffic).
 */
1743 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1745 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1746 const struct rte_flow_item_eth *eth_mask;
1748 eth_mask = item->mask;
1751 if (eth_mask->type == UINT16_MAX)
1752 tun_type = ICE_SW_TUN_AND_NON_TUN;
1755 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1758 /* reserve one more memory slot for ETH which may
1759 * consume 2 lookup items.
1761 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN tags -> QinQ variant of the tunnel type. */
1765 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1766 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1767 else if (vlan_num == 2)
1768 tun_type = ICE_NON_TUN_QINQ;
1770 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1772 rte_flow_error_set(error, EINVAL,
1773 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1774 "No memory for PMD internal items");
1779 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1781 rte_flow_error_set(error, EINVAL,
1782 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1783 "No memory for sw_pattern_meta_ptr");
1787 pattern_match_item =
1788 ice_search_pattern_match_item(ad, pattern, array, array_len,
1790 if (!pattern_match_item) {
1791 rte_flow_error_set(error, EINVAL,
1792 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1793 "Invalid input pattern")
1797 if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
1798 &tun_type, pattern_match_item)) {
1799 rte_flow_error_set(error, EINVAL,
1800 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1802 "Invalid input set");
1806 memset(&rule_info, 0, sizeof(rule_info));
1807 rule_info.tun_type = tun_type;
1809 ret = ice_switch_check_action(actions, error);
/* DCF and PF use different action parsers / priority mappings. */
1813 if (ad->hw.dcf_enabled)
1814 ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
1817 ret = ice_switch_parse_action(pf, actions, priority, error,
/* Success: transfer ownership of list + rule_info to *meta. */
1824 *meta = sw_meta_ptr;
1825 ((struct sw_meta *)*meta)->list = list;
1826 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1827 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup (goto labels elided in this view). */
1830 rte_free(sw_meta_ptr);
1833 rte_free(pattern_match_item);
1839 rte_free(sw_meta_ptr);
1840 rte_free(pattern_match_item);
/* Flow query stub: the switch engine has no counter support, so any
 * COUNT query is rejected unconditionally.
 */
1846 ice_switch_query(struct ice_adapter *ad __rte_unused,
1847 struct rte_flow *flow __rte_unused,
1848 struct rte_flow_query_count *count __rte_unused,
1849 struct rte_flow_error *error)
1851 rte_flow_error_set(error, EINVAL,
1852 RTE_FLOW_ERROR_TYPE_HANDLE,
1854 "count action not supported by switch filter");
/* Re-point an installed switch rule at a new VSI number (used when a
 * VF VSI is replaced, e.g. after VF reset).  The flow's rule is looked
 * up in the recipe's filter list, its lookup elements are duplicated,
 * the old HW rule is removed, the VSI context is updated to the new
 * VSI number, and the rule is replayed with the saved lookups.
 */
1860 ice_switch_redirect(struct ice_adapter *ad,
1861 struct rte_flow *flow,
1862 struct ice_flow_redirect *rd)
1864 struct ice_rule_query_data *rdata = flow->rule;
1865 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1866 struct ice_adv_lkup_elem *lkups_dp = NULL;
1867 struct LIST_HEAD_TYPE *list_head;
1868 struct ice_adv_rule_info rinfo;
1869 struct ice_hw *hw = &ad->hw;
1870 struct ice_switch_info *sw;
/* Only rules bound to the redirected VSI are affected. */
1874 if (rdata->vsi_handle != rd->vsi_handle)
1877 sw = hw->switch_info;
1878 if (!sw->recp_list[rdata->rid].recp_created)
1881 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1884 list_head = &sw->recp_list[rdata->rid].filt_rules;
1885 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1887 rinfo = list_itr->rule_info;
/* Match by rule id, either a direct FWD_TO_VSI to this VSI or a
 * FWD_TO_VSI_LIST entry (converted to FWD_TO_VSI below).
 */
1888 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1889 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1890 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1891 (rinfo.fltr_rule_id == rdata->rule_id &&
1892 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Duplicate the lookups: removing the old rule frees the
 * originals, but they are needed for the replay.
 */
1893 lkups_cnt = list_itr->lkups_cnt;
1894 lkups_dp = (struct ice_adv_lkup_elem *)
1895 ice_memdup(hw, list_itr->lkups,
1896 sizeof(*list_itr->lkups) *
1897 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1900 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1904 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1905 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1906 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1915 /* Remove the old rule */
1916 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1919 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1925 /* Update VSI context */
1926 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1928 /* Replay the rule */
1929 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1932 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1937 ice_free(hw, lkups_dp);
/* Engine init hook: register either the permission-stage parser (when
 * devargs pipe-mode is enabled) or the distributor-stage parser.
 */
1942 ice_switch_init(struct ice_adapter *ad)
1945 struct ice_flow_parser *dist_parser;
1946 struct ice_flow_parser *perm_parser;
1948 if (ad->devargs.pipe_mode_support) {
1949 perm_parser = &ice_switch_perm_parser;
1950 ret = ice_register_parser(perm_parser, ad);
1952 dist_parser = &ice_switch_dist_parser;
1953 ret = ice_register_parser(dist_parser, ad);
/* Engine uninit hook: mirror of ice_switch_init — unregister whichever
 * parser was registered for this adapter's mode.
 */
1959 ice_switch_uninit(struct ice_adapter *ad)
1961 struct ice_flow_parser *dist_parser;
1962 struct ice_flow_parser *perm_parser;
1964 if (ad->devargs.pipe_mode_support) {
1965 perm_parser = &ice_switch_perm_parser;
1966 ice_unregister_parser(perm_parser, ad);
1968 dist_parser = &ice_switch_dist_parser;
1969 ice_unregister_parser(dist_parser, ad);
/* Switch filter engine vtable plugged into the generic ice flow
 * framework.  Note: query_count always fails (counters unsupported).
 */
1974 ice_flow_engine ice_switch_engine = {
1975 .init = ice_switch_init,
1976 .uninit = ice_switch_uninit,
1977 .create = ice_switch_create,
1978 .destroy = ice_switch_destroy,
1979 .query_count = ice_switch_query,
1980 .redirect = ice_switch_redirect,
1981 .free = ice_switch_filter_rule_free,
1982 .type = ICE_FLOW_ENGINE_SWITCH,
/* Parser used in the distributor stage (default, non-pipe mode). */
1986 ice_flow_parser ice_switch_dist_parser = {
1987 .engine = &ice_switch_engine,
1988 .array = ice_switch_pattern_dist_list,
1989 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1990 .parse_pattern_action = ice_switch_parse_pattern_action,
1991 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Parser used in the permission stage (devargs pipe-mode enabled). */
1995 ice_flow_parser ice_switch_perm_parser = {
1996 .engine = &ice_switch_engine,
1997 .array = ice_switch_pattern_perm_list,
1998 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1999 .parse_pattern_action = ice_switch_parse_pattern_action,
2000 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the flow framework at
 * shared-object load time (RTE_INIT runs before main).
 */
RTE_INIT(ice_sw_engine_init)
2005 struct ice_flow_engine *engine = &ice_switch_engine;
2006 ice_register_flow_engine(engine);