1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Limits and protocol constants used by the ice switch-filter parser. */
29 #define MAX_QGRP_NUM_TYPE 7
/* Upper bound on the number of input-set bytes one rule may match on. */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol field values carried in a PPPoE session payload:
 * 0x0021 = IPv4, 0x0057 = IPv6 (standard PPP protocol numbers).
 */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 protocol number 47 (0x2F, GRE) — used to spot NVGRE-capable rules. */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Input-set bitmaps: each ICE_SW_INSET_* macro ORs together the
 * ICE_INSET_* field bits a given flow pattern is allowed to match on.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
/* NOTE(review): ICE_SW_INSET_MAC_VLAN and ICE_SW_INSET_MAC_QINQ below
 * end with a dangling '\' — their final operand line appears to have
 * been dropped from this excerpt; reconcile with the complete file.
 */
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_QINQ ( \
41 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59 ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (VXLAN/NVGRE) input sets used by the distributor-stage parser:
 * ICE_INSET_TUN_* bits describe inner-packet fields.
 */
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel input sets for the permission-stage parser (inner 5-tuple only,
 * no outer/TUN MAC or VNI/TNI bits).
 */
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98 ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets; *_PROTO additionally matches the PPP protocol id. */
103 #define ICE_SW_INSET_MAC_PPPOE ( \
104 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
107 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109 ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* ESP/AH/L2TPv3/PFCP input sets: L3 fields plus the protocol's key field. */
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135 ICE_SW_INSET_MAC_IPV4 | \
136 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138 ICE_SW_INSET_MAC_IPV6 | \
139 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/*
 * NOTE(review): the two members below look like the interior of the
 * `struct sw_meta` container used to pass parsed rule data between the
 * parser and create step (its `struct sw_meta {` header, a lookup-count
 * field and the closing `};` are not present in this excerpt) — confirm
 * against the complete file before editing.
 */
142 struct ice_adv_lkup_elem *list;
144 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two switch-filter parsers defined below. */
147 static struct ice_flow_parser ice_switch_dist_parser;
148 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Pattern table for the distributor-stage parser: each entry pairs a
 * supported rte_flow pattern with the input-set bits it may match on
 * (ICE_INSET_NONE entries accept the pattern but no field matching).
 * NOTE(review): the declaration's storage/type prefix and the closing
 * `};` are not visible in this excerpt — confirm against the full file.
 */
151 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
152 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
154 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
156 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
157 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
158 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
159 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
160 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
161 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
162 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
163 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
164 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
165 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
166 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
167 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
168 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
169 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
170 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
171 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
172 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
173 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
174 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
175 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
176 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
177 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
178 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
179 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
180 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
181 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
182 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
183 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
184 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
185 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
186 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
187 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
188 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
190 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
191 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
192 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
193 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
194 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
195 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
196 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
197 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
198 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
199 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
200 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Pattern table for the permission-stage parser. Mirrors the distributor
 * table except that tunnel patterns use the ICE_SW_INSET_PERM_TUNNEL_*
 * input sets (inner fields only).
 * NOTE(review): the declaration's storage/type prefix and the closing
 * `};` are not visible in this excerpt — confirm against the full file.
 */
204 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
205 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
206 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
207 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
208 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
209 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
218 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
220 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
222 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
224 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
243 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
244 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
245 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
246 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
247 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
248 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
249 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
250 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
251 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
252 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
253 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program a parsed switch filter rule into hardware and attach the
 * resulting rule id to the rte_flow handle.
 *
 * NOTE(review): this excerpt is missing several original lines (the
 * return type, the `meta` parameter the casts below rely on, the
 * `lkups_cnt` declaration, error gotos/returns and the closing brace)
 * — reconcile with the complete file before editing.
 */
257 ice_switch_create(struct ice_adapter *ad,
258 struct rte_flow *flow,
260 struct rte_flow_error *error)
263 struct ice_pf *pf = &ad->pf;
264 struct ice_hw *hw = ICE_PF_TO_HW(pf);
265 struct ice_rule_query_data rule_added = {0};
266 struct ice_rule_query_data *filter_ptr;
/* Unpack the lookup list and rule info produced by the parse step. */
267 struct ice_adv_lkup_elem *list =
268 ((struct sw_meta *)meta)->list;
270 ((struct sw_meta *)meta)->lkups_num;
271 struct ice_adv_rule_info *rule_info =
272 &((struct sw_meta *)meta)->rule_info;
/* Reject rules with more lookup words than one hardware chain allows. */
274 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
275 rte_flow_error_set(error, EINVAL,
276 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
277 "item number too large for rule");
281 rte_flow_error_set(error, EINVAL,
282 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
283 "lookup list should not be NULL");
/* Add the advanced rule to HW; rule_added receives the new rule id. */
286 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule-query data on the flow so destroy can remove it. */
288 filter_ptr = rte_zmalloc("ice_switch_filter",
289 sizeof(struct ice_rule_query_data), 0);
291 rte_flow_error_set(error, EINVAL,
292 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
293 "No memory for ice_switch_filter");
296 flow->rule = filter_ptr;
297 rte_memcpy(filter_ptr,
299 sizeof(struct ice_rule_query_data));
301 rte_flow_error_set(error, EINVAL,
302 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
303 "switch filter create flow fail");
/*
 * Remove a previously created switch filter rule from hardware and free
 * the rule-query data stored on the flow.
 *
 * NOTE(review): this excerpt is missing the return type, the assignment
 * source for filter_ptr (presumably flow->rule), the NULL check body,
 * returns and the closing brace — reconcile with the complete file.
 */
319 ice_switch_destroy(struct ice_adapter *ad,
320 struct rte_flow *flow,
321 struct rte_flow_error *error)
323 struct ice_hw *hw = &ad->hw;
325 struct ice_rule_query_data *filter_ptr;
/* Recover the rule id saved by ice_switch_create. */
327 filter_ptr = (struct ice_rule_query_data *)
331 rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
334 " create by switch filter");
/* Ask firmware to delete the rule identified by filter_ptr. */
338 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
340 rte_flow_error_set(error, EINVAL,
341 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
342 "fail to destroy switch filter rule");
/* Release the per-rule bookkeeping allocated at create time. */
346 rte_free(filter_ptr);
351 ice_switch_filter_rule_free(struct rte_flow *flow)
353 rte_free(flow->rule);
357 ice_switch_inset_get(const struct rte_flow_item pattern[],
358 struct rte_flow_error *error,
359 struct ice_adv_lkup_elem *list,
361 enum ice_sw_tunnel_type *tun_type)
363 const struct rte_flow_item *item = pattern;
364 enum rte_flow_item_type item_type;
365 const struct rte_flow_item_eth *eth_spec, *eth_mask;
366 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
367 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
368 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
369 const struct rte_flow_item_udp *udp_spec, *udp_mask;
370 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
371 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
372 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
373 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
374 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
375 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
377 const struct rte_flow_item_esp *esp_spec, *esp_mask;
378 const struct rte_flow_item_ah *ah_spec, *ah_mask;
379 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
380 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
381 uint64_t input_set = ICE_INSET_NONE;
382 uint16_t input_set_byte = 0;
383 bool pppoe_elem_valid = 0;
384 bool pppoe_patt_valid = 0;
385 bool pppoe_prot_valid = 0;
386 bool inner_vlan_valid = 0;
387 bool outer_vlan_valid = 0;
388 bool tunnel_valid = 0;
389 bool profile_rule = 0;
390 bool nvgre_valid = 0;
391 bool vxlan_valid = 0;
398 for (item = pattern; item->type !=
399 RTE_FLOW_ITEM_TYPE_END; item++) {
401 rte_flow_error_set(error, EINVAL,
402 RTE_FLOW_ERROR_TYPE_ITEM,
404 "Not support range");
407 item_type = item->type;
410 case RTE_FLOW_ITEM_TYPE_ETH:
411 eth_spec = item->spec;
412 eth_mask = item->mask;
413 if (eth_spec && eth_mask) {
414 const uint8_t *a = eth_mask->src.addr_bytes;
415 const uint8_t *b = eth_mask->dst.addr_bytes;
416 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
417 if (a[j] && tunnel_valid) {
427 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
428 if (b[j] && tunnel_valid) {
439 input_set |= ICE_INSET_ETHERTYPE;
440 list[t].type = (tunnel_valid == 0) ?
441 ICE_MAC_OFOS : ICE_MAC_IL;
442 struct ice_ether_hdr *h;
443 struct ice_ether_hdr *m;
445 h = &list[t].h_u.eth_hdr;
446 m = &list[t].m_u.eth_hdr;
447 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
448 if (eth_mask->src.addr_bytes[j]) {
450 eth_spec->src.addr_bytes[j];
452 eth_mask->src.addr_bytes[j];
456 if (eth_mask->dst.addr_bytes[j]) {
458 eth_spec->dst.addr_bytes[j];
460 eth_mask->dst.addr_bytes[j];
467 if (eth_mask->type) {
468 list[t].type = ICE_ETYPE_OL;
469 list[t].h_u.ethertype.ethtype_id =
471 list[t].m_u.ethertype.ethtype_id =
479 case RTE_FLOW_ITEM_TYPE_IPV4:
480 ipv4_spec = item->spec;
481 ipv4_mask = item->mask;
483 if (ipv4_spec && ipv4_mask) {
484 /* Check IPv4 mask and update input set */
485 if (ipv4_mask->hdr.version_ihl ||
486 ipv4_mask->hdr.total_length ||
487 ipv4_mask->hdr.packet_id ||
488 ipv4_mask->hdr.hdr_checksum) {
489 rte_flow_error_set(error, EINVAL,
490 RTE_FLOW_ERROR_TYPE_ITEM,
492 "Invalid IPv4 mask.");
497 if (ipv4_mask->hdr.type_of_service)
499 ICE_INSET_TUN_IPV4_TOS;
500 if (ipv4_mask->hdr.src_addr)
502 ICE_INSET_TUN_IPV4_SRC;
503 if (ipv4_mask->hdr.dst_addr)
505 ICE_INSET_TUN_IPV4_DST;
506 if (ipv4_mask->hdr.time_to_live)
508 ICE_INSET_TUN_IPV4_TTL;
509 if (ipv4_mask->hdr.next_proto_id)
511 ICE_INSET_TUN_IPV4_PROTO;
513 if (ipv4_mask->hdr.src_addr)
514 input_set |= ICE_INSET_IPV4_SRC;
515 if (ipv4_mask->hdr.dst_addr)
516 input_set |= ICE_INSET_IPV4_DST;
517 if (ipv4_mask->hdr.time_to_live)
518 input_set |= ICE_INSET_IPV4_TTL;
519 if (ipv4_mask->hdr.next_proto_id)
521 ICE_INSET_IPV4_PROTO;
522 if (ipv4_mask->hdr.type_of_service)
526 list[t].type = (tunnel_valid == 0) ?
527 ICE_IPV4_OFOS : ICE_IPV4_IL;
528 if (ipv4_mask->hdr.src_addr) {
529 list[t].h_u.ipv4_hdr.src_addr =
530 ipv4_spec->hdr.src_addr;
531 list[t].m_u.ipv4_hdr.src_addr =
532 ipv4_mask->hdr.src_addr;
535 if (ipv4_mask->hdr.dst_addr) {
536 list[t].h_u.ipv4_hdr.dst_addr =
537 ipv4_spec->hdr.dst_addr;
538 list[t].m_u.ipv4_hdr.dst_addr =
539 ipv4_mask->hdr.dst_addr;
542 if (ipv4_mask->hdr.time_to_live) {
543 list[t].h_u.ipv4_hdr.time_to_live =
544 ipv4_spec->hdr.time_to_live;
545 list[t].m_u.ipv4_hdr.time_to_live =
546 ipv4_mask->hdr.time_to_live;
549 if (ipv4_mask->hdr.next_proto_id) {
550 list[t].h_u.ipv4_hdr.protocol =
551 ipv4_spec->hdr.next_proto_id;
552 list[t].m_u.ipv4_hdr.protocol =
553 ipv4_mask->hdr.next_proto_id;
556 if ((ipv4_spec->hdr.next_proto_id &
557 ipv4_mask->hdr.next_proto_id) ==
558 ICE_IPV4_PROTO_NVGRE)
559 *tun_type = ICE_SW_TUN_AND_NON_TUN;
560 if (ipv4_mask->hdr.type_of_service) {
561 list[t].h_u.ipv4_hdr.tos =
562 ipv4_spec->hdr.type_of_service;
563 list[t].m_u.ipv4_hdr.tos =
564 ipv4_mask->hdr.type_of_service;
571 case RTE_FLOW_ITEM_TYPE_IPV6:
572 ipv6_spec = item->spec;
573 ipv6_mask = item->mask;
575 if (ipv6_spec && ipv6_mask) {
576 if (ipv6_mask->hdr.payload_len) {
577 rte_flow_error_set(error, EINVAL,
578 RTE_FLOW_ERROR_TYPE_ITEM,
580 "Invalid IPv6 mask");
584 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
585 if (ipv6_mask->hdr.src_addr[j] &&
588 ICE_INSET_TUN_IPV6_SRC;
590 } else if (ipv6_mask->hdr.src_addr[j]) {
591 input_set |= ICE_INSET_IPV6_SRC;
595 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
596 if (ipv6_mask->hdr.dst_addr[j] &&
599 ICE_INSET_TUN_IPV6_DST;
601 } else if (ipv6_mask->hdr.dst_addr[j]) {
602 input_set |= ICE_INSET_IPV6_DST;
606 if (ipv6_mask->hdr.proto &&
609 ICE_INSET_TUN_IPV6_NEXT_HDR;
610 else if (ipv6_mask->hdr.proto)
612 ICE_INSET_IPV6_NEXT_HDR;
613 if (ipv6_mask->hdr.hop_limits &&
616 ICE_INSET_TUN_IPV6_HOP_LIMIT;
617 else if (ipv6_mask->hdr.hop_limits)
619 ICE_INSET_IPV6_HOP_LIMIT;
620 if ((ipv6_mask->hdr.vtc_flow &
622 (RTE_IPV6_HDR_TC_MASK)) &&
625 ICE_INSET_TUN_IPV6_TC;
626 else if (ipv6_mask->hdr.vtc_flow &
628 (RTE_IPV6_HDR_TC_MASK))
629 input_set |= ICE_INSET_IPV6_TC;
631 list[t].type = (tunnel_valid == 0) ?
632 ICE_IPV6_OFOS : ICE_IPV6_IL;
633 struct ice_ipv6_hdr *f;
634 struct ice_ipv6_hdr *s;
635 f = &list[t].h_u.ipv6_hdr;
636 s = &list[t].m_u.ipv6_hdr;
637 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
638 if (ipv6_mask->hdr.src_addr[j]) {
640 ipv6_spec->hdr.src_addr[j];
642 ipv6_mask->hdr.src_addr[j];
645 if (ipv6_mask->hdr.dst_addr[j]) {
647 ipv6_spec->hdr.dst_addr[j];
649 ipv6_mask->hdr.dst_addr[j];
653 if (ipv6_mask->hdr.proto) {
655 ipv6_spec->hdr.proto;
657 ipv6_mask->hdr.proto;
660 if (ipv6_mask->hdr.hop_limits) {
662 ipv6_spec->hdr.hop_limits;
664 ipv6_mask->hdr.hop_limits;
667 if (ipv6_mask->hdr.vtc_flow &
669 (RTE_IPV6_HDR_TC_MASK)) {
670 struct ice_le_ver_tc_flow vtf;
671 vtf.u.fld.version = 0;
672 vtf.u.fld.flow_label = 0;
673 vtf.u.fld.tc = (rte_be_to_cpu_32
674 (ipv6_spec->hdr.vtc_flow) &
675 RTE_IPV6_HDR_TC_MASK) >>
676 RTE_IPV6_HDR_TC_SHIFT;
677 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
678 vtf.u.fld.tc = (rte_be_to_cpu_32
679 (ipv6_mask->hdr.vtc_flow) &
680 RTE_IPV6_HDR_TC_MASK) >>
681 RTE_IPV6_HDR_TC_SHIFT;
682 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
689 case RTE_FLOW_ITEM_TYPE_UDP:
690 udp_spec = item->spec;
691 udp_mask = item->mask;
693 if (udp_spec && udp_mask) {
694 /* Check UDP mask and update input set*/
695 if (udp_mask->hdr.dgram_len ||
696 udp_mask->hdr.dgram_cksum) {
697 rte_flow_error_set(error, EINVAL,
698 RTE_FLOW_ERROR_TYPE_ITEM,
705 if (udp_mask->hdr.src_port)
707 ICE_INSET_TUN_UDP_SRC_PORT;
708 if (udp_mask->hdr.dst_port)
710 ICE_INSET_TUN_UDP_DST_PORT;
712 if (udp_mask->hdr.src_port)
714 ICE_INSET_UDP_SRC_PORT;
715 if (udp_mask->hdr.dst_port)
717 ICE_INSET_UDP_DST_PORT;
719 if (*tun_type == ICE_SW_TUN_VXLAN &&
721 list[t].type = ICE_UDP_OF;
723 list[t].type = ICE_UDP_ILOS;
724 if (udp_mask->hdr.src_port) {
725 list[t].h_u.l4_hdr.src_port =
726 udp_spec->hdr.src_port;
727 list[t].m_u.l4_hdr.src_port =
728 udp_mask->hdr.src_port;
731 if (udp_mask->hdr.dst_port) {
732 list[t].h_u.l4_hdr.dst_port =
733 udp_spec->hdr.dst_port;
734 list[t].m_u.l4_hdr.dst_port =
735 udp_mask->hdr.dst_port;
742 case RTE_FLOW_ITEM_TYPE_TCP:
743 tcp_spec = item->spec;
744 tcp_mask = item->mask;
746 if (tcp_spec && tcp_mask) {
747 /* Check TCP mask and update input set */
748 if (tcp_mask->hdr.sent_seq ||
749 tcp_mask->hdr.recv_ack ||
750 tcp_mask->hdr.data_off ||
751 tcp_mask->hdr.tcp_flags ||
752 tcp_mask->hdr.rx_win ||
753 tcp_mask->hdr.cksum ||
754 tcp_mask->hdr.tcp_urp) {
755 rte_flow_error_set(error, EINVAL,
756 RTE_FLOW_ERROR_TYPE_ITEM,
763 if (tcp_mask->hdr.src_port)
765 ICE_INSET_TUN_TCP_SRC_PORT;
766 if (tcp_mask->hdr.dst_port)
768 ICE_INSET_TUN_TCP_DST_PORT;
770 if (tcp_mask->hdr.src_port)
772 ICE_INSET_TCP_SRC_PORT;
773 if (tcp_mask->hdr.dst_port)
775 ICE_INSET_TCP_DST_PORT;
777 list[t].type = ICE_TCP_IL;
778 if (tcp_mask->hdr.src_port) {
779 list[t].h_u.l4_hdr.src_port =
780 tcp_spec->hdr.src_port;
781 list[t].m_u.l4_hdr.src_port =
782 tcp_mask->hdr.src_port;
785 if (tcp_mask->hdr.dst_port) {
786 list[t].h_u.l4_hdr.dst_port =
787 tcp_spec->hdr.dst_port;
788 list[t].m_u.l4_hdr.dst_port =
789 tcp_mask->hdr.dst_port;
796 case RTE_FLOW_ITEM_TYPE_SCTP:
797 sctp_spec = item->spec;
798 sctp_mask = item->mask;
799 if (sctp_spec && sctp_mask) {
800 /* Check SCTP mask and update input set */
801 if (sctp_mask->hdr.cksum) {
802 rte_flow_error_set(error, EINVAL,
803 RTE_FLOW_ERROR_TYPE_ITEM,
805 "Invalid SCTP mask");
810 if (sctp_mask->hdr.src_port)
812 ICE_INSET_TUN_SCTP_SRC_PORT;
813 if (sctp_mask->hdr.dst_port)
815 ICE_INSET_TUN_SCTP_DST_PORT;
817 if (sctp_mask->hdr.src_port)
819 ICE_INSET_SCTP_SRC_PORT;
820 if (sctp_mask->hdr.dst_port)
822 ICE_INSET_SCTP_DST_PORT;
824 list[t].type = ICE_SCTP_IL;
825 if (sctp_mask->hdr.src_port) {
826 list[t].h_u.sctp_hdr.src_port =
827 sctp_spec->hdr.src_port;
828 list[t].m_u.sctp_hdr.src_port =
829 sctp_mask->hdr.src_port;
832 if (sctp_mask->hdr.dst_port) {
833 list[t].h_u.sctp_hdr.dst_port =
834 sctp_spec->hdr.dst_port;
835 list[t].m_u.sctp_hdr.dst_port =
836 sctp_mask->hdr.dst_port;
843 case RTE_FLOW_ITEM_TYPE_VXLAN:
844 vxlan_spec = item->spec;
845 vxlan_mask = item->mask;
846 /* Check if VXLAN item is used to describe protocol.
847 * If yes, both spec and mask should be NULL.
848 * If no, both spec and mask shouldn't be NULL.
850 if ((!vxlan_spec && vxlan_mask) ||
851 (vxlan_spec && !vxlan_mask)) {
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
855 "Invalid VXLAN item");
860 if (vxlan_spec && vxlan_mask) {
861 list[t].type = ICE_VXLAN;
862 if (vxlan_mask->vni[0] ||
863 vxlan_mask->vni[1] ||
864 vxlan_mask->vni[2]) {
865 list[t].h_u.tnl_hdr.vni =
866 (vxlan_spec->vni[2] << 16) |
867 (vxlan_spec->vni[1] << 8) |
869 list[t].m_u.tnl_hdr.vni =
870 (vxlan_mask->vni[2] << 16) |
871 (vxlan_mask->vni[1] << 8) |
874 ICE_INSET_TUN_VXLAN_VNI;
881 case RTE_FLOW_ITEM_TYPE_NVGRE:
882 nvgre_spec = item->spec;
883 nvgre_mask = item->mask;
884 /* Check if NVGRE item is used to describe protocol.
885 * If yes, both spec and mask should be NULL.
886 * If no, both spec and mask shouldn't be NULL.
888 if ((!nvgre_spec && nvgre_mask) ||
889 (nvgre_spec && !nvgre_mask)) {
890 rte_flow_error_set(error, EINVAL,
891 RTE_FLOW_ERROR_TYPE_ITEM,
893 "Invalid NVGRE item");
898 if (nvgre_spec && nvgre_mask) {
899 list[t].type = ICE_NVGRE;
900 if (nvgre_mask->tni[0] ||
901 nvgre_mask->tni[1] ||
902 nvgre_mask->tni[2]) {
903 list[t].h_u.nvgre_hdr.tni_flow =
904 (nvgre_spec->tni[2] << 16) |
905 (nvgre_spec->tni[1] << 8) |
907 list[t].m_u.nvgre_hdr.tni_flow =
908 (nvgre_mask->tni[2] << 16) |
909 (nvgre_mask->tni[1] << 8) |
912 ICE_INSET_TUN_NVGRE_TNI;
919 case RTE_FLOW_ITEM_TYPE_VLAN:
920 vlan_spec = item->spec;
921 vlan_mask = item->mask;
922 /* Check if VLAN item is used to describe protocol.
923 * If yes, both spec and mask should be NULL.
924 * If no, both spec and mask shouldn't be NULL.
926 if ((!vlan_spec && vlan_mask) ||
927 (vlan_spec && !vlan_mask)) {
928 rte_flow_error_set(error, EINVAL,
929 RTE_FLOW_ERROR_TYPE_ITEM,
931 "Invalid VLAN item");
935 if (!outer_vlan_valid &&
936 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
937 *tun_type == ICE_NON_TUN_QINQ))
938 outer_vlan_valid = 1;
939 else if (!inner_vlan_valid &&
940 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
941 *tun_type == ICE_NON_TUN_QINQ))
942 inner_vlan_valid = 1;
943 else if (!inner_vlan_valid)
944 inner_vlan_valid = 1;
946 if (vlan_spec && vlan_mask) {
947 if (outer_vlan_valid && !inner_vlan_valid) {
948 list[t].type = ICE_VLAN_EX;
949 input_set |= ICE_INSET_VLAN_OUTER;
950 } else if (inner_vlan_valid) {
951 list[t].type = ICE_VLAN_OFOS;
952 input_set |= ICE_INSET_VLAN_INNER;
955 if (vlan_mask->tci) {
956 list[t].h_u.vlan_hdr.vlan =
958 list[t].m_u.vlan_hdr.vlan =
962 if (vlan_mask->inner_type) {
963 rte_flow_error_set(error, EINVAL,
964 RTE_FLOW_ERROR_TYPE_ITEM,
966 "Invalid VLAN input set.");
973 case RTE_FLOW_ITEM_TYPE_PPPOED:
974 case RTE_FLOW_ITEM_TYPE_PPPOES:
975 pppoe_spec = item->spec;
976 pppoe_mask = item->mask;
977 /* Check if PPPoE item is used to describe protocol.
978 * If yes, both spec and mask should be NULL.
979 * If no, both spec and mask shouldn't be NULL.
981 if ((!pppoe_spec && pppoe_mask) ||
982 (pppoe_spec && !pppoe_mask)) {
983 rte_flow_error_set(error, EINVAL,
984 RTE_FLOW_ERROR_TYPE_ITEM,
986 "Invalid pppoe item");
989 pppoe_patt_valid = 1;
990 if (pppoe_spec && pppoe_mask) {
991 /* Check pppoe mask and update input set */
992 if (pppoe_mask->length ||
994 pppoe_mask->version_type) {
995 rte_flow_error_set(error, EINVAL,
996 RTE_FLOW_ERROR_TYPE_ITEM,
998 "Invalid pppoe mask");
1001 list[t].type = ICE_PPPOE;
1002 if (pppoe_mask->session_id) {
1003 list[t].h_u.pppoe_hdr.session_id =
1004 pppoe_spec->session_id;
1005 list[t].m_u.pppoe_hdr.session_id =
1006 pppoe_mask->session_id;
1007 input_set |= ICE_INSET_PPPOE_SESSION;
1008 input_set_byte += 2;
1011 pppoe_elem_valid = 1;
1015 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1016 pppoe_proto_spec = item->spec;
1017 pppoe_proto_mask = item->mask;
1018 /* Check if PPPoE optional proto_id item
1019 * is used to describe protocol.
1020 * If yes, both spec and mask should be NULL.
1021 * If no, both spec and mask shouldn't be NULL.
1023 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1024 (pppoe_proto_spec && !pppoe_proto_mask)) {
1025 rte_flow_error_set(error, EINVAL,
1026 RTE_FLOW_ERROR_TYPE_ITEM,
1028 "Invalid pppoe proto item");
1031 if (pppoe_proto_spec && pppoe_proto_mask) {
1032 if (pppoe_elem_valid)
1034 list[t].type = ICE_PPPOE;
1035 if (pppoe_proto_mask->proto_id) {
1036 list[t].h_u.pppoe_hdr.ppp_prot_id =
1037 pppoe_proto_spec->proto_id;
1038 list[t].m_u.pppoe_hdr.ppp_prot_id =
1039 pppoe_proto_mask->proto_id;
1040 input_set |= ICE_INSET_PPPOE_PROTO;
1041 input_set_byte += 2;
1042 pppoe_prot_valid = 1;
1044 if ((pppoe_proto_mask->proto_id &
1045 pppoe_proto_spec->proto_id) !=
1046 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1047 (pppoe_proto_mask->proto_id &
1048 pppoe_proto_spec->proto_id) !=
1049 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1050 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1052 *tun_type = ICE_SW_TUN_PPPOE;
1058 case RTE_FLOW_ITEM_TYPE_ESP:
1059 esp_spec = item->spec;
1060 esp_mask = item->mask;
1061 if ((esp_spec && !esp_mask) ||
1062 (!esp_spec && esp_mask)) {
1063 rte_flow_error_set(error, EINVAL,
1064 RTE_FLOW_ERROR_TYPE_ITEM,
1066 "Invalid esp item");
1069 /* Check esp mask and update input set */
1070 if (esp_mask && esp_mask->hdr.seq) {
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1074 "Invalid esp mask");
1078 if (!esp_spec && !esp_mask && !input_set) {
1080 if (ipv6_valid && udp_valid)
1082 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1083 else if (ipv6_valid)
1084 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1085 else if (ipv4_valid)
1087 } else if (esp_spec && esp_mask &&
1090 list[t].type = ICE_NAT_T;
1092 list[t].type = ICE_ESP;
1093 list[t].h_u.esp_hdr.spi =
1095 list[t].m_u.esp_hdr.spi =
1097 input_set |= ICE_INSET_ESP_SPI;
1098 input_set_byte += 4;
1102 if (!profile_rule) {
1103 if (ipv6_valid && udp_valid)
1104 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1105 else if (ipv4_valid && udp_valid)
1106 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1107 else if (ipv6_valid)
1108 *tun_type = ICE_SW_TUN_IPV6_ESP;
1109 else if (ipv4_valid)
1110 *tun_type = ICE_SW_TUN_IPV4_ESP;
1114 case RTE_FLOW_ITEM_TYPE_AH:
1115 ah_spec = item->spec;
1116 ah_mask = item->mask;
1117 if ((ah_spec && !ah_mask) ||
1118 (!ah_spec && ah_mask)) {
1119 rte_flow_error_set(error, EINVAL,
1120 RTE_FLOW_ERROR_TYPE_ITEM,
1125 /* Check ah mask and update input set */
1127 (ah_mask->next_hdr ||
1128 ah_mask->payload_len ||
1130 ah_mask->reserved)) {
1131 rte_flow_error_set(error, EINVAL,
1132 RTE_FLOW_ERROR_TYPE_ITEM,
1138 if (!ah_spec && !ah_mask && !input_set) {
1140 if (ipv6_valid && udp_valid)
1142 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1143 else if (ipv6_valid)
1144 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1145 else if (ipv4_valid)
1147 } else if (ah_spec && ah_mask &&
1149 list[t].type = ICE_AH;
1150 list[t].h_u.ah_hdr.spi =
1152 list[t].m_u.ah_hdr.spi =
1154 input_set |= ICE_INSET_AH_SPI;
1155 input_set_byte += 4;
1159 if (!profile_rule) {
1162 else if (ipv6_valid)
1163 *tun_type = ICE_SW_TUN_IPV6_AH;
1164 else if (ipv4_valid)
1165 *tun_type = ICE_SW_TUN_IPV4_AH;
1169 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1170 l2tp_spec = item->spec;
1171 l2tp_mask = item->mask;
1172 if ((l2tp_spec && !l2tp_mask) ||
1173 (!l2tp_spec && l2tp_mask)) {
1174 rte_flow_error_set(error, EINVAL,
1175 RTE_FLOW_ERROR_TYPE_ITEM,
1177 "Invalid l2tp item");
1181 if (!l2tp_spec && !l2tp_mask && !input_set) {
1184 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1185 else if (ipv4_valid)
1187 } else if (l2tp_spec && l2tp_mask &&
1188 l2tp_mask->session_id){
1189 list[t].type = ICE_L2TPV3;
1190 list[t].h_u.l2tpv3_sess_hdr.session_id =
1191 l2tp_spec->session_id;
1192 list[t].m_u.l2tpv3_sess_hdr.session_id =
1193 l2tp_mask->session_id;
1194 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1195 input_set_byte += 4;
1199 if (!profile_rule) {
1202 ICE_SW_TUN_IPV6_L2TPV3;
1203 else if (ipv4_valid)
1205 ICE_SW_TUN_IPV4_L2TPV3;
1209 case RTE_FLOW_ITEM_TYPE_PFCP:
1210 pfcp_spec = item->spec;
1211 pfcp_mask = item->mask;
1212 /* Check if PFCP item is used to describe protocol.
1213 * If yes, both spec and mask should be NULL.
1214 * If no, both spec and mask shouldn't be NULL.
1216 if ((!pfcp_spec && pfcp_mask) ||
1217 (pfcp_spec && !pfcp_mask)) {
1218 rte_flow_error_set(error, EINVAL,
1219 RTE_FLOW_ERROR_TYPE_ITEM,
1221 "Invalid PFCP item");
1224 if (pfcp_spec && pfcp_mask) {
1225 /* Check pfcp mask and update input set */
1226 if (pfcp_mask->msg_type ||
1227 pfcp_mask->msg_len ||
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_ITEM,
1232 "Invalid pfcp mask");
1235 if (pfcp_mask->s_field &&
1236 pfcp_spec->s_field == 0x01 &&
1239 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1240 else if (pfcp_mask->s_field &&
1241 pfcp_spec->s_field == 0x01)
1243 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1244 else if (pfcp_mask->s_field &&
1245 !pfcp_spec->s_field &&
1248 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1249 else if (pfcp_mask->s_field &&
1250 !pfcp_spec->s_field)
1252 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1258 case RTE_FLOW_ITEM_TYPE_VOID:
1262 rte_flow_error_set(error, EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1264 "Invalid pattern item.");
1269 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1270 inner_vlan_valid && outer_vlan_valid)
1271 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1272 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1273 inner_vlan_valid && outer_vlan_valid)
1274 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1275 else if (*tun_type == ICE_NON_TUN &&
1276 inner_vlan_valid && outer_vlan_valid)
1277 *tun_type = ICE_NON_TUN_QINQ;
1278 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1279 inner_vlan_valid && outer_vlan_valid)
1280 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1282 if (pppoe_patt_valid && !pppoe_prot_valid) {
1283 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1284 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1285 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1286 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1287 else if (inner_vlan_valid && outer_vlan_valid)
1288 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1289 else if (ipv6_valid && udp_valid)
1290 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1291 else if (ipv6_valid && tcp_valid)
1292 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1293 else if (ipv4_valid && udp_valid)
1294 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1295 else if (ipv4_valid && tcp_valid)
1296 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1297 else if (ipv6_valid)
1298 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1299 else if (ipv4_valid)
1300 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1302 *tun_type = ICE_SW_TUN_PPPOE;
1305 if (*tun_type == ICE_NON_TUN) {
1307 *tun_type = ICE_SW_TUN_VXLAN;
1308 else if (nvgre_valid)
1309 *tun_type = ICE_SW_TUN_NVGRE;
1310 else if (ipv4_valid && tcp_valid)
1311 *tun_type = ICE_SW_IPV4_TCP;
1312 else if (ipv4_valid && udp_valid)
1313 *tun_type = ICE_SW_IPV4_UDP;
1314 else if (ipv6_valid && tcp_valid)
1315 *tun_type = ICE_SW_IPV6_TCP;
1316 else if (ipv6_valid && udp_valid)
1317 *tun_type = ICE_SW_IPV6_UDP;
1320 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1321 rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ITEM,
1324 "too much input set");
/* Parse the action list for a rule created through the DCF (device
 * config function).  Only RTE_FLOW_ACTION_TYPE_VF (forward to a VF's
 * VSI) and RTE_FLOW_ACTION_TYPE_DROP are accepted; anything else sets
 * an rte_flow error.  On success rule_info->sw_act is filled in.
 * NOTE(review): lossy excerpt — braces/returns between the numbered
 * lines are missing from this listing; code kept verbatim.
 */
1336 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1337 const struct rte_flow_action *actions,
1338 struct rte_flow_error *error,
1339 struct ice_adv_rule_info *rule_info)
1341 const struct rte_flow_action_vf *act_vf;
1342 const struct rte_flow_action *action;
1343 enum rte_flow_action_type action_type;
1345 for (action = actions; action->type !=
1346 RTE_FLOW_ACTION_TYPE_END; action++) {
1347 action_type = action->type;
1348 switch (action_type) {
1349 case RTE_FLOW_ACTION_TYPE_VF:
1350 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1351 act_vf = action->conf;
/* The VF id must be below the number of VFs unless "original"
 * is set (which targets the DCF's own function instead).
 */
1353 if (act_vf->id >= ad->real_hw.num_vfs &&
1354 !act_vf->original) {
1355 rte_flow_error_set(error,
1356 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* original == true: forward to the DCF host function's VSI,
 * identified here by its PCI bus/function number.
 */
1362 if (act_vf->original)
1363 rule_info->sw_act.vsi_handle =
1364 ad->real_hw.avf.bus.func;
1366 rule_info->sw_act.vsi_handle = act_vf->id;
1369 case RTE_FLOW_ACTION_TYPE_DROP:
1370 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1374 rte_flow_error_set(error,
1375 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1377 "Invalid action type");
/* Rule matches traffic received on the target VSI; fixed
 * switch-rule priority of 5.
 */
1382 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1383 rule_info->sw_act.flag = ICE_FLTR_RX;
1385 rule_info->priority = 5;
/* Parse the action list for a PF-owned rule.  Supported actions:
 *  - RSS (interpreted as a queue-group forward: the queue list must be
 *    a contiguous run whose length is one of 2/4/8/16/32/64/128),
 *  - QUEUE (forward to a single Rx queue),
 *  - DROP, and VOID (ignored).
 * Queue ids are offset by base_queue (PF base + VSI base) to translate
 * from ethdev queue numbering to absolute hardware queue ids.
 * NOTE(review): lossy excerpt — goto targets/returns between numbered
 * lines are missing; the three rte_flow_error_set calls at the bottom
 * are the bodies of those error labels.  Code kept verbatim.
 */
1391 ice_switch_parse_action(struct ice_pf *pf,
1392 const struct rte_flow_action *actions,
1393 struct rte_flow_error *error,
1394 struct ice_adv_rule_info *rule_info)
1396 struct ice_vsi *vsi = pf->main_vsi;
1397 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1398 const struct rte_flow_action_queue *act_q;
1399 const struct rte_flow_action_rss *act_qgrop;
1400 uint16_t base_queue, i;
1401 const struct rte_flow_action *action;
1402 enum rte_flow_action_type action_type;
/* Legal queue-group sizes accepted by the switch filter. */
1403 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1404 2, 4, 8, 16, 32, 64, 128};
1406 base_queue = pf->base_queue + vsi->base_queue;
1407 for (action = actions; action->type !=
1408 RTE_FLOW_ACTION_TYPE_END; action++) {
1409 action_type = action->type;
1410 switch (action_type) {
1411 case RTE_FLOW_ACTION_TYPE_RSS:
1412 act_qgrop = action->conf;
/* A "group" of one queue is not a group. */
1413 if (act_qgrop->queue_num <= 1)
1415 rule_info->sw_act.fltr_act =
1417 rule_info->sw_act.fwd_id.q_id =
1418 base_queue + act_qgrop->queue[0];
/* Group size must be one of the allowed powers of two. */
1419 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1420 if (act_qgrop->queue_num ==
1421 valid_qgrop_number[i])
1424 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured Rx queue range. */
1426 if ((act_qgrop->queue[0] +
1427 act_qgrop->queue_num) >
1428 dev->data->nb_rx_queues)
/* Queues in the group must be strictly consecutive. */
1430 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1431 if (act_qgrop->queue[i + 1] !=
1432 act_qgrop->queue[i] + 1)
1434 rule_info->sw_act.qgrp_size =
1435 act_qgrop->queue_num;
1437 case RTE_FLOW_ACTION_TYPE_QUEUE:
1438 act_q = action->conf;
1439 if (act_q->index >= dev->data->nb_rx_queues)
1441 rule_info->sw_act.fltr_act =
1443 rule_info->sw_act.fwd_id.q_id =
1444 base_queue + act_q->index;
1447 case RTE_FLOW_ACTION_TYPE_DROP:
1448 rule_info->sw_act.fltr_act =
1452 case RTE_FLOW_ACTION_TYPE_VOID:
/* Success path: rule belongs to the main VSI, priority 5. */
1460 rule_info->sw_act.vsi_handle = vsi->idx;
1462 rule_info->sw_act.src = vsi->idx;
1463 rule_info->priority = 5;
/* Error labels (jump targets for the checks above). */
1468 rte_flow_error_set(error,
1469 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1471 "Invalid action type or queue number");
1475 rte_flow_error_set(error,
1476 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1478 "Invalid queue region indexes");
1482 rte_flow_error_set(error,
1483 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1485 "Discontinuous queue region");
/* Validate the action list shape: count the meaningful actions
 * (VF/RSS/QUEUE/DROP — VOID is skipped) and reject anything else.
 * Exactly one meaningful action is required.
 * NOTE(review): lossy excerpt — the lines incrementing actions_num and
 * the break/return statements are missing from this listing.
 */
1490 ice_switch_check_action(const struct rte_flow_action *actions,
1491 struct rte_flow_error *error)
1493 const struct rte_flow_action *action;
1494 enum rte_flow_action_type action_type;
1495 uint16_t actions_num = 0;
1497 for (action = actions; action->type !=
1498 RTE_FLOW_ACTION_TYPE_END; action++) {
1499 action_type = action->type;
1500 switch (action_type) {
1501 case RTE_FLOW_ACTION_TYPE_VF:
1502 case RTE_FLOW_ACTION_TYPE_RSS:
1503 case RTE_FLOW_ACTION_TYPE_QUEUE:
1504 case RTE_FLOW_ACTION_TYPE_DROP:
1507 case RTE_FLOW_ACTION_TYPE_VOID:
1510 rte_flow_error_set(error,
1511 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1513 "Invalid action type");
/* The switch filter supports exactly one fate action per rule. */
1518 if (actions_num != 1) {
1519 rte_flow_error_set(error,
1520 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1522 "Invalid action number");
/* Engine entry point that turns an rte_flow pattern + actions into the
 * sw_meta consumed by rule creation.  Steps:
 *  1. pre-scan items to size the lookup array, detect QinQ (two VLANs)
 *     and fully-masked ETH type (match both tunnel and non-tunnel);
 *  2. allocate the lookup list and sw_meta;
 *  3. match the pattern against the supported-pattern table;
 *  4. compute the input set / tunnel type (ice_switch_inset_get) and
 *     validate it against the matched template's mask;
 *  5. validate and parse the actions (DCF path vs PF path);
 *  6. publish list/lkups_num/rule_info through *meta.
 * The trailing rte_free calls are the error-path cleanup labels.
 * NOTE(review): lossy excerpt — gotos/returns and some declarations
 * (ret, meta, array_len) fall on lines missing from this listing.
 */
1530 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1531 struct ice_pattern_match_item *array,
1533 const struct rte_flow_item pattern[],
1534 const struct rte_flow_action actions[],
1536 struct rte_flow_error *error)
1538 struct ice_pf *pf = &ad->pf;
1539 uint64_t inputset = 0;
1541 struct sw_meta *sw_meta_ptr = NULL;
1542 struct ice_adv_rule_info rule_info;
1543 struct ice_adv_lkup_elem *list = NULL;
1544 uint16_t lkups_num = 0;
1545 const struct rte_flow_item *item = pattern;
1546 uint16_t item_num = 0;
1547 uint16_t vlan_num = 0;
1548 enum ice_sw_tunnel_type tun_type =
1550 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: count items, VLANs, and detect a fully-masked ether type. */
1552 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1554 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1555 const struct rte_flow_item_eth *eth_mask;
1557 eth_mask = item->mask;
/* ether-type fully masked: rule must hit both tunnel and
 * non-tunnel recipes.
 */
1560 if (eth_mask->type == UINT16_MAX)
1561 tun_type = ICE_SW_TUN_AND_NON_TUN;
1564 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1567 /* reserve one more memory slot for ETH which may
1568 * consume 2 lookup items.
1570 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN items => QinQ variant of the tunnel type. */
1574 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576 else if (vlan_num == 2)
1577 tun_type = ICE_NON_TUN_QINQ;
1579 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1581 rte_flow_error_set(error, EINVAL,
1582 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1583 "No memory for PMD internal items");
1588 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1590 rte_flow_error_set(error, EINVAL,
1591 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1592 "No memory for sw_pattern_meta_ptr");
1596 pattern_match_item =
1597 ice_search_pattern_match_item(ad, pattern, array, array_len,
1599 if (!pattern_match_item) {
1600 rte_flow_error_set(error, EINVAL,
1601 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1602 "Invalid input pattern");
1606 inputset = ice_switch_inset_get
1607 (pattern, error, list, &lkups_num, &tun_type);
/* Non-profile rules must match something; every matched field must be
 * allowed by the template's outer input-set mask.
 */
1608 if ((!inputset && !ice_is_prof_rule(tun_type)) ||
1609 (inputset & ~pattern_match_item->input_set_mask_o)) {
1610 rte_flow_error_set(error, EINVAL,
1611 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1613 "Invalid input set");
1617 memset(&rule_info, 0, sizeof(rule_info));
1618 rule_info.tun_type = tun_type;
1620 ret = ice_switch_check_action(actions, error);
/* DCF-mode devices use the DCF action parser; PF otherwise. */
1624 if (ad->hw.dcf_enabled)
1625 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1628 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
/* Hand ownership of list/sw_meta_ptr to the caller via *meta. */
1634 *meta = sw_meta_ptr;
1635 ((struct sw_meta *)*meta)->list = list;
1636 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1637 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error-path cleanup labels. */
1640 rte_free(sw_meta_ptr);
1643 rte_free(pattern_match_item);
1649 rte_free(sw_meta_ptr);
1650 rte_free(pattern_match_item);
/* .query_count callback: flow counters are not implemented by the
 * switch filter engine, so always report an error.
 */
1656 ice_switch_query(struct ice_adapter *ad __rte_unused,
1657 struct rte_flow *flow __rte_unused,
1658 struct rte_flow_query_count *count __rte_unused,
1659 struct rte_flow_error *error)
1661 rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_HANDLE,
1664 "count action not supported by switch filter");
/* Re-target an installed switch rule after its destination VSI has been
 * renumbered (e.g. VF reset): locate the advanced filter entry for this
 * flow, duplicate its lookup elements, remove the old hardware rule,
 * update the cached VSI context with the new VSI number, and replay the
 * rule.  Only ICE_FLOW_REDIRECT_VSI redirects are handled.
 * NOTE(review): lossy excerpt — some declarations (lkups_cnt, ret) and
 * return/brace lines are missing from this listing.
 */
1670 ice_switch_redirect(struct ice_adapter *ad,
1671 struct rte_flow *flow,
1672 struct ice_flow_redirect *rd)
1674 struct ice_rule_query_data *rdata = flow->rule;
1675 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1676 struct ice_adv_lkup_elem *lkups_dp = NULL;
1677 struct LIST_HEAD_TYPE *list_head;
1678 struct ice_adv_rule_info rinfo;
1679 struct ice_hw *hw = &ad->hw;
1680 struct ice_switch_info *sw;
/* Only rules bound to the redirected VSI handle are affected. */
1684 if (rdata->vsi_handle != rd->vsi_handle)
1687 sw = hw->switch_info;
1688 if (!sw->recp_list[rdata->rid].recp_created)
1691 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Walk the recipe's filter-rule list looking for this flow's rule id,
 * either as a direct FWD_TO_VSI to the old handle or a FWD_TO_VSI_LIST.
 */
1694 list_head = &sw->recp_list[rdata->rid].filt_rules;
1695 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1697 rinfo = list_itr->rule_info;
1698 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1699 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1700 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1701 (rinfo.fltr_rule_id == rdata->rule_id &&
1702 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
/* Duplicate the lookups: removing the rule frees the entry
 * they live in, and they are needed again for the replay.
 */
1703 lkups_cnt = list_itr->lkups_cnt;
1704 lkups_dp = (struct ice_adv_lkup_elem *)
1705 ice_memdup(hw, list_itr->lkups,
1706 sizeof(*list_itr->lkups) *
1707 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1710 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* Collapse a VSI-list forward into a single-VSI forward. */
1714 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1715 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1716 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1725 /* Remove the old rule */
1726 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1729 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1735 /* Update VSI context */
1736 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1738 /* Replay the rule */
1739 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1742 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1747 ice_free(hw, lkups_dp);
/* Engine .init callback: register exactly one parser with the generic
 * flow framework — the permission-stage parser when the
 * "pipe_mode_support" devarg is set, the distributor-stage parser
 * otherwise.
 */
1752 ice_switch_init(struct ice_adapter *ad)
1755 struct ice_flow_parser *dist_parser;
1756 struct ice_flow_parser *perm_parser;
1758 if (ad->devargs.pipe_mode_support) {
1759 perm_parser = &ice_switch_perm_parser;
1760 ret = ice_register_parser(perm_parser, ad);
1762 dist_parser = &ice_switch_dist_parser;
1763 ret = ice_register_parser(dist_parser, ad);
/* Engine .uninit callback: mirror of ice_switch_init() — unregister
 * whichever parser was registered for this adapter's mode.
 */
1769 ice_switch_uninit(struct ice_adapter *ad)
1771 struct ice_flow_parser *dist_parser;
1772 struct ice_flow_parser *perm_parser;
1774 if (ad->devargs.pipe_mode_support) {
1775 perm_parser = &ice_switch_perm_parser;
1776 ice_unregister_parser(perm_parser, ad);
1778 dist_parser = &ice_switch_dist_parser;
1779 ice_unregister_parser(dist_parser, ad);
/* Switch-filter flow engine vtable plugged into the generic ice flow
 * framework (ICE_FLOW_ENGINE_SWITCH).
 */
1784 ice_flow_engine ice_switch_engine = {
1785 .init = ice_switch_init,
1786 .uninit = ice_switch_uninit,
1787 .create = ice_switch_create,
1788 .destroy = ice_switch_destroy,
1789 .query_count = ice_switch_query,
1790 .redirect = ice_switch_redirect,
1791 .free = ice_switch_filter_rule_free,
1792 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser: used when pipe_mode_support is disabled;
 * exposes the distributor pattern table for the switch engine.
 */
1796 ice_flow_parser ice_switch_dist_parser = {
1797 .engine = &ice_switch_engine,
1798 .array = ice_switch_pattern_dist_list,
1799 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1800 .parse_pattern_action = ice_switch_parse_pattern_action,
1801 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser: used when the "pipe_mode_support" devarg is
 * set; same parse entry point, different pattern table and stage.
 */
1805 ice_flow_parser ice_switch_perm_parser = {
1806 .engine = &ice_switch_engine,
1807 .array = ice_switch_pattern_perm_list,
1808 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1809 .parse_pattern_action = ice_switch_parse_pattern_action,
1810 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the generic flow
 * framework at shared-object load time (RTE_INIT priority).
 */
1813 RTE_INIT(ice_sw_engine_init)
1815 struct ice_flow_engine *engine = &ice_switch_engine;
1816 ice_register_flow_engine(engine);