1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Maximum number of queue-group type entries used when building a
 * queue-group RSS/queue action (consumer not visible in this excerpt).
 */
29 #define MAX_QGRP_NUM_TYPE 7
/* Upper bound on the total number of bytes a rule's input set may
 * match on; compared against the accumulated input_set_byte counter.
 */
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol-field values carried in the PPPoE payload:
 * 0x0021 = IPv4, 0x0057 = IPv6 (IANA PPP protocol numbers).  Used to
 * classify PPPoE rules as ICE_SW_TUN_PPPOE vs ICE_SW_TUN_PPPOE_PAY.
 */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 next-protocol value for GRE (47 decimal); when a pattern's
 * next_proto_id matches this, the rule is treated as tunnel-capable
 * (*tun_type = ICE_SW_TUN_AND_NON_TUN).
 */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/* Input-set bitmaps: for each supported pattern, the set of header
 * fields (one ICE_INSET_* bit per field) a rule may match on.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
/* NOTE(review): the final continuation line of each of the next two
 * macros (original lines 39 and 42 — presumably the trailing VLAN
 * flag(s) and closing parenthesis) is missing from this listing;
 * recover it from the full source before relying on these definitions.
 */
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_QINQ ( \
41 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
/* Plain (non-tunnel) MAC + IPv4 / IPv6 input sets.  Note the L4
 * variants (TCP/UDP) deliberately drop the PROTO/NEXT_HDR bit, since
 * the L4 protocol is already implied by the pattern itself.
 */
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59 ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel input sets.  The ICE_INSET_TUN_* flags refer to fields of the
 * encapsulated (inner) packet — presumably, per the naming convention;
 * confirm against ice_generic_flow.h.  The DIST_* variants additionally
 * allow matching the tunnel ID (TNI/VNI) and the *outer* IPv4
 * destination (bare ICE_INSET_IPV4_DST), while the PERM_* variants
 * match inner IP/L4 fields only.
 */
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98 ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets: session ID (plus optional PPP protocol ID), and
 * the PPPoE-over-IPv4/IPv6 combinations built from the plain MAC_IP*
 * sets above.
 */
103 #define ICE_SW_INSET_MAC_PPPOE ( \
104 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
107 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109 ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Security / tunneling protocols on top of plain IPv4/IPv6: ESP and
 * AH match their SPI, L2TPv3 matches the session ID, and PFCP matches
 * the S-field plus SEID.
 */
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135 ICE_SW_INSET_MAC_IPV4 | \
136 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138 ICE_SW_INSET_MAC_IPV6 | \
139 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* NOTE(review): the `struct sw_meta {` header (original line 141) and
 * the `uint16_t lkups_num;` member (line 143) are elided from this
 * listing.  The two fields below belong to that struct: `list` is the
 * lookup-element array and `rule_info` the rule attributes, both
 * consumed by ice_switch_create() via ice_add_adv_rule().
 */
142 struct ice_adv_lkup_elem *list;
144 struct ice_adv_rule_info rule_info;
/* Forward declarations so the parser objects can be referenced before
 * their definitions later in the file.
 */
147 static struct ice_flow_parser ice_switch_dist_parser;
148 static struct ice_flow_parser ice_switch_perm_parser;
/* Pattern table for the "dist" parser: each row pairs a supported flow
 * pattern with the input-set bits the pattern may match on; the
 * remaining ICE_INSET_NONE columns are unused for these entries.
 * Tunnel rows use the DIST_* insets (inner fields + tunnel ID + outer
 * IPv4 dst).  NOTE(review): the storage-class line preceding this
 * declaration (original line 150) and the closing brace (line 201) are
 * elided from this listing.
 */
151 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
152 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
154 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
156 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
157 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
158 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
159 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
160 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
161 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
162 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
163 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
164 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
165 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
166 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
167 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
168 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
169 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
170 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
171 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
172 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
173 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
174 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
175 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
176 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
177 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
178 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
179 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
180 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
181 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
182 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
183 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
184 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
185 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
186 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
187 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
188 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
189 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
190 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
191 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
192 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
193 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
194 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
195 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
196 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
197 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
198 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
199 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
200 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* Pattern table for the "perm" parser.  Identical to the dist table
 * except for the six tunnel rows, which use the PERM_TUNNEL_* insets
 * (inner IP/L4 fields only, no tunnel-ID or outer-IP match).
 * NOTE(review): the storage-class line before this declaration and the
 * closing brace after it are elided from this listing.
 */
204 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
205 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
206 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
207 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
208 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
209 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
218 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
220 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
221 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
222 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
224 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
243 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
244 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
245 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
246 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
247 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
248 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
249 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
250 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
251 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
252 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
253 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
/* Program a parsed switch rule into hardware.
 *
 * Reads the lookup list, lookup count and rule attributes out of the
 * parser-produced `struct sw_meta` (passed via the elided `meta`
 * parameter, original line 259), validates them, installs the rule via
 * ice_add_adv_rule(), and on success stores a heap-allocated copy of
 * the returned rule-query handle on flow->rule so that
 * ice_switch_destroy()/ice_switch_filter_rule_free() can remove and
 * free it later.
 *
 * NOTE(review): several lines of this function (return type, `meta`
 * parameter, goto/error-exit paths, return statements) are elided from
 * this listing — the visible flow below is partial.
 */
257 ice_switch_create(struct ice_adapter *ad,
258 struct rte_flow *flow,
260 struct rte_flow_error *error)
263 struct ice_pf *pf = &ad->pf;
264 struct ice_hw *hw = ICE_PF_TO_HW(pf);
265 struct ice_rule_query_data rule_added = {0};
266 struct ice_rule_query_data *filter_ptr;
267 struct ice_adv_lkup_elem *list =
268 ((struct sw_meta *)meta)->list;
270 ((struct sw_meta *)meta)->lkups_num;
271 struct ice_adv_rule_info *rule_info =
272 &((struct sw_meta *)meta)->rule_info;
/* Reject rules with more lookup words than the switch supports. */
274 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
275 rte_flow_error_set(error, EINVAL,
276 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
277 "item number too large for rule");
/* A NULL lookup list would crash ice_add_adv_rule(). */
281 rte_flow_error_set(error, EINVAL,
282 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
283 "lookup list should not be NULL");
/* Install the advanced rule; rule_added receives the HW rule ID. */
286 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
288 filter_ptr = rte_zmalloc("ice_switch_filter",
289 sizeof(struct ice_rule_query_data), 0);
291 rte_flow_error_set(error, EINVAL,
292 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
293 "No memory for ice_switch_filter");
/* Persist the rule handle on the rte_flow for later destroy. */
296 flow->rule = filter_ptr;
297 rte_memcpy(filter_ptr,
299 sizeof(struct ice_rule_query_data));
301 rte_flow_error_set(error, EINVAL,
302 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
303 "switch filter create flow fail");
/* Remove a previously created switch rule from hardware.
 *
 * Recovers the ice_rule_query_data handle stashed on the flow by
 * ice_switch_create(), removes the HW rule with
 * ice_rem_adv_rule_by_id(), and frees the handle.
 *
 * NOTE(review): parts of this function (the flow->rule access on
 * original line 328, the NULL-handle check, return statements and
 * closing brace) are elided from this listing.
 */
319 ice_switch_destroy(struct ice_adapter *ad,
320 struct rte_flow *flow,
321 struct rte_flow_error *error)
323 struct ice_hw *hw = &ad->hw;
325 struct ice_rule_query_data *filter_ptr;
327 filter_ptr = (struct ice_rule_query_data *)
/* Error path for a flow that carries no switch-filter handle. */
331 rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
334 " create by switch filter");
/* Ask firmware/HW to delete the rule identified by the handle. */
338 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
340 rte_flow_error_set(error, EINVAL,
341 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
342 "fail to destroy switch filter rule");
346 rte_free(filter_ptr);
/* Release the per-flow rule handle (the ice_rule_query_data allocated
 * in ice_switch_create()) without touching hardware state.
 * rte_free(NULL) is a no-op, so no NULL check is needed.
 */
351 ice_switch_filter_rule_free(struct rte_flow *flow)
353 rte_free(flow->rule);
357 ice_switch_inset_get(const struct rte_flow_item pattern[],
358 struct rte_flow_error *error,
359 struct ice_adv_lkup_elem *list,
361 enum ice_sw_tunnel_type *tun_type)
363 const struct rte_flow_item *item = pattern;
364 enum rte_flow_item_type item_type;
365 const struct rte_flow_item_eth *eth_spec, *eth_mask;
366 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
367 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
368 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
369 const struct rte_flow_item_udp *udp_spec, *udp_mask;
370 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
371 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
372 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
373 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
374 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
375 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
377 const struct rte_flow_item_esp *esp_spec, *esp_mask;
378 const struct rte_flow_item_ah *ah_spec, *ah_mask;
379 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
380 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
381 uint64_t input_set = ICE_INSET_NONE;
382 uint16_t input_set_byte = 0;
383 bool pppoe_elem_valid = 0;
384 bool pppoe_patt_valid = 0;
385 bool pppoe_prot_valid = 0;
386 bool inner_vlan_valid = 0;
387 bool outer_vlan_valid = 0;
388 bool tunnel_valid = 0;
389 bool profile_rule = 0;
390 bool nvgre_valid = 0;
391 bool vxlan_valid = 0;
399 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
400 *tun_type == ICE_NON_TUN_QINQ)
403 for (item = pattern; item->type !=
404 RTE_FLOW_ITEM_TYPE_END; item++) {
406 rte_flow_error_set(error, EINVAL,
407 RTE_FLOW_ERROR_TYPE_ITEM,
409 "Not support range");
412 item_type = item->type;
415 case RTE_FLOW_ITEM_TYPE_ETH:
416 eth_spec = item->spec;
417 eth_mask = item->mask;
418 if (eth_spec && eth_mask) {
419 const uint8_t *a = eth_mask->src.addr_bytes;
420 const uint8_t *b = eth_mask->dst.addr_bytes;
421 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
422 if (a[j] && tunnel_valid) {
432 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
433 if (b[j] && tunnel_valid) {
444 input_set |= ICE_INSET_ETHERTYPE;
445 list[t].type = (tunnel_valid == 0) ?
446 ICE_MAC_OFOS : ICE_MAC_IL;
447 struct ice_ether_hdr *h;
448 struct ice_ether_hdr *m;
450 h = &list[t].h_u.eth_hdr;
451 m = &list[t].m_u.eth_hdr;
452 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
453 if (eth_mask->src.addr_bytes[j]) {
455 eth_spec->src.addr_bytes[j];
457 eth_mask->src.addr_bytes[j];
461 if (eth_mask->dst.addr_bytes[j]) {
463 eth_spec->dst.addr_bytes[j];
465 eth_mask->dst.addr_bytes[j];
472 if (eth_mask->type) {
473 list[t].type = ICE_ETYPE_OL;
474 list[t].h_u.ethertype.ethtype_id =
476 list[t].m_u.ethertype.ethtype_id =
484 case RTE_FLOW_ITEM_TYPE_IPV4:
485 ipv4_spec = item->spec;
486 ipv4_mask = item->mask;
488 if (ipv4_spec && ipv4_mask) {
489 /* Check IPv4 mask and update input set */
490 if (ipv4_mask->hdr.version_ihl ||
491 ipv4_mask->hdr.total_length ||
492 ipv4_mask->hdr.packet_id ||
493 ipv4_mask->hdr.hdr_checksum) {
494 rte_flow_error_set(error, EINVAL,
495 RTE_FLOW_ERROR_TYPE_ITEM,
497 "Invalid IPv4 mask.");
502 if (ipv4_mask->hdr.type_of_service)
504 ICE_INSET_TUN_IPV4_TOS;
505 if (ipv4_mask->hdr.src_addr)
507 ICE_INSET_TUN_IPV4_SRC;
508 if (ipv4_mask->hdr.dst_addr)
510 ICE_INSET_TUN_IPV4_DST;
511 if (ipv4_mask->hdr.time_to_live)
513 ICE_INSET_TUN_IPV4_TTL;
514 if (ipv4_mask->hdr.next_proto_id)
516 ICE_INSET_TUN_IPV4_PROTO;
518 if (ipv4_mask->hdr.src_addr)
519 input_set |= ICE_INSET_IPV4_SRC;
520 if (ipv4_mask->hdr.dst_addr)
521 input_set |= ICE_INSET_IPV4_DST;
522 if (ipv4_mask->hdr.time_to_live)
523 input_set |= ICE_INSET_IPV4_TTL;
524 if (ipv4_mask->hdr.next_proto_id)
526 ICE_INSET_IPV4_PROTO;
527 if (ipv4_mask->hdr.type_of_service)
531 list[t].type = (tunnel_valid == 0) ?
532 ICE_IPV4_OFOS : ICE_IPV4_IL;
533 if (ipv4_mask->hdr.src_addr) {
534 list[t].h_u.ipv4_hdr.src_addr =
535 ipv4_spec->hdr.src_addr;
536 list[t].m_u.ipv4_hdr.src_addr =
537 ipv4_mask->hdr.src_addr;
540 if (ipv4_mask->hdr.dst_addr) {
541 list[t].h_u.ipv4_hdr.dst_addr =
542 ipv4_spec->hdr.dst_addr;
543 list[t].m_u.ipv4_hdr.dst_addr =
544 ipv4_mask->hdr.dst_addr;
547 if (ipv4_mask->hdr.time_to_live) {
548 list[t].h_u.ipv4_hdr.time_to_live =
549 ipv4_spec->hdr.time_to_live;
550 list[t].m_u.ipv4_hdr.time_to_live =
551 ipv4_mask->hdr.time_to_live;
554 if (ipv4_mask->hdr.next_proto_id) {
555 list[t].h_u.ipv4_hdr.protocol =
556 ipv4_spec->hdr.next_proto_id;
557 list[t].m_u.ipv4_hdr.protocol =
558 ipv4_mask->hdr.next_proto_id;
561 if ((ipv4_spec->hdr.next_proto_id &
562 ipv4_mask->hdr.next_proto_id) ==
563 ICE_IPV4_PROTO_NVGRE)
564 *tun_type = ICE_SW_TUN_AND_NON_TUN;
565 if (ipv4_mask->hdr.type_of_service) {
566 list[t].h_u.ipv4_hdr.tos =
567 ipv4_spec->hdr.type_of_service;
568 list[t].m_u.ipv4_hdr.tos =
569 ipv4_mask->hdr.type_of_service;
576 case RTE_FLOW_ITEM_TYPE_IPV6:
577 ipv6_spec = item->spec;
578 ipv6_mask = item->mask;
580 if (ipv6_spec && ipv6_mask) {
581 if (ipv6_mask->hdr.payload_len) {
582 rte_flow_error_set(error, EINVAL,
583 RTE_FLOW_ERROR_TYPE_ITEM,
585 "Invalid IPv6 mask");
589 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
590 if (ipv6_mask->hdr.src_addr[j] &&
593 ICE_INSET_TUN_IPV6_SRC;
595 } else if (ipv6_mask->hdr.src_addr[j]) {
596 input_set |= ICE_INSET_IPV6_SRC;
600 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
601 if (ipv6_mask->hdr.dst_addr[j] &&
604 ICE_INSET_TUN_IPV6_DST;
606 } else if (ipv6_mask->hdr.dst_addr[j]) {
607 input_set |= ICE_INSET_IPV6_DST;
611 if (ipv6_mask->hdr.proto &&
614 ICE_INSET_TUN_IPV6_NEXT_HDR;
615 else if (ipv6_mask->hdr.proto)
617 ICE_INSET_IPV6_NEXT_HDR;
618 if (ipv6_mask->hdr.hop_limits &&
621 ICE_INSET_TUN_IPV6_HOP_LIMIT;
622 else if (ipv6_mask->hdr.hop_limits)
624 ICE_INSET_IPV6_HOP_LIMIT;
625 if ((ipv6_mask->hdr.vtc_flow &
627 (RTE_IPV6_HDR_TC_MASK)) &&
630 ICE_INSET_TUN_IPV6_TC;
631 else if (ipv6_mask->hdr.vtc_flow &
633 (RTE_IPV6_HDR_TC_MASK))
634 input_set |= ICE_INSET_IPV6_TC;
636 list[t].type = (tunnel_valid == 0) ?
637 ICE_IPV6_OFOS : ICE_IPV6_IL;
638 struct ice_ipv6_hdr *f;
639 struct ice_ipv6_hdr *s;
640 f = &list[t].h_u.ipv6_hdr;
641 s = &list[t].m_u.ipv6_hdr;
642 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
643 if (ipv6_mask->hdr.src_addr[j]) {
645 ipv6_spec->hdr.src_addr[j];
647 ipv6_mask->hdr.src_addr[j];
650 if (ipv6_mask->hdr.dst_addr[j]) {
652 ipv6_spec->hdr.dst_addr[j];
654 ipv6_mask->hdr.dst_addr[j];
658 if (ipv6_mask->hdr.proto) {
660 ipv6_spec->hdr.proto;
662 ipv6_mask->hdr.proto;
665 if (ipv6_mask->hdr.hop_limits) {
667 ipv6_spec->hdr.hop_limits;
669 ipv6_mask->hdr.hop_limits;
672 if (ipv6_mask->hdr.vtc_flow &
674 (RTE_IPV6_HDR_TC_MASK)) {
675 struct ice_le_ver_tc_flow vtf;
676 vtf.u.fld.version = 0;
677 vtf.u.fld.flow_label = 0;
678 vtf.u.fld.tc = (rte_be_to_cpu_32
679 (ipv6_spec->hdr.vtc_flow) &
680 RTE_IPV6_HDR_TC_MASK) >>
681 RTE_IPV6_HDR_TC_SHIFT;
682 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
683 vtf.u.fld.tc = (rte_be_to_cpu_32
684 (ipv6_mask->hdr.vtc_flow) &
685 RTE_IPV6_HDR_TC_MASK) >>
686 RTE_IPV6_HDR_TC_SHIFT;
687 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
694 case RTE_FLOW_ITEM_TYPE_UDP:
695 udp_spec = item->spec;
696 udp_mask = item->mask;
698 if (udp_spec && udp_mask) {
699 /* Check UDP mask and update input set*/
700 if (udp_mask->hdr.dgram_len ||
701 udp_mask->hdr.dgram_cksum) {
702 rte_flow_error_set(error, EINVAL,
703 RTE_FLOW_ERROR_TYPE_ITEM,
710 if (udp_mask->hdr.src_port)
712 ICE_INSET_TUN_UDP_SRC_PORT;
713 if (udp_mask->hdr.dst_port)
715 ICE_INSET_TUN_UDP_DST_PORT;
717 if (udp_mask->hdr.src_port)
719 ICE_INSET_UDP_SRC_PORT;
720 if (udp_mask->hdr.dst_port)
722 ICE_INSET_UDP_DST_PORT;
724 if (*tun_type == ICE_SW_TUN_VXLAN &&
726 list[t].type = ICE_UDP_OF;
728 list[t].type = ICE_UDP_ILOS;
729 if (udp_mask->hdr.src_port) {
730 list[t].h_u.l4_hdr.src_port =
731 udp_spec->hdr.src_port;
732 list[t].m_u.l4_hdr.src_port =
733 udp_mask->hdr.src_port;
736 if (udp_mask->hdr.dst_port) {
737 list[t].h_u.l4_hdr.dst_port =
738 udp_spec->hdr.dst_port;
739 list[t].m_u.l4_hdr.dst_port =
740 udp_mask->hdr.dst_port;
747 case RTE_FLOW_ITEM_TYPE_TCP:
748 tcp_spec = item->spec;
749 tcp_mask = item->mask;
751 if (tcp_spec && tcp_mask) {
752 /* Check TCP mask and update input set */
753 if (tcp_mask->hdr.sent_seq ||
754 tcp_mask->hdr.recv_ack ||
755 tcp_mask->hdr.data_off ||
756 tcp_mask->hdr.tcp_flags ||
757 tcp_mask->hdr.rx_win ||
758 tcp_mask->hdr.cksum ||
759 tcp_mask->hdr.tcp_urp) {
760 rte_flow_error_set(error, EINVAL,
761 RTE_FLOW_ERROR_TYPE_ITEM,
768 if (tcp_mask->hdr.src_port)
770 ICE_INSET_TUN_TCP_SRC_PORT;
771 if (tcp_mask->hdr.dst_port)
773 ICE_INSET_TUN_TCP_DST_PORT;
775 if (tcp_mask->hdr.src_port)
777 ICE_INSET_TCP_SRC_PORT;
778 if (tcp_mask->hdr.dst_port)
780 ICE_INSET_TCP_DST_PORT;
782 list[t].type = ICE_TCP_IL;
783 if (tcp_mask->hdr.src_port) {
784 list[t].h_u.l4_hdr.src_port =
785 tcp_spec->hdr.src_port;
786 list[t].m_u.l4_hdr.src_port =
787 tcp_mask->hdr.src_port;
790 if (tcp_mask->hdr.dst_port) {
791 list[t].h_u.l4_hdr.dst_port =
792 tcp_spec->hdr.dst_port;
793 list[t].m_u.l4_hdr.dst_port =
794 tcp_mask->hdr.dst_port;
801 case RTE_FLOW_ITEM_TYPE_SCTP:
802 sctp_spec = item->spec;
803 sctp_mask = item->mask;
804 if (sctp_spec && sctp_mask) {
805 /* Check SCTP mask and update input set */
806 if (sctp_mask->hdr.cksum) {
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ITEM,
810 "Invalid SCTP mask");
815 if (sctp_mask->hdr.src_port)
817 ICE_INSET_TUN_SCTP_SRC_PORT;
818 if (sctp_mask->hdr.dst_port)
820 ICE_INSET_TUN_SCTP_DST_PORT;
822 if (sctp_mask->hdr.src_port)
824 ICE_INSET_SCTP_SRC_PORT;
825 if (sctp_mask->hdr.dst_port)
827 ICE_INSET_SCTP_DST_PORT;
829 list[t].type = ICE_SCTP_IL;
830 if (sctp_mask->hdr.src_port) {
831 list[t].h_u.sctp_hdr.src_port =
832 sctp_spec->hdr.src_port;
833 list[t].m_u.sctp_hdr.src_port =
834 sctp_mask->hdr.src_port;
837 if (sctp_mask->hdr.dst_port) {
838 list[t].h_u.sctp_hdr.dst_port =
839 sctp_spec->hdr.dst_port;
840 list[t].m_u.sctp_hdr.dst_port =
841 sctp_mask->hdr.dst_port;
848 case RTE_FLOW_ITEM_TYPE_VXLAN:
849 vxlan_spec = item->spec;
850 vxlan_mask = item->mask;
851 /* Check if VXLAN item is used to describe protocol.
852 * If yes, both spec and mask should be NULL.
853 * If no, both spec and mask shouldn't be NULL.
855 if ((!vxlan_spec && vxlan_mask) ||
856 (vxlan_spec && !vxlan_mask)) {
857 rte_flow_error_set(error, EINVAL,
858 RTE_FLOW_ERROR_TYPE_ITEM,
860 "Invalid VXLAN item");
865 if (vxlan_spec && vxlan_mask) {
866 list[t].type = ICE_VXLAN;
867 if (vxlan_mask->vni[0] ||
868 vxlan_mask->vni[1] ||
869 vxlan_mask->vni[2]) {
870 list[t].h_u.tnl_hdr.vni =
871 (vxlan_spec->vni[2] << 16) |
872 (vxlan_spec->vni[1] << 8) |
874 list[t].m_u.tnl_hdr.vni =
875 (vxlan_mask->vni[2] << 16) |
876 (vxlan_mask->vni[1] << 8) |
879 ICE_INSET_TUN_VXLAN_VNI;
886 case RTE_FLOW_ITEM_TYPE_NVGRE:
887 nvgre_spec = item->spec;
888 nvgre_mask = item->mask;
889 /* Check if NVGRE item is used to describe protocol.
890 * If yes, both spec and mask should be NULL.
891 * If no, both spec and mask shouldn't be NULL.
893 if ((!nvgre_spec && nvgre_mask) ||
894 (nvgre_spec && !nvgre_mask)) {
895 rte_flow_error_set(error, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ITEM,
898 "Invalid NVGRE item");
903 if (nvgre_spec && nvgre_mask) {
904 list[t].type = ICE_NVGRE;
905 if (nvgre_mask->tni[0] ||
906 nvgre_mask->tni[1] ||
907 nvgre_mask->tni[2]) {
908 list[t].h_u.nvgre_hdr.tni_flow =
909 (nvgre_spec->tni[2] << 16) |
910 (nvgre_spec->tni[1] << 8) |
912 list[t].m_u.nvgre_hdr.tni_flow =
913 (nvgre_mask->tni[2] << 16) |
914 (nvgre_mask->tni[1] << 8) |
917 ICE_INSET_TUN_NVGRE_TNI;
924 case RTE_FLOW_ITEM_TYPE_VLAN:
925 vlan_spec = item->spec;
926 vlan_mask = item->mask;
927 /* Check if VLAN item is used to describe protocol.
928 * If yes, both spec and mask should be NULL.
929 * If no, both spec and mask shouldn't be NULL.
931 if ((!vlan_spec && vlan_mask) ||
932 (vlan_spec && !vlan_mask)) {
933 rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ITEM,
936 "Invalid VLAN item");
941 if (!outer_vlan_valid)
942 outer_vlan_valid = 1;
944 inner_vlan_valid = 1;
947 if (vlan_spec && vlan_mask) {
949 if (!inner_vlan_valid) {
950 list[t].type = ICE_VLAN_EX;
952 ICE_INSET_VLAN_OUTER;
954 list[t].type = ICE_VLAN_IN;
956 ICE_INSET_VLAN_INNER;
959 list[t].type = ICE_VLAN_OFOS;
960 input_set |= ICE_INSET_VLAN_INNER;
963 if (vlan_mask->tci) {
964 list[t].h_u.vlan_hdr.vlan =
966 list[t].m_u.vlan_hdr.vlan =
970 if (vlan_mask->inner_type) {
971 rte_flow_error_set(error, EINVAL,
972 RTE_FLOW_ERROR_TYPE_ITEM,
974 "Invalid VLAN input set.");
981 case RTE_FLOW_ITEM_TYPE_PPPOED:
982 case RTE_FLOW_ITEM_TYPE_PPPOES:
983 pppoe_spec = item->spec;
984 pppoe_mask = item->mask;
985 /* Check if PPPoE item is used to describe protocol.
986 * If yes, both spec and mask should be NULL.
987 * If no, both spec and mask shouldn't be NULL.
989 if ((!pppoe_spec && pppoe_mask) ||
990 (pppoe_spec && !pppoe_mask)) {
991 rte_flow_error_set(error, EINVAL,
992 RTE_FLOW_ERROR_TYPE_ITEM,
994 "Invalid pppoe item");
997 pppoe_patt_valid = 1;
998 if (pppoe_spec && pppoe_mask) {
999 /* Check pppoe mask and update input set */
1000 if (pppoe_mask->length ||
1002 pppoe_mask->version_type) {
1003 rte_flow_error_set(error, EINVAL,
1004 RTE_FLOW_ERROR_TYPE_ITEM,
1006 "Invalid pppoe mask");
1009 list[t].type = ICE_PPPOE;
1010 if (pppoe_mask->session_id) {
1011 list[t].h_u.pppoe_hdr.session_id =
1012 pppoe_spec->session_id;
1013 list[t].m_u.pppoe_hdr.session_id =
1014 pppoe_mask->session_id;
1015 input_set |= ICE_INSET_PPPOE_SESSION;
1016 input_set_byte += 2;
1019 pppoe_elem_valid = 1;
1023 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1024 pppoe_proto_spec = item->spec;
1025 pppoe_proto_mask = item->mask;
1026 /* Check if PPPoE optional proto_id item
1027 * is used to describe protocol.
1028 * If yes, both spec and mask should be NULL.
1029 * If no, both spec and mask shouldn't be NULL.
1031 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1032 (pppoe_proto_spec && !pppoe_proto_mask)) {
1033 rte_flow_error_set(error, EINVAL,
1034 RTE_FLOW_ERROR_TYPE_ITEM,
1036 "Invalid pppoe proto item");
1039 if (pppoe_proto_spec && pppoe_proto_mask) {
1040 if (pppoe_elem_valid)
1042 list[t].type = ICE_PPPOE;
1043 if (pppoe_proto_mask->proto_id) {
1044 list[t].h_u.pppoe_hdr.ppp_prot_id =
1045 pppoe_proto_spec->proto_id;
1046 list[t].m_u.pppoe_hdr.ppp_prot_id =
1047 pppoe_proto_mask->proto_id;
1048 input_set |= ICE_INSET_PPPOE_PROTO;
1049 input_set_byte += 2;
1050 pppoe_prot_valid = 1;
1052 if ((pppoe_proto_mask->proto_id &
1053 pppoe_proto_spec->proto_id) !=
1054 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1055 (pppoe_proto_mask->proto_id &
1056 pppoe_proto_spec->proto_id) !=
1057 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1058 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1060 *tun_type = ICE_SW_TUN_PPPOE;
1066 case RTE_FLOW_ITEM_TYPE_ESP:
1067 esp_spec = item->spec;
1068 esp_mask = item->mask;
1069 if ((esp_spec && !esp_mask) ||
1070 (!esp_spec && esp_mask)) {
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1074 "Invalid esp item");
1077 /* Check esp mask and update input set */
1078 if (esp_mask && esp_mask->hdr.seq) {
1079 rte_flow_error_set(error, EINVAL,
1080 RTE_FLOW_ERROR_TYPE_ITEM,
1082 "Invalid esp mask");
1086 if (!esp_spec && !esp_mask && !input_set) {
1088 if (ipv6_valid && udp_valid)
1090 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1091 else if (ipv6_valid)
1092 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1093 else if (ipv4_valid)
1095 } else if (esp_spec && esp_mask &&
1098 list[t].type = ICE_NAT_T;
1100 list[t].type = ICE_ESP;
1101 list[t].h_u.esp_hdr.spi =
1103 list[t].m_u.esp_hdr.spi =
1105 input_set |= ICE_INSET_ESP_SPI;
1106 input_set_byte += 4;
1110 if (!profile_rule) {
1111 if (ipv6_valid && udp_valid)
1112 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1113 else if (ipv4_valid && udp_valid)
1114 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1115 else if (ipv6_valid)
1116 *tun_type = ICE_SW_TUN_IPV6_ESP;
1117 else if (ipv4_valid)
1118 *tun_type = ICE_SW_TUN_IPV4_ESP;
1122 case RTE_FLOW_ITEM_TYPE_AH:
1123 ah_spec = item->spec;
1124 ah_mask = item->mask;
1125 if ((ah_spec && !ah_mask) ||
1126 (!ah_spec && ah_mask)) {
1127 rte_flow_error_set(error, EINVAL,
1128 RTE_FLOW_ERROR_TYPE_ITEM,
1133 /* Check ah mask and update input set */
1135 (ah_mask->next_hdr ||
1136 ah_mask->payload_len ||
1138 ah_mask->reserved)) {
1139 rte_flow_error_set(error, EINVAL,
1140 RTE_FLOW_ERROR_TYPE_ITEM,
1146 if (!ah_spec && !ah_mask && !input_set) {
1148 if (ipv6_valid && udp_valid)
1150 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1151 else if (ipv6_valid)
1152 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1153 else if (ipv4_valid)
1155 } else if (ah_spec && ah_mask &&
1157 list[t].type = ICE_AH;
1158 list[t].h_u.ah_hdr.spi =
1160 list[t].m_u.ah_hdr.spi =
1162 input_set |= ICE_INSET_AH_SPI;
1163 input_set_byte += 4;
1167 if (!profile_rule) {
1170 else if (ipv6_valid)
1171 *tun_type = ICE_SW_TUN_IPV6_AH;
1172 else if (ipv4_valid)
1173 *tun_type = ICE_SW_TUN_IPV4_AH;
1177 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1178 l2tp_spec = item->spec;
1179 l2tp_mask = item->mask;
1180 if ((l2tp_spec && !l2tp_mask) ||
1181 (!l2tp_spec && l2tp_mask)) {
1182 rte_flow_error_set(error, EINVAL,
1183 RTE_FLOW_ERROR_TYPE_ITEM,
1185 "Invalid l2tp item");
1189 if (!l2tp_spec && !l2tp_mask && !input_set) {
1192 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1193 else if (ipv4_valid)
1195 } else if (l2tp_spec && l2tp_mask &&
1196 l2tp_mask->session_id){
1197 list[t].type = ICE_L2TPV3;
1198 list[t].h_u.l2tpv3_sess_hdr.session_id =
1199 l2tp_spec->session_id;
1200 list[t].m_u.l2tpv3_sess_hdr.session_id =
1201 l2tp_mask->session_id;
1202 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1203 input_set_byte += 4;
1207 if (!profile_rule) {
1210 ICE_SW_TUN_IPV6_L2TPV3;
1211 else if (ipv4_valid)
1213 ICE_SW_TUN_IPV4_L2TPV3;
1217 case RTE_FLOW_ITEM_TYPE_PFCP:
1218 pfcp_spec = item->spec;
1219 pfcp_mask = item->mask;
1220 /* Check if PFCP item is used to describe protocol.
1221 * If yes, both spec and mask should be NULL.
1222 * If no, both spec and mask shouldn't be NULL.
1224 if ((!pfcp_spec && pfcp_mask) ||
1225 (pfcp_spec && !pfcp_mask)) {
1226 rte_flow_error_set(error, EINVAL,
1227 RTE_FLOW_ERROR_TYPE_ITEM,
1229 "Invalid PFCP item");
1232 if (pfcp_spec && pfcp_mask) {
1233 /* Check pfcp mask and update input set */
1234 if (pfcp_mask->msg_type ||
1235 pfcp_mask->msg_len ||
1237 rte_flow_error_set(error, EINVAL,
1238 RTE_FLOW_ERROR_TYPE_ITEM,
1240 "Invalid pfcp mask");
1243 if (pfcp_mask->s_field &&
1244 pfcp_spec->s_field == 0x01 &&
1247 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1248 else if (pfcp_mask->s_field &&
1249 pfcp_spec->s_field == 0x01)
1251 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1252 else if (pfcp_mask->s_field &&
1253 !pfcp_spec->s_field &&
1256 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1257 else if (pfcp_mask->s_field &&
1258 !pfcp_spec->s_field)
1260 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1266 case RTE_FLOW_ITEM_TYPE_VOID:
1270 rte_flow_error_set(error, EINVAL,
1271 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1272 "Invalid pattern item.");
1277 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1278 inner_vlan_valid && outer_vlan_valid)
1279 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1280 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1281 inner_vlan_valid && outer_vlan_valid)
1282 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1283 else if (*tun_type == ICE_NON_TUN &&
1284 inner_vlan_valid && outer_vlan_valid)
1285 *tun_type = ICE_NON_TUN_QINQ;
1286 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1287 inner_vlan_valid && outer_vlan_valid)
1288 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1290 if (pppoe_patt_valid && !pppoe_prot_valid) {
1291 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1292 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1293 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1294 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1295 else if (inner_vlan_valid && outer_vlan_valid)
1296 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1297 else if (ipv6_valid && udp_valid)
1298 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1299 else if (ipv6_valid && tcp_valid)
1300 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1301 else if (ipv4_valid && udp_valid)
1302 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1303 else if (ipv4_valid && tcp_valid)
1304 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1305 else if (ipv6_valid)
1306 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1307 else if (ipv4_valid)
1308 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1310 *tun_type = ICE_SW_TUN_PPPOE;
1313 if (*tun_type == ICE_NON_TUN) {
1315 *tun_type = ICE_SW_TUN_VXLAN;
1316 else if (nvgre_valid)
1317 *tun_type = ICE_SW_TUN_NVGRE;
1318 else if (ipv4_valid && tcp_valid)
1319 *tun_type = ICE_SW_IPV4_TCP;
1320 else if (ipv4_valid && udp_valid)
1321 *tun_type = ICE_SW_IPV4_UDP;
1322 else if (ipv6_valid && tcp_valid)
1323 *tun_type = ICE_SW_IPV6_TCP;
1324 else if (ipv6_valid && udp_valid)
1325 *tun_type = ICE_SW_IPV6_UDP;
1328 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1329 rte_flow_error_set(error, EINVAL,
1330 RTE_FLOW_ERROR_TYPE_ITEM,
1332 "too much input set");
/* Parse the action list for a rule created through the DCF (Device
 * Config Function) representor and fill @rule_info.
 *
 * Supported actions:
 *  - RTE_FLOW_ACTION_TYPE_VF:   forward to the VSI of the given VF;
 *                               with .original set, the DCF's own VSI
 *                               (identified via avf.bus.func) is used.
 *  - RTE_FLOW_ACTION_TYPE_DROP: drop the packet.
 * Any other action type sets an rte_flow error.
 */
ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct ice_adv_rule_info *rule_info)
	const struct rte_flow_action_vf *act_vf;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	for (action = actions; action->type !=
				RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
			act_vf = action->conf;
			/* A non-original VF id must be within the number of
			 * VFs known to the real HW.
			 */
			if (act_vf->id >= ad->real_hw.num_vfs &&
				!act_vf->original) {
				rte_flow_error_set(error,
					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			/* .original selects the DCF itself as target;
			 * otherwise the VF id is used as the VSI handle.
			 */
			if (act_vf->original)
				rule_info->sw_act.vsi_handle =
					ad->real_hw.avf.bus.func;
				rule_info->sw_act.vsi_handle = act_vf->id;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				"Invalid action type");
	/* Rule matches Rx traffic sourced from the target VSI;
	 * priority 5 is the fixed switch-rule priority used here.
	 */
	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
	rule_info->sw_act.flag = ICE_FLTR_RX;
	rule_info->priority = 5;
/* Parse the action list for a non-DCF switch rule (PF path) and fill
 * @rule_info.  The forwarding target is the PF main VSI.
 *
 * Supported actions:
 *  - RTE_FLOW_ACTION_TYPE_RSS:   interpreted as a queue-group (queue
 *                                region) forward; the group size must be
 *                                a supported power of two and the queues
 *                                contiguous.
 *  - RTE_FLOW_ACTION_TYPE_QUEUE: forward to a single Rx queue.
 *  - RTE_FLOW_ACTION_TYPE_DROP / VOID.
 * Validation failures set a specific rte_flow error message.
 */
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* The only queue-group sizes accepted for an RSS (queue region)
	 * action.
	 */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		2, 4, 8, 16, 32, 64, 128};
	/* Queue ids programmed into the rule are absolute: offset the
	 * per-VSI index by the PF and VSI base queue.
	 */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
		RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			act_qgrop = action->conf;
			/* A one-queue "group" is not a valid region. */
			if (act_qgrop->queue_num <= 1)
			rule_info->sw_act.fltr_act =
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			/* Group size must match one of the supported
			 * sizes in valid_qgrop_number[].
			 */
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
			if (i == MAX_QGRP_NUM_TYPE)
			/* The region must fit within the configured Rx
			 * queues of the port.
			 */
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev->data->nb_rx_queues)
			/* Queues in the region must be contiguous. */
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			/* Target queue must exist on this port. */
			if (act_q->index >= dev->data->nb_rx_queues)
			rule_info->sw_act.fltr_act =
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
		case RTE_FLOW_ACTION_TYPE_VOID:
	/* Rule is bound to the PF main VSI, both as target and source. */
	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = 5;
	/* Error paths (reached from the validation checks above). */
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		"Invalid action type or queue number");
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		"Invalid queue region indexes");
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		"Discontinuous queue region");
/* Validate the action list before parsing: only VF/RSS/QUEUE/DROP
 * (counted) and VOID (ignored) are permitted, and exactly one counted
 * action must be present.  Sets an rte_flow error otherwise.
 */
ice_switch_check_action(const struct rte_flow_action *actions,
			struct rte_flow_error *error)
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	uint16_t actions_num = 0;
	for (action = actions; action->type !=
				RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_VOID:
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				"Invalid action type");
	/* Exactly one fate-deciding action is allowed per rule. */
	if (actions_num != 1) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			"Invalid action number");
/* Engine entry point: parse a flow pattern + action list into a
 * sw_meta descriptor (lookup list, lookup count and rule info) stored
 * in *meta for the later create step.
 *
 * Steps:
 *  1. Pre-scan the pattern to size the lookup array, detect QinQ
 *     (two VLAN items) and the "tunnel and non-tunnel" case.
 *  2. Allocate the lookup list and the sw_meta wrapper.
 *  3. Match the pattern against the parser's supported-pattern array.
 *  4. Extract the input set via ice_switch_inset_get() and check it
 *     against the matched pattern's allowed mask.
 *  5. Validate and parse the actions (DCF or PF variant).
 * On any failure an rte_flow error is set and partially-built
 * allocations are freed on the error paths at the bottom.
 */
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
	struct ice_pattern_match_item *pattern_match_item = NULL;
	/* Pre-scan: count items and VLANs, detect tunnel-agnostic ETH. */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			eth_mask = item->mask;
			/* Fully-masked ether type: the rule should hit
			 * both tunneled and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
	/* Two VLAN items => promote the tunnel type to its QinQ form. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;
	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"No memory for PMD internal items");
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"No memory for sw_pattern_meta_ptr");
	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Invalid input pattern");
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	/* Profile rules may legitimately have an empty input set;
	 * otherwise the extracted set must be non-empty and a subset of
	 * what the matched pattern supports.
	 */
	if ((!inputset && !ice_is_prof_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask_o)) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
			"Invalid input set");
	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;
	ret = ice_switch_check_action(actions, error);
	/* DCF and PF ports take different action-parsing paths. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);
	/* Success: hand the assembled metadata to the caller, which now
	 * owns the sw_meta and lookup-list allocations.
	 */
	*meta = sw_meta_ptr;
	((struct sw_meta *)*meta)->list = list;
	((struct sw_meta *)*meta)->lkups_num = lkups_num;
	((struct sw_meta *)*meta)->rule_info = rule_info;
	/* Error paths: release whatever was allocated so far. */
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);
/* .query_count callback: the COUNT query is not supported by the
 * switch filter engine, so always report an rte_flow error.
 */
ice_switch_query(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_query_count *count __rte_unused,
		struct rte_flow_error *error)
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_HANDLE,
		"count action not supported by switch filter");
/* Re-target an existing FWD_TO_VSI(_LIST) switch rule after a VSI
 * change (e.g. VF reset): locate the installed rule in the recipe's
 * filter list, duplicate its lookups, remove the old HW rule, update
 * the VSI context with the new VSI number, and replay the rule.
 * Only ICE_FLOW_REDIRECT_VSI redirects are handled.
 */
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	/* Rule belongs to a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
	sw = hw->switch_info;
	/* Recipe not instantiated in HW: nothing to replay. */
	if (!sw->recp_list[rdata->rid].recp_created)
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
	/* Walk the recipe's filter list looking for our rule id with a
	 * matching VSI forward (single VSI or VSI list).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Copy the lookups: the originals are freed when
			 * the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
			/* A VSI-list forward is collapsed to a plain
			 * single-VSI forward on replay.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
	ice_free(hw, lkups_dp);
/* Engine init hook: register the permission-stage parser when
 * pipeline mode (devarg pipe_mode_support) is enabled, otherwise the
 * distributor-stage parser.
 */
ice_switch_init(struct ice_adapter *ad)
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;
	if (ad->devargs.pipe_mode_support) {
		perm_parser = &ice_switch_perm_parser;
		ret = ice_register_parser(perm_parser, ad);
		dist_parser = &ice_switch_dist_parser;
		ret = ice_register_parser(dist_parser, ad);
/* Engine uninit hook: unregister whichever parser ice_switch_init()
 * registered, selected by the same pipe_mode_support devarg.
 */
ice_switch_uninit(struct ice_adapter *ad)
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;
	if (ad->devargs.pipe_mode_support) {
		perm_parser = &ice_switch_perm_parser;
		ice_unregister_parser(perm_parser, ad);
		dist_parser = &ice_switch_dist_parser;
		ice_unregister_parser(dist_parser, ad);
/* Switch filter engine descriptor, plugged into the generic ice flow
 * framework via ice_register_flow_engine().
 */
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
/* Parser used in the default (distributor) stage of the flow
 * pipeline; pattern support comes from ice_switch_pattern_dist_list.
 */
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Parser used in the permission stage when pipeline mode is enabled;
 * pattern support comes from ice_switch_pattern_perm_list.
 */
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the generic flow
 * framework at DPDK initialization time.
 */
RTE_INIT(ice_sw_engine_init)
	struct ice_flow_engine *engine = &ice_switch_engine;
	ice_register_flow_engine(engine);