1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
28 #define MAX_QGRP_NUM_TYPE 7
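/* Input-set bitmasks: the packet fields each supported switch-filter
 * pattern is allowed to match on; used as input_set_mask in the
 * pattern tables below.
 */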
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
100 struct ice_adv_lkup_elem *list;
102 struct ice_adv_rule_info rule_info;
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
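/* Patterns handled by the distributor-stage parser when the comms DDP
 * package is active (see ice_switch_init()).
 */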
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
112 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113 {pattern_ethertype_vlan,
114 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
116 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp,
118 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_tcp,
120 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
122 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123 {pattern_eth_ipv6_udp,
124 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv6_tcp,
126 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_nvgre_eth_ipv4,
134 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141 {pattern_eth_vlan_pppoed,
142 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
144 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145 {pattern_eth_vlan_pppoes,
146 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147 {pattern_eth_pppoes_proto,
148 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149 {pattern_eth_vlan_pppoes_proto,
150 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
151 {pattern_eth_ipv6_esp,
152 ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_eth_ipv6_udp_esp,
154 ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_ipv6_ah,
156 ICE_INSET_NONE, ICE_INSET_NONE},
157 {pattern_eth_ipv6_udp_ah,
158 ICE_INSET_NONE, ICE_INSET_NONE},
159 {pattern_eth_ipv6_l2tp,
160 ICE_INSET_NONE, ICE_INSET_NONE},
161 {pattern_eth_ipv4_pfcp,
162 ICE_INSET_NONE, ICE_INSET_NONE},
163 {pattern_eth_ipv6_pfcp,
164 ICE_INSET_NONE, ICE_INSET_NONE},
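/* Distributor-stage patterns for the OS default DDP package: a reduced
 * set without the PPPoE, ESP/AH, L2TPv3 and PFCP entries above.
 */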
168 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
170 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
171 {pattern_ethertype_vlan,
172 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
174 ICE_INSET_NONE, ICE_INSET_NONE},
176 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
177 {pattern_eth_ipv4_udp,
178 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
179 {pattern_eth_ipv4_tcp,
180 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
182 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
183 {pattern_eth_ipv6_udp,
184 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
185 {pattern_eth_ipv6_tcp,
186 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
188 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
189 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
190 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
191 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
192 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
193 {pattern_eth_ipv4_nvgre_eth_ipv4,
194 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
195 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
196 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
197 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
198 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
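/* Patterns for the permission stage (pipe mode); tunnel inner headers
 * are matched through the ICE_SW_INSET_PERM_TUNNEL_* sets.
 */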
202 ice_pattern_match_item ice_switch_pattern_perm[] = {
204 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
205 {pattern_ethertype_vlan,
206 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
208 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
209 {pattern_eth_ipv4_udp,
210 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
211 {pattern_eth_ipv4_tcp,
212 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
214 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
215 {pattern_eth_ipv6_udp,
216 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
217 {pattern_eth_ipv6_tcp,
218 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
219 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
220 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
221 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
222 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
224 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
225 {pattern_eth_ipv4_nvgre_eth_ipv4,
226 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
227 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
228 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
229 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
230 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
232 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
233 {pattern_eth_vlan_pppoed,
234 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
236 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
237 {pattern_eth_vlan_pppoes,
238 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
239 {pattern_eth_pppoes_proto,
240 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
241 {pattern_eth_vlan_pppoes_proto,
242 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
243 {pattern_eth_ipv6_esp,
244 ICE_INSET_NONE, ICE_INSET_NONE},
245 {pattern_eth_ipv6_udp_esp,
246 ICE_INSET_NONE, ICE_INSET_NONE},
247 {pattern_eth_ipv6_ah,
248 ICE_INSET_NONE, ICE_INSET_NONE},
249 {pattern_eth_ipv6_udp_ah,
250 ICE_INSET_NONE, ICE_INSET_NONE},
251 {pattern_eth_ipv6_l2tp,
252 ICE_INSET_NONE, ICE_INSET_NONE},
253 {pattern_eth_ipv4_pfcp,
254 ICE_INSET_NONE, ICE_INSET_NONE},
255 {pattern_eth_ipv6_pfcp,
256 ICE_INSET_NONE, ICE_INSET_NONE},
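/* Program a parsed rule into the switch: add the advanced rule built by
 * the parser and keep the returned rule-id data in flow->rule so the
 * rule can later be destroyed or redirected.
 */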
260 ice_switch_create(struct ice_adapter *ad,
261 struct rte_flow *flow,
263 struct rte_flow_error *error)
266 struct ice_pf *pf = &ad->pf;
267 struct ice_hw *hw = ICE_PF_TO_HW(pf);
268 struct ice_rule_query_data rule_added = {0};
269 struct ice_rule_query_data *filter_ptr;
270 struct ice_adv_lkup_elem *list =
271 ((struct sw_meta *)meta)->list;
273 ((struct sw_meta *)meta)->lkups_num;
274 struct ice_adv_rule_info *rule_info =
275 &((struct sw_meta *)meta)->rule_info;
277 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
278 rte_flow_error_set(error, EINVAL,
279 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
280 "item number too large for rule");
284 rte_flow_error_set(error, EINVAL,
285 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
286 "lookup list should not be NULL");
289 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
291 filter_ptr = rte_zmalloc("ice_switch_filter",
292 sizeof(struct ice_rule_query_data), 0);
294 rte_flow_error_set(error, EINVAL,
295 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
296 "No memory for ice_switch_filter");
299 flow->rule = filter_ptr;
300 rte_memcpy(filter_ptr,
302 sizeof(struct ice_rule_query_data));
304 rte_flow_error_set(error, EINVAL,
305 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
306 "switch filter create flow fail");
322 ice_switch_destroy(struct ice_adapter *ad,
323 struct rte_flow *flow,
324 struct rte_flow_error *error)
326 struct ice_hw *hw = &ad->hw;
328 struct ice_rule_query_data *filter_ptr;
330 filter_ptr = (struct ice_rule_query_data *)
334 rte_flow_error_set(error, EINVAL,
335 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
337 " create by switch filter");
341 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
343 rte_flow_error_set(error, EINVAL,
344 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
345 "fail to destroy switch filter rule");
349 rte_free(filter_ptr);
354 ice_switch_filter_rule_free(struct rte_flow *flow)
356 rte_free(flow->rule);
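/* Walk the flow item pattern, fill the advanced lookup list consumed by
 * the switch engine and return the bitmask of fields actually matched;
 * the detected tunnel type is reported through *tun_type.
 */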
360 ice_switch_inset_get(const struct rte_flow_item pattern[],
361 struct rte_flow_error *error,
362 struct ice_adv_lkup_elem *list,
364 enum ice_sw_tunnel_type *tun_type)
366 const struct rte_flow_item *item = pattern;
367 enum rte_flow_item_type item_type;
368 const struct rte_flow_item_eth *eth_spec, *eth_mask;
369 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
370 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
371 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
372 const struct rte_flow_item_udp *udp_spec, *udp_mask;
373 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
374 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
375 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
376 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
377 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
378 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
380 const struct rte_flow_item_esp *esp_spec, *esp_mask;
381 const struct rte_flow_item_ah *ah_spec, *ah_mask;
382 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
383 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
384 uint64_t input_set = ICE_INSET_NONE;
386 uint16_t tunnel_valid = 0;
387 uint16_t pppoe_valid = 0;
388 uint16_t ipv6_valid = 0;
389 uint16_t udp_valid = 0;
392 for (item = pattern; item->type !=
393 RTE_FLOW_ITEM_TYPE_END; item++) {
395 rte_flow_error_set(error, EINVAL,
396 RTE_FLOW_ERROR_TYPE_ITEM,
398 "Not support range");
401 item_type = item->type;
404 case RTE_FLOW_ITEM_TYPE_ETH:
405 eth_spec = item->spec;
406 eth_mask = item->mask;
407 if (eth_spec && eth_mask) {
408 const uint8_t *a = eth_mask->src.addr_bytes;
409 const uint8_t *b = eth_mask->dst.addr_bytes;
410 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
411 if (a[j] && tunnel_valid) {
421 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
422 if (b[j] && tunnel_valid) {
433 input_set |= ICE_INSET_ETHERTYPE;
434 list[t].type = (tunnel_valid == 0) ?
435 ICE_MAC_OFOS : ICE_MAC_IL;
436 struct ice_ether_hdr *h;
437 struct ice_ether_hdr *m;
439 h = &list[t].h_u.eth_hdr;
440 m = &list[t].m_u.eth_hdr;
441 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
442 if (eth_mask->src.addr_bytes[j]) {
444 eth_spec->src.addr_bytes[j];
446 eth_mask->src.addr_bytes[j];
449 if (eth_mask->dst.addr_bytes[j]) {
451 eth_spec->dst.addr_bytes[j];
453 eth_mask->dst.addr_bytes[j];
459 if (eth_mask->type) {
460 list[t].type = ICE_ETYPE_OL;
461 list[t].h_u.ethertype.ethtype_id =
463 list[t].m_u.ethertype.ethtype_id =
470 case RTE_FLOW_ITEM_TYPE_IPV4:
471 ipv4_spec = item->spec;
472 ipv4_mask = item->mask;
473 if (ipv4_spec && ipv4_mask) {
474 /* Check IPv4 mask and update input set */
475 if (ipv4_mask->hdr.version_ihl ||
476 ipv4_mask->hdr.total_length ||
477 ipv4_mask->hdr.packet_id ||
478 ipv4_mask->hdr.hdr_checksum) {
479 rte_flow_error_set(error, EINVAL,
480 RTE_FLOW_ERROR_TYPE_ITEM,
482 "Invalid IPv4 mask.");
487 if (ipv4_mask->hdr.type_of_service)
489 ICE_INSET_TUN_IPV4_TOS;
490 if (ipv4_mask->hdr.src_addr)
492 ICE_INSET_TUN_IPV4_SRC;
493 if (ipv4_mask->hdr.dst_addr)
495 ICE_INSET_TUN_IPV4_DST;
496 if (ipv4_mask->hdr.time_to_live)
498 ICE_INSET_TUN_IPV4_TTL;
499 if (ipv4_mask->hdr.next_proto_id)
501 ICE_INSET_TUN_IPV4_PROTO;
503 if (ipv4_mask->hdr.src_addr)
504 input_set |= ICE_INSET_IPV4_SRC;
505 if (ipv4_mask->hdr.dst_addr)
506 input_set |= ICE_INSET_IPV4_DST;
507 if (ipv4_mask->hdr.time_to_live)
508 input_set |= ICE_INSET_IPV4_TTL;
509 if (ipv4_mask->hdr.next_proto_id)
511 ICE_INSET_IPV4_PROTO;
512 if (ipv4_mask->hdr.type_of_service)
516 list[t].type = (tunnel_valid == 0) ?
517 ICE_IPV4_OFOS : ICE_IPV4_IL;
518 if (ipv4_mask->hdr.src_addr) {
519 list[t].h_u.ipv4_hdr.src_addr =
520 ipv4_spec->hdr.src_addr;
521 list[t].m_u.ipv4_hdr.src_addr =
522 ipv4_mask->hdr.src_addr;
524 if (ipv4_mask->hdr.dst_addr) {
525 list[t].h_u.ipv4_hdr.dst_addr =
526 ipv4_spec->hdr.dst_addr;
527 list[t].m_u.ipv4_hdr.dst_addr =
528 ipv4_mask->hdr.dst_addr;
530 if (ipv4_mask->hdr.time_to_live) {
531 list[t].h_u.ipv4_hdr.time_to_live =
532 ipv4_spec->hdr.time_to_live;
533 list[t].m_u.ipv4_hdr.time_to_live =
534 ipv4_mask->hdr.time_to_live;
536 if (ipv4_mask->hdr.next_proto_id) {
537 list[t].h_u.ipv4_hdr.protocol =
538 ipv4_spec->hdr.next_proto_id;
539 list[t].m_u.ipv4_hdr.protocol =
540 ipv4_mask->hdr.next_proto_id;
542 if (ipv4_mask->hdr.type_of_service) {
543 list[t].h_u.ipv4_hdr.tos =
544 ipv4_spec->hdr.type_of_service;
545 list[t].m_u.ipv4_hdr.tos =
546 ipv4_mask->hdr.type_of_service;
552 case RTE_FLOW_ITEM_TYPE_IPV6:
553 ipv6_spec = item->spec;
554 ipv6_mask = item->mask;
556 if (ipv6_spec && ipv6_mask) {
557 if (ipv6_mask->hdr.payload_len) {
558 rte_flow_error_set(error, EINVAL,
559 RTE_FLOW_ERROR_TYPE_ITEM,
561 "Invalid IPv6 mask");
565 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
566 if (ipv6_mask->hdr.src_addr[j] &&
569 ICE_INSET_TUN_IPV6_SRC;
571 } else if (ipv6_mask->hdr.src_addr[j]) {
572 input_set |= ICE_INSET_IPV6_SRC;
576 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
577 if (ipv6_mask->hdr.dst_addr[j] &&
580 ICE_INSET_TUN_IPV6_DST;
582 } else if (ipv6_mask->hdr.dst_addr[j]) {
583 input_set |= ICE_INSET_IPV6_DST;
587 if (ipv6_mask->hdr.proto &&
590 ICE_INSET_TUN_IPV6_NEXT_HDR;
591 else if (ipv6_mask->hdr.proto)
593 ICE_INSET_IPV6_NEXT_HDR;
594 if (ipv6_mask->hdr.hop_limits &&
597 ICE_INSET_TUN_IPV6_HOP_LIMIT;
598 else if (ipv6_mask->hdr.hop_limits)
600 ICE_INSET_IPV6_HOP_LIMIT;
601 if ((ipv6_mask->hdr.vtc_flow &
603 (RTE_IPV6_HDR_TC_MASK)) &&
606 ICE_INSET_TUN_IPV6_TC;
607 else if (ipv6_mask->hdr.vtc_flow &
609 (RTE_IPV6_HDR_TC_MASK))
610 input_set |= ICE_INSET_IPV6_TC;
612 list[t].type = (tunnel_valid == 0) ?
613 ICE_IPV6_OFOS : ICE_IPV6_IL;
614 struct ice_ipv6_hdr *f;
615 struct ice_ipv6_hdr *s;
616 f = &list[t].h_u.ipv6_hdr;
617 s = &list[t].m_u.ipv6_hdr;
618 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
619 if (ipv6_mask->hdr.src_addr[j]) {
621 ipv6_spec->hdr.src_addr[j];
623 ipv6_mask->hdr.src_addr[j];
625 if (ipv6_mask->hdr.dst_addr[j]) {
627 ipv6_spec->hdr.dst_addr[j];
629 ipv6_mask->hdr.dst_addr[j];
632 if (ipv6_mask->hdr.proto) {
634 ipv6_spec->hdr.proto;
636 ipv6_mask->hdr.proto;
638 if (ipv6_mask->hdr.hop_limits) {
640 ipv6_spec->hdr.hop_limits;
642 ipv6_mask->hdr.hop_limits;
644 if (ipv6_mask->hdr.vtc_flow &
646 (RTE_IPV6_HDR_TC_MASK)) {
647 struct ice_le_ver_tc_flow vtf;
648 vtf.u.fld.version = 0;
649 vtf.u.fld.flow_label = 0;
650 vtf.u.fld.tc = (rte_be_to_cpu_32
651 (ipv6_spec->hdr.vtc_flow) &
652 RTE_IPV6_HDR_TC_MASK) >>
653 RTE_IPV6_HDR_TC_SHIFT;
654 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
655 vtf.u.fld.tc = (rte_be_to_cpu_32
656 (ipv6_mask->hdr.vtc_flow) &
657 RTE_IPV6_HDR_TC_MASK) >>
658 RTE_IPV6_HDR_TC_SHIFT;
659 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
665 case RTE_FLOW_ITEM_TYPE_UDP:
666 udp_spec = item->spec;
667 udp_mask = item->mask;
669 if (udp_spec && udp_mask) {
670 /* Check UDP mask and update input set */
671 if (udp_mask->hdr.dgram_len ||
672 udp_mask->hdr.dgram_cksum) {
673 rte_flow_error_set(error, EINVAL,
674 RTE_FLOW_ERROR_TYPE_ITEM,
681 if (udp_mask->hdr.src_port)
683 ICE_INSET_TUN_UDP_SRC_PORT;
684 if (udp_mask->hdr.dst_port)
686 ICE_INSET_TUN_UDP_DST_PORT;
688 if (udp_mask->hdr.src_port)
690 ICE_INSET_UDP_SRC_PORT;
691 if (udp_mask->hdr.dst_port)
693 ICE_INSET_UDP_DST_PORT;
695 if (*tun_type == ICE_SW_TUN_VXLAN &&
697 list[t].type = ICE_UDP_OF;
699 list[t].type = ICE_UDP_ILOS;
700 if (udp_mask->hdr.src_port) {
701 list[t].h_u.l4_hdr.src_port =
702 udp_spec->hdr.src_port;
703 list[t].m_u.l4_hdr.src_port =
704 udp_mask->hdr.src_port;
706 if (udp_mask->hdr.dst_port) {
707 list[t].h_u.l4_hdr.dst_port =
708 udp_spec->hdr.dst_port;
709 list[t].m_u.l4_hdr.dst_port =
710 udp_mask->hdr.dst_port;
716 case RTE_FLOW_ITEM_TYPE_TCP:
717 tcp_spec = item->spec;
718 tcp_mask = item->mask;
719 if (tcp_spec && tcp_mask) {
720 /* Check TCP mask and update input set */
721 if (tcp_mask->hdr.sent_seq ||
722 tcp_mask->hdr.recv_ack ||
723 tcp_mask->hdr.data_off ||
724 tcp_mask->hdr.tcp_flags ||
725 tcp_mask->hdr.rx_win ||
726 tcp_mask->hdr.cksum ||
727 tcp_mask->hdr.tcp_urp) {
728 rte_flow_error_set(error, EINVAL,
729 RTE_FLOW_ERROR_TYPE_ITEM,
736 if (tcp_mask->hdr.src_port)
738 ICE_INSET_TUN_TCP_SRC_PORT;
739 if (tcp_mask->hdr.dst_port)
741 ICE_INSET_TUN_TCP_DST_PORT;
743 if (tcp_mask->hdr.src_port)
745 ICE_INSET_TCP_SRC_PORT;
746 if (tcp_mask->hdr.dst_port)
748 ICE_INSET_TCP_DST_PORT;
750 list[t].type = ICE_TCP_IL;
751 if (tcp_mask->hdr.src_port) {
752 list[t].h_u.l4_hdr.src_port =
753 tcp_spec->hdr.src_port;
754 list[t].m_u.l4_hdr.src_port =
755 tcp_mask->hdr.src_port;
757 if (tcp_mask->hdr.dst_port) {
758 list[t].h_u.l4_hdr.dst_port =
759 tcp_spec->hdr.dst_port;
760 list[t].m_u.l4_hdr.dst_port =
761 tcp_mask->hdr.dst_port;
767 case RTE_FLOW_ITEM_TYPE_SCTP:
768 sctp_spec = item->spec;
769 sctp_mask = item->mask;
770 if (sctp_spec && sctp_mask) {
771 /* Check SCTP mask and update input set */
772 if (sctp_mask->hdr.cksum) {
773 rte_flow_error_set(error, EINVAL,
774 RTE_FLOW_ERROR_TYPE_ITEM,
776 "Invalid SCTP mask");
781 if (sctp_mask->hdr.src_port)
783 ICE_INSET_TUN_SCTP_SRC_PORT;
784 if (sctp_mask->hdr.dst_port)
786 ICE_INSET_TUN_SCTP_DST_PORT;
788 if (sctp_mask->hdr.src_port)
790 ICE_INSET_SCTP_SRC_PORT;
791 if (sctp_mask->hdr.dst_port)
793 ICE_INSET_SCTP_DST_PORT;
795 list[t].type = ICE_SCTP_IL;
796 if (sctp_mask->hdr.src_port) {
797 list[t].h_u.sctp_hdr.src_port =
798 sctp_spec->hdr.src_port;
799 list[t].m_u.sctp_hdr.src_port =
800 sctp_mask->hdr.src_port;
802 if (sctp_mask->hdr.dst_port) {
803 list[t].h_u.sctp_hdr.dst_port =
804 sctp_spec->hdr.dst_port;
805 list[t].m_u.sctp_hdr.dst_port =
806 sctp_mask->hdr.dst_port;
812 case RTE_FLOW_ITEM_TYPE_VXLAN:
813 vxlan_spec = item->spec;
814 vxlan_mask = item->mask;
815 /* Check if VXLAN item is used to describe protocol.
816 * If yes, both spec and mask should be NULL.
817 * If not, both spec and mask must be non-NULL.
819 if ((!vxlan_spec && vxlan_mask) ||
820 (vxlan_spec && !vxlan_mask)) {
821 rte_flow_error_set(error, EINVAL,
822 RTE_FLOW_ERROR_TYPE_ITEM,
824 "Invalid VXLAN item");
829 if (vxlan_spec && vxlan_mask) {
830 list[t].type = ICE_VXLAN;
831 if (vxlan_mask->vni[0] ||
832 vxlan_mask->vni[1] ||
833 vxlan_mask->vni[2]) {
834 list[t].h_u.tnl_hdr.vni =
835 (vxlan_spec->vni[2] << 16) |
836 (vxlan_spec->vni[1] << 8) |
838 list[t].m_u.tnl_hdr.vni =
839 (vxlan_mask->vni[2] << 16) |
840 (vxlan_mask->vni[1] << 8) |
843 ICE_INSET_TUN_VXLAN_VNI;
849 case RTE_FLOW_ITEM_TYPE_NVGRE:
850 nvgre_spec = item->spec;
851 nvgre_mask = item->mask;
852 /* Check if NVGRE item is used to describe protocol.
853 * If yes, both spec and mask should be NULL.
854 * If not, both spec and mask must be non-NULL.
856 if ((!nvgre_spec && nvgre_mask) ||
857 (nvgre_spec && !nvgre_mask)) {
858 rte_flow_error_set(error, EINVAL,
859 RTE_FLOW_ERROR_TYPE_ITEM,
861 "Invalid NVGRE item");
865 if (nvgre_spec && nvgre_mask) {
866 list[t].type = ICE_NVGRE;
867 if (nvgre_mask->tni[0] ||
868 nvgre_mask->tni[1] ||
869 nvgre_mask->tni[2]) {
870 list[t].h_u.nvgre_hdr.tni_flow =
871 (nvgre_spec->tni[2] << 16) |
872 (nvgre_spec->tni[1] << 8) |
874 list[t].m_u.nvgre_hdr.tni_flow =
875 (nvgre_mask->tni[2] << 16) |
876 (nvgre_mask->tni[1] << 8) |
879 ICE_INSET_TUN_NVGRE_TNI;
885 case RTE_FLOW_ITEM_TYPE_VLAN:
886 vlan_spec = item->spec;
887 vlan_mask = item->mask;
888 /* Check if VLAN item is used to describe protocol.
889 * If yes, both spec and mask should be NULL.
890 * If not, both spec and mask must be non-NULL.
892 if ((!vlan_spec && vlan_mask) ||
893 (vlan_spec && !vlan_mask)) {
894 rte_flow_error_set(error, EINVAL,
895 RTE_FLOW_ERROR_TYPE_ITEM,
897 "Invalid VLAN item");
900 if (vlan_spec && vlan_mask) {
901 list[t].type = ICE_VLAN_OFOS;
902 if (vlan_mask->tci) {
903 list[t].h_u.vlan_hdr.vlan =
905 list[t].m_u.vlan_hdr.vlan =
907 input_set |= ICE_INSET_VLAN_OUTER;
909 if (vlan_mask->inner_type) {
910 list[t].h_u.vlan_hdr.type =
911 vlan_spec->inner_type;
912 list[t].m_u.vlan_hdr.type =
913 vlan_mask->inner_type;
914 input_set |= ICE_INSET_ETHERTYPE;
920 case RTE_FLOW_ITEM_TYPE_PPPOED:
921 case RTE_FLOW_ITEM_TYPE_PPPOES:
922 pppoe_spec = item->spec;
923 pppoe_mask = item->mask;
924 /* Check if PPPoE item is used to describe protocol.
925 * If yes, both spec and mask should be NULL.
926 * If not, both spec and mask must be non-NULL.
928 if ((!pppoe_spec && pppoe_mask) ||
929 (pppoe_spec && !pppoe_mask)) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM,
933 "Invalid pppoe item");
936 if (pppoe_spec && pppoe_mask) {
937 /* Check pppoe mask and update input set */
938 if (pppoe_mask->length ||
940 pppoe_mask->version_type) {
941 rte_flow_error_set(error, EINVAL,
942 RTE_FLOW_ERROR_TYPE_ITEM,
944 "Invalid pppoe mask");
947 list[t].type = ICE_PPPOE;
948 if (pppoe_mask->session_id) {
949 list[t].h_u.pppoe_hdr.session_id =
950 pppoe_spec->session_id;
951 list[t].m_u.pppoe_hdr.session_id =
952 pppoe_mask->session_id;
953 input_set |= ICE_INSET_PPPOE_SESSION;
960 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
961 pppoe_proto_spec = item->spec;
962 pppoe_proto_mask = item->mask;
963 /* Check if PPPoE optional proto_id item
964 * is used to describe protocol.
965 * If yes, both spec and mask should be NULL.
966 * If not, both spec and mask must be non-NULL.
968 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
969 (pppoe_proto_spec && !pppoe_proto_mask)) {
970 rte_flow_error_set(error, EINVAL,
971 RTE_FLOW_ERROR_TYPE_ITEM,
973 "Invalid pppoe proto item");
976 if (pppoe_proto_spec && pppoe_proto_mask) {
979 list[t].type = ICE_PPPOE;
980 if (pppoe_proto_mask->proto_id) {
981 list[t].h_u.pppoe_hdr.ppp_prot_id =
982 pppoe_proto_spec->proto_id;
983 list[t].m_u.pppoe_hdr.ppp_prot_id =
984 pppoe_proto_mask->proto_id;
985 input_set |= ICE_INSET_PPPOE_PROTO;
991 case RTE_FLOW_ITEM_TYPE_ESP:
992 esp_spec = item->spec;
993 esp_mask = item->mask;
994 if (esp_spec || esp_mask) {
995 rte_flow_error_set(error, EINVAL,
996 RTE_FLOW_ERROR_TYPE_ITEM,
1001 if (ipv6_valid && udp_valid)
1002 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1003 else if (ipv6_valid)
1004 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1007 case RTE_FLOW_ITEM_TYPE_AH:
1008 ah_spec = item->spec;
1009 ah_mask = item->mask;
1010 if (ah_spec || ah_mask) {
1011 rte_flow_error_set(error, EINVAL,
1012 RTE_FLOW_ERROR_TYPE_ITEM,
1017 if (ipv6_valid && udp_valid)
1018 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1019 else if (ipv6_valid)
1020 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1023 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1024 l2tp_spec = item->spec;
1025 l2tp_mask = item->mask;
1026 if (l2tp_spec || l2tp_mask) {
1027 rte_flow_error_set(error, EINVAL,
1028 RTE_FLOW_ERROR_TYPE_ITEM,
1030 "Invalid l2tp item");
1034 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1036 case RTE_FLOW_ITEM_TYPE_PFCP:
1037 pfcp_spec = item->spec;
1038 pfcp_mask = item->mask;
1039 /* Check if PFCP item is used to describe protocol.
1040 * If yes, both spec and mask should be NULL.
1041 * If not, both spec and mask must be non-NULL.
1043 if ((!pfcp_spec && pfcp_mask) ||
1044 (pfcp_spec && !pfcp_mask)) {
1045 rte_flow_error_set(error, EINVAL,
1046 RTE_FLOW_ERROR_TYPE_ITEM,
1048 "Invalid PFCP item");
1051 if (pfcp_spec && pfcp_mask) {
1052 /* Check pfcp mask and update input set */
1053 if (pfcp_mask->msg_type ||
1054 pfcp_mask->msg_len ||
1056 rte_flow_error_set(error, EINVAL,
1057 RTE_FLOW_ERROR_TYPE_ITEM,
1059 "Invalid pfcp mask");
1062 if (pfcp_mask->s_field &&
1063 pfcp_spec->s_field == 0x01 &&
1066 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1067 else if (pfcp_mask->s_field &&
1068 pfcp_spec->s_field == 0x01)
1070 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1071 else if (pfcp_mask->s_field &&
1072 !pfcp_spec->s_field &&
1075 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1076 else if (pfcp_mask->s_field &&
1077 !pfcp_spec->s_field)
1079 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1086 case RTE_FLOW_ITEM_TYPE_VOID:
1090 rte_flow_error_set(error, EINVAL,
1091 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1092 "Invalid pattern item.");
1105 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1106 struct rte_flow_error *error,
1107 struct ice_adv_rule_info *rule_info)
1109 const struct rte_flow_action_vf *act_vf;
1110 const struct rte_flow_action *action;
1111 enum rte_flow_action_type action_type;
1113 for (action = actions; action->type !=
1114 RTE_FLOW_ACTION_TYPE_END; action++) {
1115 action_type = action->type;
1116 switch (action_type) {
1117 case RTE_FLOW_ACTION_TYPE_VF:
1118 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1119 act_vf = action->conf;
1120 rule_info->sw_act.vsi_handle = act_vf->id;
1123 rte_flow_error_set(error,
1124 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1126 "Invalid action type or queue number");
1131 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1133 rule_info->priority = 5;
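/* Translate QUEUE, RSS (queue group) and DROP actions into the switch
 * rule's forward action. A queue group must have a power-of-two size
 * from 2 to 128 and consist of contiguous queues.
 */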
1139 ice_switch_parse_action(struct ice_pf *pf,
1140 const struct rte_flow_action *actions,
1141 struct rte_flow_error *error,
1142 struct ice_adv_rule_info *rule_info)
1144 struct ice_vsi *vsi = pf->main_vsi;
1145 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1146 const struct rte_flow_action_queue *act_q;
1147 const struct rte_flow_action_rss *act_qgrop;
1148 uint16_t base_queue, i;
1149 const struct rte_flow_action *action;
1150 enum rte_flow_action_type action_type;
1151 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1152 2, 4, 8, 16, 32, 64, 128};
1154 base_queue = pf->base_queue + vsi->base_queue;
1155 for (action = actions; action->type !=
1156 RTE_FLOW_ACTION_TYPE_END; action++) {
1157 action_type = action->type;
1158 switch (action_type) {
1159 case RTE_FLOW_ACTION_TYPE_RSS:
1160 act_qgrop = action->conf;
1161 rule_info->sw_act.fltr_act =
1163 rule_info->sw_act.fwd_id.q_id =
1164 base_queue + act_qgrop->queue[0];
1165 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1166 if (act_qgrop->queue_num ==
1167 valid_qgrop_number[i])
1170 if (i == MAX_QGRP_NUM_TYPE)
1172 if ((act_qgrop->queue[0] +
1173 act_qgrop->queue_num) >
1174 dev->data->nb_rx_queues)
1176 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1177 if (act_qgrop->queue[i + 1] !=
1178 act_qgrop->queue[i] + 1)
1180 rule_info->sw_act.qgrp_size =
1181 act_qgrop->queue_num;
1183 case RTE_FLOW_ACTION_TYPE_QUEUE:
1184 act_q = action->conf;
1185 if (act_q->index >= dev->data->nb_rx_queues)
1187 rule_info->sw_act.fltr_act =
1189 rule_info->sw_act.fwd_id.q_id =
1190 base_queue + act_q->index;
1193 case RTE_FLOW_ACTION_TYPE_DROP:
1194 rule_info->sw_act.fltr_act =
1198 case RTE_FLOW_ACTION_TYPE_VOID:
1206 rule_info->sw_act.vsi_handle = vsi->idx;
1208 rule_info->sw_act.src = vsi->idx;
1209 rule_info->priority = 5;
1214 rte_flow_error_set(error,
1215 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1217 "Invalid action type or queue number");
1222 ice_switch_check_action(const struct rte_flow_action *actions,
1223 struct rte_flow_error *error)
1225 const struct rte_flow_action *action;
1226 enum rte_flow_action_type action_type;
1227 uint16_t actions_num = 0;
1229 for (action = actions; action->type !=
1230 RTE_FLOW_ACTION_TYPE_END; action++) {
1231 action_type = action->type;
1232 switch (action_type) {
1233 case RTE_FLOW_ACTION_TYPE_VF:
1234 case RTE_FLOW_ACTION_TYPE_RSS:
1235 case RTE_FLOW_ACTION_TYPE_QUEUE:
1236 case RTE_FLOW_ACTION_TYPE_DROP:
1239 case RTE_FLOW_ACTION_TYPE_VOID:
1242 rte_flow_error_set(error,
1243 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1245 "Invalid action type");
1250 if (actions_num > 1) {
1251 rte_flow_error_set(error,
1252 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1254 "Invalid action number");
1262 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1265 case ICE_SW_TUN_PROFID_IPV6_ESP:
1266 case ICE_SW_TUN_PROFID_IPV6_AH:
1267 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1268 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1269 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1270 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1271 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1272 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
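/* Parser entry point: deduce the tunnel type from the pattern, build the
 * lookup list and input set, validate the actions and hand the collected
 * sw_meta back to the flow framework for ice_switch_create().
 *
 * For illustration only (assuming testpmd flow syntax), a rule this
 * parser accepts could look like:
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 /
 *        udp dst is 4789 / end actions queue index 3 / end
 */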
1282 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1283 struct ice_pattern_match_item *array,
1285 const struct rte_flow_item pattern[],
1286 const struct rte_flow_action actions[],
1288 struct rte_flow_error *error)
1290 struct ice_pf *pf = &ad->pf;
1291 uint64_t inputset = 0;
1293 struct sw_meta *sw_meta_ptr = NULL;
1294 struct ice_adv_rule_info rule_info;
1295 struct ice_adv_lkup_elem *list = NULL;
1296 uint16_t lkups_num = 0;
1297 const struct rte_flow_item *item = pattern;
1298 uint16_t item_num = 0;
1299 enum ice_sw_tunnel_type tun_type =
1300 ICE_SW_TUN_AND_NON_TUN;
1301 struct ice_pattern_match_item *pattern_match_item = NULL;
1303 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1305 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1306 tun_type = ICE_SW_TUN_VXLAN;
1307 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1308 tun_type = ICE_SW_TUN_NVGRE;
1309 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1310 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1311 tun_type = ICE_SW_TUN_PPPOE;
1312 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1313 const struct rte_flow_item_eth *eth_mask;
1315 eth_mask = item->mask;
1318 if (eth_mask->type == UINT16_MAX)
1319 tun_type = ICE_SW_TUN_AND_NON_TUN;
1321 /* reserve one more memory slot for ETH which may
1322 * consume 2 lookup items.
1324 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1328 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1332 "No memory for PMD internal items");
1337 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1339 rte_flow_error_set(error, EINVAL,
1340 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1341 "No memory for sw_pattern_meta_ptr");
1345 pattern_match_item =
1346 ice_search_pattern_match_item(pattern, array, array_len, error);
1347 if (!pattern_match_item) {
1348 rte_flow_error_set(error, EINVAL,
1349 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1350 "Invalid input pattern");
1354 inputset = ice_switch_inset_get
1355 (pattern, error, list, &lkups_num, &tun_type);
1356 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1357 (inputset & ~pattern_match_item->input_set_mask)) {
1358 rte_flow_error_set(error, EINVAL,
1359 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1361 "Invalid input set");
1365 rule_info.tun_type = tun_type;
1367 ret = ice_switch_check_action(actions, error);
1369 rte_flow_error_set(error, EINVAL,
1370 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1371 "Invalid input action number");
1375 if (ad->hw.dcf_enabled)
1376 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1378 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1381 rte_flow_error_set(error, EINVAL,
1382 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1383 "Invalid input action");
1388 *meta = sw_meta_ptr;
1389 ((struct sw_meta *)*meta)->list = list;
1390 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1391 ((struct sw_meta *)*meta)->rule_info = rule_info;
1394 rte_free(sw_meta_ptr);
1397 rte_free(pattern_match_item);
1403 rte_free(sw_meta_ptr);
1404 rte_free(pattern_match_item);
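/* The switch filter has no counters, so a count query always fails. */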
1410 ice_switch_query(struct ice_adapter *ad __rte_unused,
1411 struct rte_flow *flow __rte_unused,
1412 struct rte_flow_query_count *count __rte_unused,
1413 struct rte_flow_error *error)
1415 rte_flow_error_set(error, EINVAL,
1416 RTE_FLOW_ERROR_TYPE_HANDLE,
1418 "count action not supported by switch filter");
1424 ice_switch_redirect(struct ice_adapter *ad,
1425 struct rte_flow *flow,
1426 struct ice_flow_redirect *rd)
1428 struct ice_rule_query_data *rdata = flow->rule;
1429 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1430 struct ice_adv_lkup_elem *lkups_dp = NULL;
1431 struct LIST_HEAD_TYPE *list_head;
1432 struct ice_adv_rule_info rinfo;
1433 struct ice_hw *hw = &ad->hw;
1434 struct ice_switch_info *sw;
1438 sw = hw->switch_info;
1439 if (!sw->recp_list[rdata->rid].recp_created)
1442 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1445 list_head = &sw->recp_list[rdata->rid].filt_rules;
1446 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1448 rinfo = list_itr->rule_info;
1449 if (rinfo.fltr_rule_id == rdata->rule_id &&
1450 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1451 rinfo.sw_act.vsi_handle == rd->vsi_handle) {
1452 lkups_cnt = list_itr->lkups_cnt;
1453 lkups_dp = (struct ice_adv_lkup_elem *)
1454 ice_memdup(hw, list_itr->lkups,
1455 sizeof(*list_itr->lkups) *
1456 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1458 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1469 /* Remove the old rule */
1470 ret = ice_rem_adv_rule(hw, list_itr->lkups,
1473 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1479 /* Update VSI context */
1480 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1482 /* Replay the rule */
1483 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1486 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1491 ice_free(hw, lkups_dp);
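/* Register the permission-stage parser when pipe mode is requested,
 * otherwise the distributor parser matching the active DDP package.
 */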
1496 ice_switch_init(struct ice_adapter *ad)
1499 struct ice_flow_parser *dist_parser;
1500 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1502 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1503 dist_parser = &ice_switch_dist_parser_comms;
1504 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1505 dist_parser = &ice_switch_dist_parser_os;
1509 if (ad->devargs.pipe_mode_support)
1510 ret = ice_register_parser(perm_parser, ad);
1512 ret = ice_register_parser(dist_parser, ad);
1517 ice_switch_uninit(struct ice_adapter *ad)
1519 struct ice_flow_parser *dist_parser;
1520 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1522 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1523 dist_parser = &ice_switch_dist_parser_comms;
1525 dist_parser = &ice_switch_dist_parser_os;
1527 if (ad->devargs.pipe_mode_support)
1528 ice_unregister_parser(perm_parser, ad);
1530 ice_unregister_parser(dist_parser, ad);
1534 ice_flow_engine ice_switch_engine = {
1535 .init = ice_switch_init,
1536 .uninit = ice_switch_uninit,
1537 .create = ice_switch_create,
1538 .destroy = ice_switch_destroy,
1539 .query_count = ice_switch_query,
1540 .redirect = ice_switch_redirect,
1541 .free = ice_switch_filter_rule_free,
1542 .type = ICE_FLOW_ENGINE_SWITCH,
1546 ice_flow_parser ice_switch_dist_parser_os = {
1547 .engine = &ice_switch_engine,
1548 .array = ice_switch_pattern_dist_os,
1549 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1550 .parse_pattern_action = ice_switch_parse_pattern_action,
1551 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1555 ice_flow_parser ice_switch_dist_parser_comms = {
1556 .engine = &ice_switch_engine,
1557 .array = ice_switch_pattern_dist_comms,
1558 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1559 .parse_pattern_action = ice_switch_parse_pattern_action,
1560 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1564 ice_flow_parser ice_switch_perm_parser = {
1565 .engine = &ice_switch_engine,
1566 .array = ice_switch_pattern_perm,
1567 .array_len = RTE_DIM(ice_switch_pattern_perm),
1568 .parse_pattern_action = ice_switch_parse_pattern_action,
1569 .stage = ICE_FLOW_STAGE_PERMISSION,
1572 RTE_INIT(ice_sw_engine_init)
1574 struct ice_flow_engine *engine = &ice_switch_engine;
1575 ice_register_flow_engine(engine);