1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
28 #define MAX_QGRP_NUM_TYPE 7
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
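/* Parsed rule data handed from the pattern/action parser to
 * ice_switch_create(): the lookup element list, its length and the
 * advanced rule info.
 */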
100 struct ice_adv_lkup_elem *list;
102 struct ice_adv_rule_info rule_info;
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
112 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113 {pattern_ethertype_vlan,
114 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
116 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp,
118 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_tcp,
120 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
122 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123 {pattern_eth_ipv6_udp,
124 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv6_tcp,
126 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_nvgre_eth_ipv4,
134 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141 {pattern_eth_vlan_pppoed,
142 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
144 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145 {pattern_eth_vlan_pppoes,
146 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147 {pattern_eth_pppoes_proto,
148 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149 {pattern_eth_vlan_pppoes_proto,
150 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
151 {pattern_eth_ipv6_esp,
152 ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_eth_ipv6_ah,
154 ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_ipv6_l2tp,
156 ICE_INSET_NONE, ICE_INSET_NONE},
157 {pattern_eth_ipv4_pfcp,
158 ICE_INSET_NONE, ICE_INSET_NONE},
159 {pattern_eth_ipv6_pfcp,
160 ICE_INSET_NONE, ICE_INSET_NONE},
164 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
166 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
167 {pattern_ethertype_vlan,
168 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
170 ICE_INSET_NONE, ICE_INSET_NONE},
172 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
173 {pattern_eth_ipv4_udp,
174 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
175 {pattern_eth_ipv4_tcp,
176 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
178 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
179 {pattern_eth_ipv6_udp,
180 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
181 {pattern_eth_ipv6_tcp,
182 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
183 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
184 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
185 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
186 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
188 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
189 {pattern_eth_ipv4_nvgre_eth_ipv4,
190 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
191 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
192 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
193 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
194 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
198 ice_pattern_match_item ice_switch_pattern_perm[] = {
199 {pattern_ethertype_vlan,
200 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
202 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
203 {pattern_eth_ipv4_udp,
204 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
205 {pattern_eth_ipv4_tcp,
206 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
208 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
209 {pattern_eth_ipv6_udp,
210 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
211 {pattern_eth_ipv6_tcp,
212 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
214 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
215 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
216 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
218 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4,
220 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
221 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
222 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
224 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
225 {pattern_eth_ipv6_esp,
226 ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_ipv6_ah,
228 ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_ipv6_l2tp,
230 ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_ipv4_pfcp,
232 ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_ipv6_pfcp,
234 ICE_INSET_NONE, ICE_INSET_NONE},
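/* Program a parsed rule into the switch: add the advanced rule built from
 * the lookup list carried in meta and keep the returned rule query data in
 * flow->rule so the rule can be removed later.
 */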
238 ice_switch_create(struct ice_adapter *ad,
239 struct rte_flow *flow,
241 struct rte_flow_error *error)
244 struct ice_pf *pf = &ad->pf;
245 struct ice_hw *hw = ICE_PF_TO_HW(pf);
246 struct ice_rule_query_data rule_added = {0};
247 struct ice_rule_query_data *filter_ptr;
248 struct ice_adv_lkup_elem *list =
249 ((struct sw_meta *)meta)->list;
251 ((struct sw_meta *)meta)->lkups_num;
252 struct ice_adv_rule_info *rule_info =
253 &((struct sw_meta *)meta)->rule_info;
255 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
256 rte_flow_error_set(error, EINVAL,
257 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
258 "item number too large for rule");
262 rte_flow_error_set(error, EINVAL,
263 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
264 "lookup list should not be NULL");
267 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
269 filter_ptr = rte_zmalloc("ice_switch_filter",
270 sizeof(struct ice_rule_query_data), 0);
272 rte_flow_error_set(error, EINVAL,
273 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
274 "No memory for ice_switch_filter");
277 flow->rule = filter_ptr;
278 rte_memcpy(filter_ptr,
280 sizeof(struct ice_rule_query_data));
282 rte_flow_error_set(error, EINVAL,
283 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
284 "switch filter create flow fail");
300 ice_switch_destroy(struct ice_adapter *ad,
301 struct rte_flow *flow,
302 struct rte_flow_error *error)
304 struct ice_hw *hw = &ad->hw;
306 struct ice_rule_query_data *filter_ptr;
308 filter_ptr = (struct ice_rule_query_data *)
312 rte_flow_error_set(error, EINVAL,
313 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
315 " create by switch filter");
319 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
321 rte_flow_error_set(error, EINVAL,
322 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
323 "fail to destroy switch filter rule");
327 rte_free(filter_ptr);
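/* Release the rule data attached to a flow. */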
332 ice_switch_filter_rule_free(struct rte_flow *flow)
334 rte_free(flow->rule);
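/* Walk the pattern items, fill the advanced lookup list with the spec/mask
 * of each supported header, accumulate the input-set bitmap (tunnel
 * variants once a tunnel item has been seen) and report the detected
 * tunnel type. Ranged items are rejected.
 */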
338 ice_switch_inset_get(const struct rte_flow_item pattern[],
339 struct rte_flow_error *error,
340 struct ice_adv_lkup_elem *list,
342 enum ice_sw_tunnel_type *tun_type)
344 const struct rte_flow_item *item = pattern;
345 enum rte_flow_item_type item_type;
346 const struct rte_flow_item_eth *eth_spec, *eth_mask;
347 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
348 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
349 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
350 const struct rte_flow_item_udp *udp_spec, *udp_mask;
351 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
352 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
353 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
354 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
355 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
356 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
358 const struct rte_flow_item_esp *esp_spec, *esp_mask;
359 const struct rte_flow_item_ah *ah_spec, *ah_mask;
360 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
361 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
362 uint64_t input_set = ICE_INSET_NONE;
364 uint16_t tunnel_valid = 0;
365 uint16_t pppoe_valid = 0;
366 uint16_t ipv6_valid = 0;
369 for (item = pattern; item->type !=
370 RTE_FLOW_ITEM_TYPE_END; item++) {
372 rte_flow_error_set(error, EINVAL,
373 RTE_FLOW_ERROR_TYPE_ITEM,
375 "Not support range");
378 item_type = item->type;
381 case RTE_FLOW_ITEM_TYPE_ETH:
382 eth_spec = item->spec;
383 eth_mask = item->mask;
384 if (eth_spec && eth_mask) {
385 const uint8_t *a = eth_mask->src.addr_bytes;
386 const uint8_t *b = eth_mask->dst.addr_bytes;
387 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
388 if (a[j] && tunnel_valid) {
398 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
399 if (b[j] && tunnel_valid) {
410 input_set |= ICE_INSET_ETHERTYPE;
411 list[t].type = (tunnel_valid == 0) ?
412 ICE_MAC_OFOS : ICE_MAC_IL;
413 struct ice_ether_hdr *h;
414 struct ice_ether_hdr *m;
416 h = &list[t].h_u.eth_hdr;
417 m = &list[t].m_u.eth_hdr;
418 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
419 if (eth_mask->src.addr_bytes[j]) {
421 eth_spec->src.addr_bytes[j];
423 eth_mask->src.addr_bytes[j];
426 if (eth_mask->dst.addr_bytes[j]) {
428 eth_spec->dst.addr_bytes[j];
430 eth_mask->dst.addr_bytes[j];
436 if (eth_mask->type) {
437 list[t].type = ICE_ETYPE_OL;
438 list[t].h_u.ethertype.ethtype_id =
440 list[t].m_u.ethertype.ethtype_id =
447 case RTE_FLOW_ITEM_TYPE_IPV4:
448 ipv4_spec = item->spec;
449 ipv4_mask = item->mask;
450 if (ipv4_spec && ipv4_mask) {
451 /* Check IPv4 mask and update input set */
452 if (ipv4_mask->hdr.version_ihl ||
453 ipv4_mask->hdr.total_length ||
454 ipv4_mask->hdr.packet_id ||
455 ipv4_mask->hdr.hdr_checksum) {
456 rte_flow_error_set(error, EINVAL,
457 RTE_FLOW_ERROR_TYPE_ITEM,
459 "Invalid IPv4 mask.");
464 if (ipv4_mask->hdr.type_of_service)
466 ICE_INSET_TUN_IPV4_TOS;
467 if (ipv4_mask->hdr.src_addr)
469 ICE_INSET_TUN_IPV4_SRC;
470 if (ipv4_mask->hdr.dst_addr)
472 ICE_INSET_TUN_IPV4_DST;
473 if (ipv4_mask->hdr.time_to_live)
475 ICE_INSET_TUN_IPV4_TTL;
476 if (ipv4_mask->hdr.next_proto_id)
478 ICE_INSET_TUN_IPV4_PROTO;
480 if (ipv4_mask->hdr.src_addr)
481 input_set |= ICE_INSET_IPV4_SRC;
482 if (ipv4_mask->hdr.dst_addr)
483 input_set |= ICE_INSET_IPV4_DST;
484 if (ipv4_mask->hdr.time_to_live)
485 input_set |= ICE_INSET_IPV4_TTL;
486 if (ipv4_mask->hdr.next_proto_id)
488 ICE_INSET_IPV4_PROTO;
489 if (ipv4_mask->hdr.type_of_service)
493 list[t].type = (tunnel_valid == 0) ?
494 ICE_IPV4_OFOS : ICE_IPV4_IL;
495 if (ipv4_mask->hdr.src_addr) {
496 list[t].h_u.ipv4_hdr.src_addr =
497 ipv4_spec->hdr.src_addr;
498 list[t].m_u.ipv4_hdr.src_addr =
499 ipv4_mask->hdr.src_addr;
501 if (ipv4_mask->hdr.dst_addr) {
502 list[t].h_u.ipv4_hdr.dst_addr =
503 ipv4_spec->hdr.dst_addr;
504 list[t].m_u.ipv4_hdr.dst_addr =
505 ipv4_mask->hdr.dst_addr;
507 if (ipv4_mask->hdr.time_to_live) {
508 list[t].h_u.ipv4_hdr.time_to_live =
509 ipv4_spec->hdr.time_to_live;
510 list[t].m_u.ipv4_hdr.time_to_live =
511 ipv4_mask->hdr.time_to_live;
513 if (ipv4_mask->hdr.next_proto_id) {
514 list[t].h_u.ipv4_hdr.protocol =
515 ipv4_spec->hdr.next_proto_id;
516 list[t].m_u.ipv4_hdr.protocol =
517 ipv4_mask->hdr.next_proto_id;
519 if (ipv4_mask->hdr.type_of_service) {
520 list[t].h_u.ipv4_hdr.tos =
521 ipv4_spec->hdr.type_of_service;
522 list[t].m_u.ipv4_hdr.tos =
523 ipv4_mask->hdr.type_of_service;
529 case RTE_FLOW_ITEM_TYPE_IPV6:
530 ipv6_spec = item->spec;
531 ipv6_mask = item->mask;
533 if (ipv6_spec && ipv6_mask) {
534 if (ipv6_mask->hdr.payload_len) {
535 rte_flow_error_set(error, EINVAL,
536 RTE_FLOW_ERROR_TYPE_ITEM,
538 "Invalid IPv6 mask");
542 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
543 if (ipv6_mask->hdr.src_addr[j] &&
546 ICE_INSET_TUN_IPV6_SRC;
548 } else if (ipv6_mask->hdr.src_addr[j]) {
549 input_set |= ICE_INSET_IPV6_SRC;
553 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
554 if (ipv6_mask->hdr.dst_addr[j] &&
557 ICE_INSET_TUN_IPV6_DST;
559 } else if (ipv6_mask->hdr.dst_addr[j]) {
560 input_set |= ICE_INSET_IPV6_DST;
564 if (ipv6_mask->hdr.proto &&
567 ICE_INSET_TUN_IPV6_NEXT_HDR;
568 else if (ipv6_mask->hdr.proto)
570 ICE_INSET_IPV6_NEXT_HDR;
571 if (ipv6_mask->hdr.hop_limits &&
574 ICE_INSET_TUN_IPV6_HOP_LIMIT;
575 else if (ipv6_mask->hdr.hop_limits)
577 ICE_INSET_IPV6_HOP_LIMIT;
578 if ((ipv6_mask->hdr.vtc_flow &
580 (RTE_IPV6_HDR_TC_MASK)) &&
583 ICE_INSET_TUN_IPV6_TC;
584 else if (ipv6_mask->hdr.vtc_flow &
586 (RTE_IPV6_HDR_TC_MASK))
587 input_set |= ICE_INSET_IPV6_TC;
589 list[t].type = (tunnel_valid == 0) ?
590 ICE_IPV6_OFOS : ICE_IPV6_IL;
591 struct ice_ipv6_hdr *f;
592 struct ice_ipv6_hdr *s;
593 f = &list[t].h_u.ipv6_hdr;
594 s = &list[t].m_u.ipv6_hdr;
595 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
596 if (ipv6_mask->hdr.src_addr[j]) {
598 ipv6_spec->hdr.src_addr[j];
600 ipv6_mask->hdr.src_addr[j];
602 if (ipv6_mask->hdr.dst_addr[j]) {
604 ipv6_spec->hdr.dst_addr[j];
606 ipv6_mask->hdr.dst_addr[j];
609 if (ipv6_mask->hdr.proto) {
611 ipv6_spec->hdr.proto;
613 ipv6_mask->hdr.proto;
615 if (ipv6_mask->hdr.hop_limits) {
617 ipv6_spec->hdr.hop_limits;
619 ipv6_mask->hdr.hop_limits;
621 if (ipv6_mask->hdr.vtc_flow &
623 (RTE_IPV6_HDR_TC_MASK)) {
624 struct ice_le_ver_tc_flow vtf;
625 vtf.u.fld.version = 0;
626 vtf.u.fld.flow_label = 0;
627 vtf.u.fld.tc = (rte_be_to_cpu_32
628 (ipv6_spec->hdr.vtc_flow) &
629 RTE_IPV6_HDR_TC_MASK) >>
630 RTE_IPV6_HDR_TC_SHIFT;
631 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
632 vtf.u.fld.tc = (rte_be_to_cpu_32
633 (ipv6_mask->hdr.vtc_flow) &
634 RTE_IPV6_HDR_TC_MASK) >>
635 RTE_IPV6_HDR_TC_SHIFT;
636 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
642 case RTE_FLOW_ITEM_TYPE_UDP:
643 udp_spec = item->spec;
644 udp_mask = item->mask;
645 if (udp_spec && udp_mask) {
646 /* Check UDP mask and update input set*/
647 if (udp_mask->hdr.dgram_len ||
648 udp_mask->hdr.dgram_cksum) {
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ITEM,
657 if (udp_mask->hdr.src_port)
659 ICE_INSET_TUN_UDP_SRC_PORT;
660 if (udp_mask->hdr.dst_port)
662 ICE_INSET_TUN_UDP_DST_PORT;
664 if (udp_mask->hdr.src_port)
666 ICE_INSET_UDP_SRC_PORT;
667 if (udp_mask->hdr.dst_port)
669 ICE_INSET_UDP_DST_PORT;
671 if (*tun_type == ICE_SW_TUN_VXLAN &&
673 list[t].type = ICE_UDP_OF;
675 list[t].type = ICE_UDP_ILOS;
676 if (udp_mask->hdr.src_port) {
677 list[t].h_u.l4_hdr.src_port =
678 udp_spec->hdr.src_port;
679 list[t].m_u.l4_hdr.src_port =
680 udp_mask->hdr.src_port;
682 if (udp_mask->hdr.dst_port) {
683 list[t].h_u.l4_hdr.dst_port =
684 udp_spec->hdr.dst_port;
685 list[t].m_u.l4_hdr.dst_port =
686 udp_mask->hdr.dst_port;
692 case RTE_FLOW_ITEM_TYPE_TCP:
693 tcp_spec = item->spec;
694 tcp_mask = item->mask;
695 if (tcp_spec && tcp_mask) {
696 /* Check TCP mask and update input set */
697 if (tcp_mask->hdr.sent_seq ||
698 tcp_mask->hdr.recv_ack ||
699 tcp_mask->hdr.data_off ||
700 tcp_mask->hdr.tcp_flags ||
701 tcp_mask->hdr.rx_win ||
702 tcp_mask->hdr.cksum ||
703 tcp_mask->hdr.tcp_urp) {
704 rte_flow_error_set(error, EINVAL,
705 RTE_FLOW_ERROR_TYPE_ITEM,
712 if (tcp_mask->hdr.src_port)
714 ICE_INSET_TUN_TCP_SRC_PORT;
715 if (tcp_mask->hdr.dst_port)
717 ICE_INSET_TUN_TCP_DST_PORT;
719 if (tcp_mask->hdr.src_port)
721 ICE_INSET_TCP_SRC_PORT;
722 if (tcp_mask->hdr.dst_port)
724 ICE_INSET_TCP_DST_PORT;
726 list[t].type = ICE_TCP_IL;
727 if (tcp_mask->hdr.src_port) {
728 list[t].h_u.l4_hdr.src_port =
729 tcp_spec->hdr.src_port;
730 list[t].m_u.l4_hdr.src_port =
731 tcp_mask->hdr.src_port;
733 if (tcp_mask->hdr.dst_port) {
734 list[t].h_u.l4_hdr.dst_port =
735 tcp_spec->hdr.dst_port;
736 list[t].m_u.l4_hdr.dst_port =
737 tcp_mask->hdr.dst_port;
743 case RTE_FLOW_ITEM_TYPE_SCTP:
744 sctp_spec = item->spec;
745 sctp_mask = item->mask;
746 if (sctp_spec && sctp_mask) {
747 /* Check SCTP mask and update input set */
748 if (sctp_mask->hdr.cksum) {
749 rte_flow_error_set(error, EINVAL,
750 RTE_FLOW_ERROR_TYPE_ITEM,
752 "Invalid SCTP mask");
757 if (sctp_mask->hdr.src_port)
759 ICE_INSET_TUN_SCTP_SRC_PORT;
760 if (sctp_mask->hdr.dst_port)
762 ICE_INSET_TUN_SCTP_DST_PORT;
764 if (sctp_mask->hdr.src_port)
766 ICE_INSET_SCTP_SRC_PORT;
767 if (sctp_mask->hdr.dst_port)
769 ICE_INSET_SCTP_DST_PORT;
771 list[t].type = ICE_SCTP_IL;
772 if (sctp_mask->hdr.src_port) {
773 list[t].h_u.sctp_hdr.src_port =
774 sctp_spec->hdr.src_port;
775 list[t].m_u.sctp_hdr.src_port =
776 sctp_mask->hdr.src_port;
778 if (sctp_mask->hdr.dst_port) {
779 list[t].h_u.sctp_hdr.dst_port =
780 sctp_spec->hdr.dst_port;
781 list[t].m_u.sctp_hdr.dst_port =
782 sctp_mask->hdr.dst_port;
788 case RTE_FLOW_ITEM_TYPE_VXLAN:
789 vxlan_spec = item->spec;
790 vxlan_mask = item->mask;
791 /* Check if VXLAN item is used to describe protocol.
792 * If yes, both spec and mask should be NULL.
793 * If no, both spec and mask shouldn't be NULL.
795 if ((!vxlan_spec && vxlan_mask) ||
796 (vxlan_spec && !vxlan_mask)) {
797 rte_flow_error_set(error, EINVAL,
798 RTE_FLOW_ERROR_TYPE_ITEM,
800 "Invalid VXLAN item");
805 if (vxlan_spec && vxlan_mask) {
806 list[t].type = ICE_VXLAN;
807 if (vxlan_mask->vni[0] ||
808 vxlan_mask->vni[1] ||
809 vxlan_mask->vni[2]) {
810 list[t].h_u.tnl_hdr.vni =
811 (vxlan_spec->vni[2] << 16) |
812 (vxlan_spec->vni[1] << 8) |
814 list[t].m_u.tnl_hdr.vni =
815 (vxlan_mask->vni[2] << 16) |
816 (vxlan_mask->vni[1] << 8) |
819 ICE_INSET_TUN_VXLAN_VNI;
825 case RTE_FLOW_ITEM_TYPE_NVGRE:
826 nvgre_spec = item->spec;
827 nvgre_mask = item->mask;
828 /* Check if NVGRE item is used to describe protocol.
829 * If yes, both spec and mask should be NULL.
830 * If no, both spec and mask shouldn't be NULL.
832 if ((!nvgre_spec && nvgre_mask) ||
833 (nvgre_spec && !nvgre_mask)) {
834 rte_flow_error_set(error, EINVAL,
835 RTE_FLOW_ERROR_TYPE_ITEM,
837 "Invalid NVGRE item");
841 if (nvgre_spec && nvgre_mask) {
842 list[t].type = ICE_NVGRE;
843 if (nvgre_mask->tni[0] ||
844 nvgre_mask->tni[1] ||
845 nvgre_mask->tni[2]) {
846 list[t].h_u.nvgre_hdr.tni_flow =
847 (nvgre_spec->tni[2] << 16) |
848 (nvgre_spec->tni[1] << 8) |
850 list[t].m_u.nvgre_hdr.tni_flow =
851 (nvgre_mask->tni[2] << 16) |
852 (nvgre_mask->tni[1] << 8) |
855 ICE_INSET_TUN_NVGRE_TNI;
861 case RTE_FLOW_ITEM_TYPE_VLAN:
862 vlan_spec = item->spec;
863 vlan_mask = item->mask;
864 /* Check if VLAN item is used to describe protocol.
865 * If yes, both spec and mask should be NULL.
866 * If no, both spec and mask shouldn't be NULL.
868 if ((!vlan_spec && vlan_mask) ||
869 (vlan_spec && !vlan_mask)) {
870 rte_flow_error_set(error, EINVAL,
871 RTE_FLOW_ERROR_TYPE_ITEM,
873 "Invalid VLAN item");
876 if (vlan_spec && vlan_mask) {
877 list[t].type = ICE_VLAN_OFOS;
878 if (vlan_mask->tci) {
879 list[t].h_u.vlan_hdr.vlan =
881 list[t].m_u.vlan_hdr.vlan =
883 input_set |= ICE_INSET_VLAN_OUTER;
885 if (vlan_mask->inner_type) {
886 list[t].h_u.vlan_hdr.type =
887 vlan_spec->inner_type;
888 list[t].m_u.vlan_hdr.type =
889 vlan_mask->inner_type;
890 input_set |= ICE_INSET_VLAN_OUTER;
896 case RTE_FLOW_ITEM_TYPE_PPPOED:
897 case RTE_FLOW_ITEM_TYPE_PPPOES:
898 pppoe_spec = item->spec;
899 pppoe_mask = item->mask;
900 /* Check if PPPoE item is used to describe protocol.
901 * If yes, both spec and mask should be NULL.
902 * If no, both spec and mask shouldn't be NULL.
904 if ((!pppoe_spec && pppoe_mask) ||
905 (pppoe_spec && !pppoe_mask)) {
906 rte_flow_error_set(error, EINVAL,
907 RTE_FLOW_ERROR_TYPE_ITEM,
909 "Invalid pppoe item");
912 if (pppoe_spec && pppoe_mask) {
913 /* Check pppoe mask and update input set */
914 if (pppoe_mask->length ||
916 pppoe_mask->version_type) {
917 rte_flow_error_set(error, EINVAL,
918 RTE_FLOW_ERROR_TYPE_ITEM,
920 "Invalid pppoe mask");
923 list[t].type = ICE_PPPOE;
924 if (pppoe_mask->session_id) {
925 list[t].h_u.pppoe_hdr.session_id =
926 pppoe_spec->session_id;
927 list[t].m_u.pppoe_hdr.session_id =
928 pppoe_mask->session_id;
929 input_set |= ICE_INSET_PPPOE_SESSION;
936 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
937 pppoe_proto_spec = item->spec;
938 pppoe_proto_mask = item->mask;
939 /* Check if PPPoE optional proto_id item
940 * is used to describe protocol.
941 * If yes, both spec and mask should be NULL.
942 * If no, both spec and mask shouldn't be NULL.
944 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
945 (pppoe_proto_spec && !pppoe_proto_mask)) {
946 rte_flow_error_set(error, EINVAL,
947 RTE_FLOW_ERROR_TYPE_ITEM,
949 "Invalid pppoe proto item");
952 if (pppoe_proto_spec && pppoe_proto_mask) {
955 list[t].type = ICE_PPPOE;
956 if (pppoe_proto_mask->proto_id) {
957 list[t].h_u.pppoe_hdr.ppp_prot_id =
958 pppoe_proto_spec->proto_id;
959 list[t].m_u.pppoe_hdr.ppp_prot_id =
960 pppoe_proto_mask->proto_id;
961 input_set |= ICE_INSET_PPPOE_PROTO;
967 case RTE_FLOW_ITEM_TYPE_ESP:
968 esp_spec = item->spec;
969 esp_mask = item->mask;
970 if (esp_spec || esp_mask) {
971 rte_flow_error_set(error, EINVAL,
972 RTE_FLOW_ERROR_TYPE_ITEM,
978 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
981 case RTE_FLOW_ITEM_TYPE_AH:
982 ah_spec = item->spec;
983 ah_mask = item->mask;
984 if (ah_spec || ah_mask) {
985 rte_flow_error_set(error, EINVAL,
986 RTE_FLOW_ERROR_TYPE_ITEM,
992 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
995 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
996 l2tp_spec = item->spec;
997 l2tp_mask = item->mask;
998 if (l2tp_spec || l2tp_mask) {
999 rte_flow_error_set(error, EINVAL,
1000 RTE_FLOW_ERROR_TYPE_ITEM,
1002 "Invalid l2tp item");
1006 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1008 case RTE_FLOW_ITEM_TYPE_PFCP:
1009 pfcp_spec = item->spec;
1010 pfcp_mask = item->mask;
1011 /* Check if PFCP item is used to describe protocol.
1012 * If yes, both spec and mask should be NULL.
1013 * If no, both spec and mask shouldn't be NULL.
1015 if ((!pfcp_spec && pfcp_mask) ||
1016 (pfcp_spec && !pfcp_mask)) {
1017 rte_flow_error_set(error, EINVAL,
1018 RTE_FLOW_ERROR_TYPE_ITEM,
1020 "Invalid PFCP item");
1023 if (pfcp_spec && pfcp_mask) {
1024 /* Check pfcp mask and update input set */
1025 if (pfcp_mask->msg_type ||
1026 pfcp_mask->msg_len ||
1028 rte_flow_error_set(error, EINVAL,
1029 RTE_FLOW_ERROR_TYPE_ITEM,
1031 "Invalid pfcp mask");
1034 if (pfcp_mask->s_field &&
1035 pfcp_spec->s_field == 0x01 &&
1038 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1039 else if (pfcp_mask->s_field &&
1040 pfcp_spec->s_field == 0x01)
1042 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1043 else if (pfcp_mask->s_field &&
1044 !pfcp_spec->s_field &&
1047 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1048 else if (pfcp_mask->s_field &&
1049 !pfcp_spec->s_field)
1051 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1058 case RTE_FLOW_ITEM_TYPE_VOID:
1062 rte_flow_error_set(error, EINVAL,
1063 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1064 "Invalid pattern item.");
1077 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1078 struct rte_flow_error *error,
1079 struct ice_adv_rule_info *rule_info)
1081 const struct rte_flow_action_vf *act_vf;
1082 const struct rte_flow_action *action;
1083 enum rte_flow_action_type action_type;
1085 for (action = actions; action->type !=
1086 RTE_FLOW_ACTION_TYPE_END; action++) {
1087 action_type = action->type;
1088 switch (action_type) {
1089 case RTE_FLOW_ACTION_TYPE_VF:
1090 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1091 act_vf = action->conf;
1092 rule_info->sw_act.vsi_handle = act_vf->id;
1095 rte_flow_error_set(error,
1096 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1098 "Invalid action type or queue number");
1103 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1105 rule_info->priority = 5;
1111 ice_switch_parse_action(struct ice_pf *pf,
1112 const struct rte_flow_action *actions,
1113 struct rte_flow_error *error,
1114 struct ice_adv_rule_info *rule_info)
1116 struct ice_vsi *vsi = pf->main_vsi;
1117 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1118 const struct rte_flow_action_queue *act_q;
1119 const struct rte_flow_action_rss *act_qgrop;
1120 uint16_t base_queue, i;
1121 const struct rte_flow_action *action;
1122 enum rte_flow_action_type action_type;
1123 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1124 2, 4, 8, 16, 32, 64, 128};
1126 base_queue = pf->base_queue + vsi->base_queue;
1127 for (action = actions; action->type !=
1128 RTE_FLOW_ACTION_TYPE_END; action++) {
1129 action_type = action->type;
1130 switch (action_type) {
1131 case RTE_FLOW_ACTION_TYPE_RSS:
1132 act_qgrop = action->conf;
1133 rule_info->sw_act.fltr_act =
1135 rule_info->sw_act.fwd_id.q_id =
1136 base_queue + act_qgrop->queue[0];
1137 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1138 if (act_qgrop->queue_num ==
1139 valid_qgrop_number[i])
1142 if (i == MAX_QGRP_NUM_TYPE)
1144 if ((act_qgrop->queue[0] +
1145 act_qgrop->queue_num) >
1146 dev->data->nb_rx_queues)
1148 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1149 if (act_qgrop->queue[i + 1] !=
1150 act_qgrop->queue[i] + 1)
1152 rule_info->sw_act.qgrp_size =
1153 act_qgrop->queue_num;
1155 case RTE_FLOW_ACTION_TYPE_QUEUE:
1156 act_q = action->conf;
1157 if (act_q->index >= dev->data->nb_rx_queues)
1159 rule_info->sw_act.fltr_act =
1161 rule_info->sw_act.fwd_id.q_id =
1162 base_queue + act_q->index;
1165 case RTE_FLOW_ACTION_TYPE_DROP:
1166 rule_info->sw_act.fltr_act =
1170 case RTE_FLOW_ACTION_TYPE_VOID:
1178 rule_info->sw_act.vsi_handle = vsi->idx;
1180 rule_info->sw_act.src = vsi->idx;
1181 rule_info->priority = 5;
1186 rte_flow_error_set(error,
1187 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1189 "Invalid action type or queue number");
1194 ice_switch_check_action(const struct rte_flow_action *actions,
1195 struct rte_flow_error *error)
1197 const struct rte_flow_action *action;
1198 enum rte_flow_action_type action_type;
1199 uint16_t actions_num = 0;
1201 for (action = actions; action->type !=
1202 RTE_FLOW_ACTION_TYPE_END; action++) {
1203 action_type = action->type;
1204 switch (action_type) {
1205 case RTE_FLOW_ACTION_TYPE_VF:
1206 case RTE_FLOW_ACTION_TYPE_RSS:
1207 case RTE_FLOW_ACTION_TYPE_QUEUE:
1208 case RTE_FLOW_ACTION_TYPE_DROP:
1211 case RTE_FLOW_ACTION_TYPE_VOID:
1214 rte_flow_error_set(error,
1215 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1217 "Invalid action type");
1222 if (actions_num > 1) {
1223 rte_flow_error_set(error,
1224 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1226 "Invalid action number");
1234 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1237 case ICE_SW_TUN_PROFID_IPV6_ESP:
1238 case ICE_SW_TUN_PROFID_IPV6_AH:
1239 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1240 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1241 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1242 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1243 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
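/* Parser entry point: guess the tunnel type from the pattern items,
 * allocate the lookup list (an ETH item may take two lookup entries) and
 * the sw_meta, validate the pattern against the supported array, collect
 * the input set, parse the actions (DCF or PF flavour) and return the
 * filled sw_meta through *meta.
 */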
1253 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1254 struct ice_pattern_match_item *array,
1256 const struct rte_flow_item pattern[],
1257 const struct rte_flow_action actions[],
1259 struct rte_flow_error *error)
1261 struct ice_pf *pf = &ad->pf;
1262 uint64_t inputset = 0;
1264 struct sw_meta *sw_meta_ptr = NULL;
1265 struct ice_adv_rule_info rule_info;
1266 struct ice_adv_lkup_elem *list = NULL;
1267 uint16_t lkups_num = 0;
1268 const struct rte_flow_item *item = pattern;
1269 uint16_t item_num = 0;
1270 enum ice_sw_tunnel_type tun_type =
1271 ICE_SW_TUN_AND_NON_TUN;
1272 struct ice_pattern_match_item *pattern_match_item = NULL;
1274 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1276 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1277 tun_type = ICE_SW_TUN_VXLAN;
1278 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1279 tun_type = ICE_SW_TUN_NVGRE;
1280 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1281 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1282 tun_type = ICE_SW_TUN_PPPOE;
1283 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1284 const struct rte_flow_item_eth *eth_mask;
1286 eth_mask = item->mask;
1289 if (eth_mask->type == UINT16_MAX)
1290 tun_type = ICE_SW_TUN_AND_NON_TUN;
1292 /* reserve one more memory slot for ETH which may
1293 * consume 2 lookup items.
1295 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1299 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1301 rte_flow_error_set(error, EINVAL,
1302 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1303 "No memory for PMD internal items");
1308 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1310 rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1312 "No memory for sw_pattern_meta_ptr");
1316 pattern_match_item =
1317 ice_search_pattern_match_item(pattern, array, array_len, error);
1318 if (!pattern_match_item) {
1319 rte_flow_error_set(error, EINVAL,
1320 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1321 "Invalid input pattern");
1325 inputset = ice_switch_inset_get
1326 (pattern, error, list, &lkups_num, &tun_type);
1327 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1328 (inputset & ~pattern_match_item->input_set_mask)) {
1329 rte_flow_error_set(error, EINVAL,
1330 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1332 "Invalid input set");
1336 rule_info.tun_type = tun_type;
1338 ret = ice_switch_check_action(actions, error);
1340 rte_flow_error_set(error, EINVAL,
1341 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1342 "Invalid input action number");
1346 if (ad->hw.dcf_enabled)
1347 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1349 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1352 rte_flow_error_set(error, EINVAL,
1353 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1354 "Invalid input action");
1359 *meta = sw_meta_ptr;
1360 ((struct sw_meta *)*meta)->list = list;
1361 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1362 ((struct sw_meta *)*meta)->rule_info = rule_info;
1365 rte_free(sw_meta_ptr);
1368 rte_free(pattern_match_item);
1374 rte_free(sw_meta_ptr);
1375 rte_free(pattern_match_item);
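/* Flow query is not supported: the switch filter has no counters. */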
1381 ice_switch_query(struct ice_adapter *ad __rte_unused,
1382 struct rte_flow *flow __rte_unused,
1383 struct rte_flow_query_count *count __rte_unused,
1384 struct rte_flow_error *error)
1386 rte_flow_error_set(error, EINVAL,
1387 RTE_FLOW_ERROR_TYPE_HANDLE,
1389 "count action not supported by switch filter");
1395 ice_switch_init(struct ice_adapter *ad)
1398 struct ice_flow_parser *dist_parser;
1399 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1401 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1402 dist_parser = &ice_switch_dist_parser_comms;
1403 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1404 dist_parser = &ice_switch_dist_parser_os;
1408 if (ad->devargs.pipe_mode_support)
1409 ret = ice_register_parser(perm_parser, ad);
1411 ret = ice_register_parser(dist_parser, ad);
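/* Unregister whichever parser ice_switch_init() registered. */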
1416 ice_switch_uninit(struct ice_adapter *ad)
1418 struct ice_flow_parser *dist_parser;
1419 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1421 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1422 dist_parser = &ice_switch_dist_parser_comms;
1424 dist_parser = &ice_switch_dist_parser_os;
1426 if (ad->devargs.pipe_mode_support)
1427 ice_unregister_parser(perm_parser, ad);
1429 ice_unregister_parser(dist_parser, ad);
1433 ice_flow_engine ice_switch_engine = {
1434 .init = ice_switch_init,
1435 .uninit = ice_switch_uninit,
1436 .create = ice_switch_create,
1437 .destroy = ice_switch_destroy,
1438 .query_count = ice_switch_query,
1439 .free = ice_switch_filter_rule_free,
1440 .type = ICE_FLOW_ENGINE_SWITCH,
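/* Three parser instances share the switch engine: the OS-default and
 * comms distributor-stage parsers and the permission-stage parser.
 */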
1444 ice_flow_parser ice_switch_dist_parser_os = {
1445 .engine = &ice_switch_engine,
1446 .array = ice_switch_pattern_dist_os,
1447 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1448 .parse_pattern_action = ice_switch_parse_pattern_action,
1449 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1453 ice_flow_parser ice_switch_dist_parser_comms = {
1454 .engine = &ice_switch_engine,
1455 .array = ice_switch_pattern_dist_comms,
1456 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1457 .parse_pattern_action = ice_switch_parse_pattern_action,
1458 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1462 ice_flow_parser ice_switch_perm_parser = {
1463 .engine = &ice_switch_engine,
1464 .array = ice_switch_pattern_perm,
1465 .array_len = RTE_DIM(ice_switch_pattern_perm),
1466 .parse_pattern_action = ice_switch_parse_pattern_action,
1467 .stage = ICE_FLOW_STAGE_PERMISSION,
1470 RTE_INIT(ice_sw_engine_init)
1472 struct ice_flow_engine *engine = &ice_switch_engine;
1473 ice_register_flow_engine(engine);