1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
/* Switch-filter input-set masks: each ICE_SW_INSET_* value ORs together the
 * ICE_INSET_* field bits that a given flow pattern is allowed to match on.
 * They are paired with patterns in the ice_switch_pattern_* tables below.
 * NOTE(review): some macro continuation lines are not visible in this chunk
 * (extraction gaps); verify complete masks against the full file before
 * modifying any of them.
 */
28 #define MAX_QGRP_NUM_TYPE 7
/* Non-tunnel (outer header) patterns. */
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Distributor-stage tunnel patterns: match inner (ICE_INSET_TUN_*) plus
 * selected outer fields.
 */
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Permission-stage tunnel patterns: inner-header fields only. */
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns; the _PROTO variant additionally matches the PPP
 * protocol id carried after the PPPoE session header.
 */
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
/* Members of struct sw_meta (the struct's opening line is not visible in
 * this chunk): the pre-parsed lookup list and rule info handed from
 * ice_switch_parse_pattern_action() to ice_switch_create().
 */
100 struct ice_adv_lkup_elem *list;
102 struct ice_adv_rule_info rule_info;
/* Forward declarations of the flow parsers this file registers.
 * NOTE(review): "os"/"comms" presumably distinguish DDP package flavors and
 * "dist"/"perm" the distributor vs. permission pipeline stage -- confirm
 * against ice_switch_init() in the full file.
 */
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
/* Supported patterns for the distributor stage with the "comms" package:
 * each entry pairs a pattern with the input-set mask of matchable fields.
 * NOTE(review): several pattern-name lines (and the array's storage-class
 * line and closing brace) are missing from this chunk.
 */
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
112 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113 {pattern_ethertype_vlan,
114 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
116 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp,
118 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_tcp,
120 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
122 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123 {pattern_eth_ipv6_udp,
124 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv6_tcp,
126 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_nvgre_eth_ipv4,
134 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141 {pattern_eth_vlan_pppoed,
142 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
144 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145 {pattern_eth_vlan_pppoes,
146 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147 {pattern_eth_pppoes_proto,
148 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149 {pattern_eth_vlan_pppoes_proto,
150 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
/* ESP/AH/L2TPv3/PFCP are matched via profile-ID rules, so they carry no
 * input-set bits (see ice_is_profile_rule()).
 */
151 {pattern_eth_ipv6_esp,
152 ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_eth_ipv6_udp_esp,
154 ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_ipv6_ah,
156 ICE_INSET_NONE, ICE_INSET_NONE},
157 {pattern_eth_ipv6_udp_ah,
158 ICE_INSET_NONE, ICE_INSET_NONE},
159 {pattern_eth_ipv6_l2tp,
160 ICE_INSET_NONE, ICE_INSET_NONE},
161 {pattern_eth_ipv4_pfcp,
162 ICE_INSET_NONE, ICE_INSET_NONE},
163 {pattern_eth_ipv6_pfcp,
164 ICE_INSET_NONE, ICE_INSET_NONE},
/* Supported patterns for the distributor stage with the "os" default
 * package; a subset of the comms table (no PPPoE / ESP / AH / L2TPv3 /
 * PFCP entries).  NOTE(review): some pattern-name lines and the array's
 * storage-class line / closing brace are missing from this chunk.
 */
168 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
170 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
171 {pattern_ethertype_vlan,
172 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
174 ICE_INSET_NONE, ICE_INSET_NONE},
176 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
177 {pattern_eth_ipv4_udp,
178 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
179 {pattern_eth_ipv4_tcp,
180 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
182 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
183 {pattern_eth_ipv6_udp,
184 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
185 {pattern_eth_ipv6_tcp,
186 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
187 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
188 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
189 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
190 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
191 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
192 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
193 {pattern_eth_ipv4_nvgre_eth_ipv4,
194 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
195 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
196 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
197 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
198 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Supported patterns for the permission stage: tunnel entries use the
 * PERM (inner-only) input sets instead of the DIST ones.
 * NOTE(review): some pattern-name lines and the array's storage-class line
 * / closing brace are missing from this chunk.
 */
202 ice_pattern_match_item ice_switch_pattern_perm[] = {
203 {pattern_ethertype_vlan,
204 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
206 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
207 {pattern_eth_ipv4_udp,
208 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
209 {pattern_eth_ipv4_tcp,
210 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
212 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
213 {pattern_eth_ipv6_udp,
214 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
215 {pattern_eth_ipv6_tcp,
216 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
217 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
218 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
219 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
220 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
221 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
222 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
223 {pattern_eth_ipv4_nvgre_eth_ipv4,
224 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
225 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
226 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
227 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
228 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Profile-ID rules: no explicit input-set bits. */
229 {pattern_eth_ipv6_esp,
230 ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_ipv6_udp_esp,
232 ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_ipv6_ah,
234 ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_ipv6_udp_ah,
236 ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_ipv6_l2tp,
238 ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_ipv4_pfcp,
240 ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_ipv6_pfcp,
242 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program one switch rule into hardware from the sw_meta produced by
 * ice_switch_parse_pattern_action(): validates the lookup list, calls
 * ice_add_adv_rule(), and stores the returned rule-query data on
 * flow->rule so ice_switch_destroy() can remove the rule later.
 * NOTE(review): this chunk is missing several lines of the function (the
 * 'meta' parameter, the ret/lkups_cnt declarations, the error gotos and
 * return statements); comments below cover only what is visible.
 */
246 ice_switch_create(struct ice_adapter *ad,
247 struct rte_flow *flow,
249 struct rte_flow_error *error)
252 struct ice_pf *pf = &ad->pf;
253 struct ice_hw *hw = ICE_PF_TO_HW(pf);
254 struct ice_rule_query_data rule_added = {0};
255 struct ice_rule_query_data *filter_ptr;
/* Unpack the parser output carried in the opaque meta pointer. */
256 struct ice_adv_lkup_elem *list =
257 ((struct sw_meta *)meta)->list;
259 ((struct sw_meta *)meta)->lkups_num;
260 struct ice_adv_rule_info *rule_info =
261 &((struct sw_meta *)meta)->rule_info;
/* Hardware limits one rule to ICE_MAX_CHAIN_WORDS lookup words. */
263 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
264 rte_flow_error_set(error, EINVAL,
265 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
266 "item number too large for rule");
270 rte_flow_error_set(error, EINVAL,
271 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
272 "lookup list should not be NULL");
275 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule id in a heap copy referenced by flow->rule. */
277 filter_ptr = rte_zmalloc("ice_switch_filter",
278 sizeof(struct ice_rule_query_data), 0);
280 rte_flow_error_set(error, EINVAL,
281 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
282 "No memory for ice_switch_filter");
285 flow->rule = filter_ptr;
286 rte_memcpy(filter_ptr,
288 sizeof(struct ice_rule_query_data));
/* Error path for a failed ice_add_adv_rule() call. */
290 rte_flow_error_set(error, EINVAL,
291 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
292 "switch filter create flow fail");
/*
 * Remove the hardware switch rule referenced by flow->rule (the
 * ice_rule_query_data saved by ice_switch_create()) and free that copy.
 * NOTE(review): lines containing the NULL check, return statements and
 * part of the error message are missing from this chunk.
 */
308 ice_switch_destroy(struct ice_adapter *ad,
309 struct rte_flow *flow,
310 struct rte_flow_error *error)
312 struct ice_hw *hw = &ad->hw;
314 struct ice_rule_query_data *filter_ptr;
316 filter_ptr = (struct ice_rule_query_data *)
/* Reject flows that carry no rule data (not created by this filter). */
320 rte_flow_error_set(error, EINVAL,
321 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
323 " create by switch filter");
327 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
329 rte_flow_error_set(error, EINVAL,
330 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331 "fail to destroy switch filter rule");
/* Free the per-rule query data allocated at create time. */
335 rte_free(filter_ptr);
/* Release the per-flow rule data without touching hardware (used when the
 * device is already reset/stopped).  rte_free(NULL) is a no-op.
 */
340 ice_switch_filter_rule_free(struct rte_flow *flow)
342 rte_free(flow->rule)
/*
 * Walk the rte_flow pattern and translate it into the ice_adv_lkup_elem
 * array consumed by ice_add_adv_rule(), returning (via visible code paths)
 * the accumulated input-set bit-mask of matched fields.  Also refines
 * *tun_type for profile-ID rules (ESP / AH / L2TPv3 / PFCP / NAT-T).
 * NOTE(review): this chunk is missing many lines of the function
 * (declarations of the list index and loop counters, tunnel_valid updates,
 * break statements, closing braces and the final return); inline comments
 * below describe only the visible logic.
 */
346 ice_switch_inset_get(const struct rte_flow_item pattern[],
347 struct rte_flow_error *error,
348 struct ice_adv_lkup_elem *list,
350 enum ice_sw_tunnel_type *tun_type)
352 const struct rte_flow_item *item = pattern;
353 enum rte_flow_item_type item_type;
354 const struct rte_flow_item_eth *eth_spec, *eth_mask;
355 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
356 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
357 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
358 const struct rte_flow_item_udp *udp_spec, *udp_mask;
359 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
360 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
361 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
362 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
363 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
364 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
366 const struct rte_flow_item_esp *esp_spec, *esp_mask;
367 const struct rte_flow_item_ah *ah_spec, *ah_mask;
368 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
369 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
370 uint64_t input_set = ICE_INSET_NONE;
/* tunnel_valid flips once a tunnel item (VXLAN/NVGRE) has been seen, so
 * later items are treated as inner headers.
 */
372 uint16_t tunnel_valid = 0;
373 uint16_t pppoe_valid = 0;
/* NOTE(review): "valiad" is a long-standing typo for "valid"; the names
 * are kept as-is because a doc pass must not rename code.
 */
374 uint16_t ipv6_valiad = 0;
375 uint16_t udp_valiad = 0;
/* Per-item translation loop; ranges ("last") are not supported. */
378 for (item = pattern; item->type !=
379 RTE_FLOW_ITEM_TYPE_END; item++) {
381 rte_flow_error_set(error, EINVAL,
382 RTE_FLOW_ERROR_TYPE_ITEM,
384 "Not support range");
387 item_type = item->type;
/* Ethernet: MAC addresses map to ICE_MAC_OFOS (outer) or ICE_MAC_IL
 * (inner); a non-zero ethertype mask emits an extra ICE_ETYPE_OL elem.
 */
390 case RTE_FLOW_ITEM_TYPE_ETH:
391 eth_spec = item->spec;
392 eth_mask = item->mask;
393 if (eth_spec && eth_mask) {
394 const uint8_t *a = eth_mask->src.addr_bytes;
395 const uint8_t *b = eth_mask->dst.addr_bytes;
/* Decide TUN_ vs plain SMAC/DMAC input-set bits byte-by-byte
 * (the input_set |= lines are among the missing lines here).
 */
396 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
397 if (a[j] && tunnel_valid) {
407 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
408 if (b[j] && tunnel_valid) {
419 input_set |= ICE_INSET_ETHERTYPE;
420 list[t].type = (tunnel_valid == 0) ?
421 ICE_MAC_OFOS : ICE_MAC_IL;
422 struct ice_ether_hdr *h;
423 struct ice_ether_hdr *m;
425 h = &list[t].h_u.eth_hdr;
426 m = &list[t].m_u.eth_hdr;
/* Copy only the masked address bytes into header/mask unions. */
427 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
428 if (eth_mask->src.addr_bytes[j]) {
430 eth_spec->src.addr_bytes[j];
432 eth_mask->src.addr_bytes[j];
435 if (eth_mask->dst.addr_bytes[j]) {
437 eth_spec->dst.addr_bytes[j];
439 eth_mask->dst.addr_bytes[j];
445 if (eth_mask->type) {
446 list[t].type = ICE_ETYPE_OL;
447 list[t].h_u.ethertype.ethtype_id =
449 list[t].m_u.ethertype.ethtype_id =
/* IPv4: reject masks on fields hardware cannot match, then set
 * TUN_ or outer input-set bits depending on tunnel_valid.
 */
456 case RTE_FLOW_ITEM_TYPE_IPV4:
457 ipv4_spec = item->spec;
458 ipv4_mask = item->mask;
459 if (ipv4_spec && ipv4_mask) {
460 /* Check IPv4 mask and update input set */
461 if (ipv4_mask->hdr.version_ihl ||
462 ipv4_mask->hdr.total_length ||
463 ipv4_mask->hdr.packet_id ||
464 ipv4_mask->hdr.hdr_checksum) {
465 rte_flow_error_set(error, EINVAL,
466 RTE_FLOW_ERROR_TYPE_ITEM,
468 "Invalid IPv4 mask.");
473 if (ipv4_mask->hdr.type_of_service)
475 ICE_INSET_TUN_IPV4_TOS;
476 if (ipv4_mask->hdr.src_addr)
478 ICE_INSET_TUN_IPV4_SRC;
479 if (ipv4_mask->hdr.dst_addr)
481 ICE_INSET_TUN_IPV4_DST;
482 if (ipv4_mask->hdr.time_to_live)
484 ICE_INSET_TUN_IPV4_TTL;
485 if (ipv4_mask->hdr.next_proto_id)
487 ICE_INSET_TUN_IPV4_PROTO;
489 if (ipv4_mask->hdr.src_addr)
490 input_set |= ICE_INSET_IPV4_SRC;
491 if (ipv4_mask->hdr.dst_addr)
492 input_set |= ICE_INSET_IPV4_DST;
493 if (ipv4_mask->hdr.time_to_live)
494 input_set |= ICE_INSET_IPV4_TTL;
495 if (ipv4_mask->hdr.next_proto_id)
497 ICE_INSET_IPV4_PROTO;
498 if (ipv4_mask->hdr.type_of_service)
502 list[t].type = (tunnel_valid == 0) ?
503 ICE_IPV4_OFOS : ICE_IPV4_IL;
/* Copy masked IPv4 fields into the lookup element. */
504 if (ipv4_mask->hdr.src_addr) {
505 list[t].h_u.ipv4_hdr.src_addr =
506 ipv4_spec->hdr.src_addr;
507 list[t].m_u.ipv4_hdr.src_addr =
508 ipv4_mask->hdr.src_addr;
510 if (ipv4_mask->hdr.dst_addr) {
511 list[t].h_u.ipv4_hdr.dst_addr =
512 ipv4_spec->hdr.dst_addr;
513 list[t].m_u.ipv4_hdr.dst_addr =
514 ipv4_mask->hdr.dst_addr;
516 if (ipv4_mask->hdr.time_to_live) {
517 list[t].h_u.ipv4_hdr.time_to_live =
518 ipv4_spec->hdr.time_to_live;
519 list[t].m_u.ipv4_hdr.time_to_live =
520 ipv4_mask->hdr.time_to_live;
522 if (ipv4_mask->hdr.next_proto_id) {
523 list[t].h_u.ipv4_hdr.protocol =
524 ipv4_spec->hdr.next_proto_id;
525 list[t].m_u.ipv4_hdr.protocol =
526 ipv4_mask->hdr.next_proto_id;
528 if (ipv4_mask->hdr.type_of_service) {
529 list[t].h_u.ipv4_hdr.tos =
530 ipv4_spec->hdr.type_of_service;
531 list[t].m_u.ipv4_hdr.tos =
532 ipv4_mask->hdr.type_of_service;
/* IPv6: payload_len is not matchable; TC is extracted from the
 * big-endian vtc_flow word via RTE_IPV6_HDR_TC_MASK/SHIFT.
 */
538 case RTE_FLOW_ITEM_TYPE_IPV6:
539 ipv6_spec = item->spec;
540 ipv6_mask = item->mask;
542 if (ipv6_spec && ipv6_mask) {
543 if (ipv6_mask->hdr.payload_len) {
544 rte_flow_error_set(error, EINVAL,
545 RTE_FLOW_ERROR_TYPE_ITEM,
547 "Invalid IPv6 mask");
551 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
552 if (ipv6_mask->hdr.src_addr[j] &&
555 ICE_INSET_TUN_IPV6_SRC;
557 } else if (ipv6_mask->hdr.src_addr[j]) {
558 input_set |= ICE_INSET_IPV6_SRC;
562 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
563 if (ipv6_mask->hdr.dst_addr[j] &&
566 ICE_INSET_TUN_IPV6_DST;
568 } else if (ipv6_mask->hdr.dst_addr[j]) {
569 input_set |= ICE_INSET_IPV6_DST;
573 if (ipv6_mask->hdr.proto &&
576 ICE_INSET_TUN_IPV6_NEXT_HDR;
577 else if (ipv6_mask->hdr.proto)
579 ICE_INSET_IPV6_NEXT_HDR;
580 if (ipv6_mask->hdr.hop_limits &&
583 ICE_INSET_TUN_IPV6_HOP_LIMIT;
584 else if (ipv6_mask->hdr.hop_limits)
586 ICE_INSET_IPV6_HOP_LIMIT;
587 if ((ipv6_mask->hdr.vtc_flow &
589 (RTE_IPV6_HDR_TC_MASK)) &&
592 ICE_INSET_TUN_IPV6_TC;
593 else if (ipv6_mask->hdr.vtc_flow &
595 (RTE_IPV6_HDR_TC_MASK))
596 input_set |= ICE_INSET_IPV6_TC;
598 list[t].type = (tunnel_valid == 0) ?
599 ICE_IPV6_OFOS : ICE_IPV6_IL;
600 struct ice_ipv6_hdr *f;
601 struct ice_ipv6_hdr *s;
602 f = &list[t].h_u.ipv6_hdr;
603 s = &list[t].m_u.ipv6_hdr;
604 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
605 if (ipv6_mask->hdr.src_addr[j]) {
607 ipv6_spec->hdr.src_addr[j];
609 ipv6_mask->hdr.src_addr[j];
611 if (ipv6_mask->hdr.dst_addr[j]) {
613 ipv6_spec->hdr.dst_addr[j];
615 ipv6_mask->hdr.dst_addr[j];
618 if (ipv6_mask->hdr.proto) {
620 ipv6_spec->hdr.proto;
622 ipv6_mask->hdr.proto;
624 if (ipv6_mask->hdr.hop_limits) {
626 ipv6_spec->hdr.hop_limits;
628 ipv6_mask->hdr.hop_limits;
630 if (ipv6_mask->hdr.vtc_flow &
632 (RTE_IPV6_HDR_TC_MASK)) {
/* Rebuild the ver/tc/flow word with only TC set, for both the
 * header value and the mask.
 */
633 struct ice_le_ver_tc_flow vtf;
634 vtf.u.fld.version = 0;
635 vtf.u.fld.flow_label = 0;
636 vtf.u.fld.tc = (rte_be_to_cpu_32
637 (ipv6_spec->hdr.vtc_flow) &
638 RTE_IPV6_HDR_TC_MASK) >>
639 RTE_IPV6_HDR_TC_SHIFT;
640 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
641 vtf.u.fld.tc = (rte_be_to_cpu_32
642 (ipv6_mask->hdr.vtc_flow) &
643 RTE_IPV6_HDR_TC_MASK) >>
644 RTE_IPV6_HDR_TC_SHIFT;
645 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
/* UDP: only ports are matchable; for a VXLAN rule the outer UDP
 * element becomes ICE_UDP_OF, otherwise ICE_UDP_ILOS.
 */
651 case RTE_FLOW_ITEM_TYPE_UDP:
652 udp_spec = item->spec;
653 udp_mask = item->mask;
655 if (udp_spec && udp_mask) {
656 /* Check UDP mask and update input set*/
657 if (udp_mask->hdr.dgram_len ||
658 udp_mask->hdr.dgram_cksum) {
659 rte_flow_error_set(error, EINVAL,
660 RTE_FLOW_ERROR_TYPE_ITEM,
667 if (udp_mask->hdr.src_port)
669 ICE_INSET_TUN_UDP_SRC_PORT;
670 if (udp_mask->hdr.dst_port)
672 ICE_INSET_TUN_UDP_DST_PORT;
674 if (udp_mask->hdr.src_port)
676 ICE_INSET_UDP_SRC_PORT;
677 if (udp_mask->hdr.dst_port)
679 ICE_INSET_UDP_DST_PORT;
681 if (*tun_type == ICE_SW_TUN_VXLAN &&
683 list[t].type = ICE_UDP_OF;
685 list[t].type = ICE_UDP_ILOS;
686 if (udp_mask->hdr.src_port) {
687 list[t].h_u.l4_hdr.src_port =
688 udp_spec->hdr.src_port;
689 list[t].m_u.l4_hdr.src_port =
690 udp_mask->hdr.src_port;
692 if (udp_mask->hdr.dst_port) {
693 list[t].h_u.l4_hdr.dst_port =
694 udp_spec->hdr.dst_port;
695 list[t].m_u.l4_hdr.dst_port =
696 udp_mask->hdr.dst_port;
/* TCP: only ports are matchable; everything else in the mask is
 * rejected.
 */
702 case RTE_FLOW_ITEM_TYPE_TCP:
703 tcp_spec = item->spec;
704 tcp_mask = item->mask;
705 if (tcp_spec && tcp_mask) {
706 /* Check TCP mask and update input set */
707 if (tcp_mask->hdr.sent_seq ||
708 tcp_mask->hdr.recv_ack ||
709 tcp_mask->hdr.data_off ||
710 tcp_mask->hdr.tcp_flags ||
711 tcp_mask->hdr.rx_win ||
712 tcp_mask->hdr.cksum ||
713 tcp_mask->hdr.tcp_urp) {
714 rte_flow_error_set(error, EINVAL,
715 RTE_FLOW_ERROR_TYPE_ITEM,
722 if (tcp_mask->hdr.src_port)
724 ICE_INSET_TUN_TCP_SRC_PORT;
725 if (tcp_mask->hdr.dst_port)
727 ICE_INSET_TUN_TCP_DST_PORT;
729 if (tcp_mask->hdr.src_port)
731 ICE_INSET_TCP_SRC_PORT;
732 if (tcp_mask->hdr.dst_port)
734 ICE_INSET_TCP_DST_PORT;
736 list[t].type = ICE_TCP_IL;
737 if (tcp_mask->hdr.src_port) {
738 list[t].h_u.l4_hdr.src_port =
739 tcp_spec->hdr.src_port;
740 list[t].m_u.l4_hdr.src_port =
741 tcp_mask->hdr.src_port;
743 if (tcp_mask->hdr.dst_port) {
744 list[t].h_u.l4_hdr.dst_port =
745 tcp_spec->hdr.dst_port;
746 list[t].m_u.l4_hdr.dst_port =
747 tcp_mask->hdr.dst_port;
/* SCTP: only ports are matchable; checksum mask is rejected. */
753 case RTE_FLOW_ITEM_TYPE_SCTP:
754 sctp_spec = item->spec;
755 sctp_mask = item->mask;
756 if (sctp_spec && sctp_mask) {
757 /* Check SCTP mask and update input set */
758 if (sctp_mask->hdr.cksum) {
759 rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ITEM,
762 "Invalid SCTP mask");
767 if (sctp_mask->hdr.src_port)
769 ICE_INSET_TUN_SCTP_SRC_PORT;
770 if (sctp_mask->hdr.dst_port)
772 ICE_INSET_TUN_SCTP_DST_PORT;
774 if (sctp_mask->hdr.src_port)
776 ICE_INSET_SCTP_SRC_PORT;
777 if (sctp_mask->hdr.dst_port)
779 ICE_INSET_SCTP_DST_PORT;
781 list[t].type = ICE_SCTP_IL;
782 if (sctp_mask->hdr.src_port) {
783 list[t].h_u.sctp_hdr.src_port =
784 sctp_spec->hdr.src_port;
785 list[t].m_u.sctp_hdr.src_port =
786 sctp_mask->hdr.src_port;
788 if (sctp_mask->hdr.dst_port) {
789 list[t].h_u.sctp_hdr.dst_port =
790 sctp_spec->hdr.dst_port;
791 list[t].m_u.sctp_hdr.dst_port =
792 sctp_mask->hdr.dst_port;
/* VXLAN: spec/mask must be both present or both absent; the 24-bit
 * VNI is assembled from the three mask/spec bytes.
 */
798 case RTE_FLOW_ITEM_TYPE_VXLAN:
799 vxlan_spec = item->spec;
800 vxlan_mask = item->mask;
801 /* Check if VXLAN item is used to describe protocol.
802 * If yes, both spec and mask should be NULL.
803 * If no, both spec and mask shouldn't be NULL.
805 if ((!vxlan_spec && vxlan_mask) ||
806 (vxlan_spec && !vxlan_mask)) {
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ITEM,
810 "Invalid VXLAN item");
815 if (vxlan_spec && vxlan_mask) {
816 list[t].type = ICE_VXLAN;
817 if (vxlan_mask->vni[0] ||
818 vxlan_mask->vni[1] ||
819 vxlan_mask->vni[2]) {
820 list[t].h_u.tnl_hdr.vni =
821 (vxlan_spec->vni[2] << 16) |
822 (vxlan_spec->vni[1] << 8) |
824 list[t].m_u.tnl_hdr.vni =
825 (vxlan_mask->vni[2] << 16) |
826 (vxlan_mask->vni[1] << 8) |
829 ICE_INSET_TUN_VXLAN_VNI;
/* NVGRE: same both-or-neither spec/mask rule; 24-bit TNI assembly. */
835 case RTE_FLOW_ITEM_TYPE_NVGRE:
836 nvgre_spec = item->spec;
837 nvgre_mask = item->mask;
838 /* Check if NVGRE item is used to describe protocol.
839 * If yes, both spec and mask should be NULL.
840 * If no, both spec and mask shouldn't be NULL.
842 if ((!nvgre_spec && nvgre_mask) ||
843 (nvgre_spec && !nvgre_mask)) {
844 rte_flow_error_set(error, EINVAL,
845 RTE_FLOW_ERROR_TYPE_ITEM,
847 "Invalid NVGRE item");
851 if (nvgre_spec && nvgre_mask) {
852 list[t].type = ICE_NVGRE;
853 if (nvgre_mask->tni[0] ||
854 nvgre_mask->tni[1] ||
855 nvgre_mask->tni[2]) {
856 list[t].h_u.nvgre_hdr.tni_flow =
857 (nvgre_spec->tni[2] << 16) |
858 (nvgre_spec->tni[1] << 8) |
860 list[t].m_u.nvgre_hdr.tni_flow =
861 (nvgre_mask->tni[2] << 16) |
862 (nvgre_mask->tni[1] << 8) |
865 ICE_INSET_TUN_NVGRE_TNI;
/* VLAN: matches outer VLAN tci and/or inner ethertype. */
871 case RTE_FLOW_ITEM_TYPE_VLAN:
872 vlan_spec = item->spec;
873 vlan_mask = item->mask;
874 /* Check if VLAN item is used to describe protocol.
875 * If yes, both spec and mask should be NULL.
876 * If no, both spec and mask shouldn't be NULL.
878 if ((!vlan_spec && vlan_mask) ||
879 (vlan_spec && !vlan_mask)) {
880 rte_flow_error_set(error, EINVAL,
881 RTE_FLOW_ERROR_TYPE_ITEM,
883 "Invalid VLAN item");
886 if (vlan_spec && vlan_mask) {
887 list[t].type = ICE_VLAN_OFOS;
888 if (vlan_mask->tci) {
889 list[t].h_u.vlan_hdr.vlan =
891 list[t].m_u.vlan_hdr.vlan =
893 input_set |= ICE_INSET_VLAN_OUTER;
895 if (vlan_mask->inner_type) {
896 list[t].h_u.vlan_hdr.type =
897 vlan_spec->inner_type;
898 list[t].m_u.vlan_hdr.type =
899 vlan_mask->inner_type;
900 input_set |= ICE_INSET_VLAN_OUTER;
/* PPPoE discovery/session: only the session id is matchable;
 * length/code/version masks are rejected.
 */
906 case RTE_FLOW_ITEM_TYPE_PPPOED:
907 case RTE_FLOW_ITEM_TYPE_PPPOES:
908 pppoe_spec = item->spec;
909 pppoe_mask = item->mask;
910 /* Check if PPPoE item is used to describe protocol.
911 * If yes, both spec and mask should be NULL.
912 * If no, both spec and mask shouldn't be NULL.
914 if ((!pppoe_spec && pppoe_mask) ||
915 (pppoe_spec && !pppoe_mask)) {
916 rte_flow_error_set(error, EINVAL,
917 RTE_FLOW_ERROR_TYPE_ITEM,
919 "Invalid pppoe item");
922 if (pppoe_spec && pppoe_mask) {
923 /* Check pppoe mask and update input set */
924 if (pppoe_mask->length ||
926 pppoe_mask->version_type) {
927 rte_flow_error_set(error, EINVAL,
928 RTE_FLOW_ERROR_TYPE_ITEM,
930 "Invalid pppoe mask");
933 list[t].type = ICE_PPPOE;
934 if (pppoe_mask->session_id) {
935 list[t].h_u.pppoe_hdr.session_id =
936 pppoe_spec->session_id;
937 list[t].m_u.pppoe_hdr.session_id =
938 pppoe_mask->session_id;
939 input_set |= ICE_INSET_PPPOE_SESSION;
/* Optional PPPoE proto-id item: fills the ppp_prot_id field of the
 * PPPoE lookup element.
 */
946 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
947 pppoe_proto_spec = item->spec;
948 pppoe_proto_mask = item->mask;
949 /* Check if PPPoE optional proto_id item
950 * is used to describe protocol.
951 * If yes, both spec and mask should be NULL.
952 * If no, both spec and mask shouldn't be NULL.
954 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
955 (pppoe_proto_spec && !pppoe_proto_mask)) {
956 rte_flow_error_set(error, EINVAL,
957 RTE_FLOW_ERROR_TYPE_ITEM,
959 "Invalid pppoe proto item");
962 if (pppoe_proto_spec && pppoe_proto_mask) {
965 list[t].type = ICE_PPPOE;
966 if (pppoe_proto_mask->proto_id) {
967 list[t].h_u.pppoe_hdr.ppp_prot_id =
968 pppoe_proto_spec->proto_id;
969 list[t].m_u.pppoe_hdr.ppp_prot_id =
970 pppoe_proto_mask->proto_id;
971 input_set |= ICE_INSET_PPPOE_PROTO;
/* ESP/AH/L2TPv3: matched purely via tunnel-profile id, so spec and
 * mask must be empty; the tun_type chosen depends on the flags set
 * by earlier IPv6/UDP items.
 */
977 case RTE_FLOW_ITEM_TYPE_ESP:
978 esp_spec = item->spec;
979 esp_mask = item->mask;
980 if (esp_spec || esp_mask) {
981 rte_flow_error_set(error, EINVAL,
982 RTE_FLOW_ERROR_TYPE_ITEM,
987 if (ipv6_valiad && udp_valiad)
988 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
989 else if (ipv6_valiad)
990 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
993 case RTE_FLOW_ITEM_TYPE_AH:
994 ah_spec = item->spec;
995 ah_mask = item->mask;
996 if (ah_spec || ah_mask) {
997 rte_flow_error_set(error, EINVAL,
998 RTE_FLOW_ERROR_TYPE_ITEM,
1003 if (ipv6_valiad && udp_valiad)
1004 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1005 else if (ipv6_valiad)
1006 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1009 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1010 l2tp_spec = item->spec;
1011 l2tp_mask = item->mask;
1012 if (l2tp_spec || l2tp_mask) {
1013 rte_flow_error_set(error, EINVAL,
1014 RTE_FLOW_ERROR_TYPE_ITEM,
1016 "Invalid l2tp item");
1020 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
/* PFCP: only the S flag is used, selecting the node vs. session
 * profile, combined with the IPv4/IPv6 flag from earlier items.
 */
1022 case RTE_FLOW_ITEM_TYPE_PFCP:
1023 pfcp_spec = item->spec;
1024 pfcp_mask = item->mask;
1025 /* Check if PFCP item is used to describe protocol.
1026 * If yes, both spec and mask should be NULL.
1027 * If no, both spec and mask shouldn't be NULL.
1029 if ((!pfcp_spec && pfcp_mask) ||
1030 (pfcp_spec && !pfcp_mask)) {
1031 rte_flow_error_set(error, EINVAL,
1032 RTE_FLOW_ERROR_TYPE_ITEM,
1034 "Invalid PFCP item");
1037 if (pfcp_spec && pfcp_mask) {
1038 /* Check pfcp mask and update input set */
1039 if (pfcp_mask->msg_type ||
1040 pfcp_mask->msg_len ||
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ITEM,
1045 "Invalid pfcp mask");
1048 if (pfcp_mask->s_field &&
1049 pfcp_spec->s_field == 0x01 &&
1052 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1053 else if (pfcp_mask->s_field &&
1054 pfcp_spec->s_field == 0x01)
1056 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1057 else if (pfcp_mask->s_field &&
1058 !pfcp_spec->s_field &&
1061 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1062 else if (pfcp_mask->s_field &&
1063 !pfcp_spec->s_field)
1065 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1072 case RTE_FLOW_ITEM_TYPE_VOID:
/* Any other item type is rejected. */
1076 rte_flow_error_set(error, EINVAL,
1077 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1078 "Invalid pattern item.");
/*
 * Parse actions for the DCF (device config function) case: only
 * RTE_FLOW_ACTION_TYPE_VF is accepted, forwarding to the VSI whose handle
 * is the VF id.  NOTE(review): break/return lines and part of the body are
 * missing from this chunk.
 */
1091 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1092 struct rte_flow_error *error,
1093 struct ice_adv_rule_info *rule_info)
1095 const struct rte_flow_action_vf *act_vf;
1096 const struct rte_flow_action *action;
1097 enum rte_flow_action_type action_type;
1099 for (action = actions; action->type !=
1100 RTE_FLOW_ACTION_TYPE_END; action++) {
1101 action_type = action->type;
1102 switch (action_type) {
1103 case RTE_FLOW_ACTION_TYPE_VF:
1104 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1105 act_vf = action->conf;
1106 rule_info->sw_act.vsi_handle = act_vf->id;
1109 rte_flow_error_set(error,
1110 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1112 "Invalid action type or queue number");
/* Source equals destination VSI for DCF rules; priority 5 matches
 * ice_switch_parse_action().
 */
1117 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1119 rule_info->priority = 5;
/*
 * Translate the rte_flow action list into ice_adv_rule_info switch-action
 * fields: RSS queue group (power-of-two sized, 2..128, contiguous queues),
 * single queue, or drop.  Queue ids are offset by the PF/VSI base queue.
 * NOTE(review): the fltr_act enum values assigned for the RSS/queue/drop
 * branches, breaks and the error goto label are among the lines missing
 * from this chunk.
 */
1125 ice_switch_parse_action(struct ice_pf *pf,
1126 const struct rte_flow_action *actions,
1127 struct rte_flow_error *error,
1128 struct ice_adv_rule_info *rule_info)
1130 struct ice_vsi *vsi = pf->main_vsi;
1131 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1132 const struct rte_flow_action_queue *act_q;
1133 const struct rte_flow_action_rss *act_qgrop;
1134 uint16_t base_queue, i;
1135 const struct rte_flow_action *action;
1136 enum rte_flow_action_type action_type;
/* The hardware only supports these queue-group sizes
 * (MAX_QGRP_NUM_TYPE entries).
 */
1137 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1138 2, 4, 8, 16, 32, 64, 128};
1140 base_queue = pf->base_queue + vsi->base_queue;
1141 for (action = actions; action->type !=
1142 RTE_FLOW_ACTION_TYPE_END; action++) {
1143 action_type = action->type;
1144 switch (action_type) {
1145 case RTE_FLOW_ACTION_TYPE_RSS:
1146 act_qgrop = action->conf;
1147 rule_info->sw_act.fltr_act =
1149 rule_info->sw_act.fwd_id.q_id =
1150 base_queue + act_qgrop->queue[0];
/* Group size must be one of the valid powers of two. */
1151 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1152 if (act_qgrop->queue_num ==
1153 valid_qgrop_number[i])
1156 if (i == MAX_QGRP_NUM_TYPE)
/* Group must fit inside the configured Rx queues... */
1158 if ((act_qgrop->queue[0] +
1159 act_qgrop->queue_num) >
1160 dev->data->nb_rx_queues)
/* ...and the queues must be contiguous. */
1162 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1163 if (act_qgrop->queue[i + 1] !=
1164 act_qgrop->queue[i] + 1)
1166 rule_info->sw_act.qgrp_size =
1167 act_qgrop->queue_num;
1169 case RTE_FLOW_ACTION_TYPE_QUEUE:
1170 act_q = action->conf;
1171 if (act_q->index >= dev->data->nb_rx_queues)
1173 rule_info->sw_act.fltr_act =
1175 rule_info->sw_act.fwd_id.q_id =
1176 base_queue + act_q->index;
1179 case RTE_FLOW_ACTION_TYPE_DROP:
1180 rule_info->sw_act.fltr_act =
1184 case RTE_FLOW_ACTION_TYPE_VOID:
1192 rule_info->sw_act.vsi_handle = vsi->idx;
1194 rule_info->sw_act.src = vsi->idx;
1195 rule_info->priority = 5;
/* Shared error exit for all invalid action/queue cases above. */
1200 rte_flow_error_set(error,
1201 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1203 "Invalid action type or queue number");
/*
 * Validate the action list: only VF/RSS/QUEUE/DROP (plus VOID) are
 * allowed, and at most one non-void action.
 * NOTE(review): the actions_num increment, breaks and returns are among
 * the lines missing from this chunk.
 */
1208 ice_switch_check_action(const struct rte_flow_action *actions,
1209 struct rte_flow_error *error)
1211 const struct rte_flow_action *action;
1212 enum rte_flow_action_type action_type;
1213 uint16_t actions_num = 0;
1215 for (action = actions; action->type !=
1216 RTE_FLOW_ACTION_TYPE_END; action++) {
1217 action_type = action->type;
1218 switch (action_type) {
1219 case RTE_FLOW_ACTION_TYPE_VF:
1220 case RTE_FLOW_ACTION_TYPE_RSS:
1221 case RTE_FLOW_ACTION_TYPE_QUEUE:
1222 case RTE_FLOW_ACTION_TYPE_DROP:
1225 case RTE_FLOW_ACTION_TYPE_VOID:
1228 rte_flow_error_set(error,
1229 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1231 "Invalid action type");
/* A switch rule can carry exactly one fate action. */
1236 if (actions_num > 1) {
1237 rte_flow_error_set(error,
1238 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1240 "Invalid action number");
/*
 * Return whether the tunnel type denotes a profile-ID rule (ESP, AH,
 * L2TPv3, NAT-T, PFCP); such rules legitimately have an empty input set.
 * NOTE(review): the switch statement line, the return statements and the
 * default label are missing from this chunk.
 */
1248 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1251 case ICE_SW_TUN_PROFID_IPV6_ESP:
1252 case ICE_SW_TUN_PROFID_IPV6_AH:
1253 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1254 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1255 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1256 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1257 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1258 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
/*
 * Top-level parser entry: pre-scan the pattern to pick a tunnel type and
 * count items, allocate the lookup list and sw_meta, match the pattern
 * against the supported table, build the input set via
 * ice_switch_inset_get(), parse the actions, and hand the filled sw_meta
 * back through *meta for ice_switch_create().
 * NOTE(review): the 'meta' parameter line, several if/goto error lines and
 * the returns are missing from this chunk; comments cover visible logic.
 */
1268 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1269 struct ice_pattern_match_item *array,
1271 const struct rte_flow_item pattern[],
1272 const struct rte_flow_action actions[],
1274 struct rte_flow_error *error)
1276 struct ice_pf *pf = &ad->pf;
1277 uint64_t inputset = 0;
1279 struct sw_meta *sw_meta_ptr = NULL;
1280 struct ice_adv_rule_info rule_info;
1281 struct ice_adv_lkup_elem *list = NULL;
1282 uint16_t lkups_num = 0;
1283 const struct rte_flow_item *item = pattern;
1284 uint16_t item_num = 0;
1285 enum ice_sw_tunnel_type tun_type =
1286 ICE_SW_TUN_AND_NON_TUN;
1287 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: count items and derive the tunnel type from the pattern. */
1289 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1291 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1292 tun_type = ICE_SW_TUN_VXLAN;
1293 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1294 tun_type = ICE_SW_TUN_NVGRE;
1295 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1296 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1297 tun_type = ICE_SW_TUN_PPPOE;
1298 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1299 const struct rte_flow_item_eth *eth_mask;
1301 eth_mask = item->mask;
/* A fully-masked ethertype forces the non-tunnel rule type. */
1304 if (eth_mask->type == UINT16_MAX)
1305 tun_type = ICE_SW_TUN_AND_NON_TUN;
1307 /* reserve one more memory slot for ETH which may
1308 * consume 2 lookup items.
1310 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1314 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1316 rte_flow_error_set(error, EINVAL,
1317 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1318 "No memory for PMD internal items");
1323 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1325 rte_flow_error_set(error, EINVAL,
1326 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1327 "No memory for sw_pattern_meta_ptr");
1331 pattern_match_item =
1332 ice_search_pattern_match_item(pattern, array, array_len, error);
1333 if (!pattern_match_item) {
1334 rte_flow_error_set(error, EINVAL,
1335 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1336 "Invalid input pattern");
1340 inputset = ice_switch_inset_get
1341 (pattern, error, list, &lkups_num, &tun_type);
/* An empty input set is only legal for profile-ID rules, and no bit
 * may fall outside the matched pattern's allowed mask.
 */
1342 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1343 (inputset & ~pattern_match_item->input_set_mask)) {
1344 rte_flow_error_set(error, EINVAL,
1345 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1347 "Invalid input set");
1351 rule_info.tun_type = tun_type;
1353 ret = ice_switch_check_action(actions, error);
1355 rte_flow_error_set(error, EINVAL,
1356 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1357 "Invalid input action number");
/* DCF mode parses only VF actions; otherwise the PF action parser. */
1361 if (ad->hw.dcf_enabled)
1362 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1364 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1367 rte_flow_error_set(error, EINVAL,
1368 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1369 "Invalid input action");
/* Success: transfer ownership of list/sw_meta to the caller via *meta. */
1374 *meta = sw_meta_ptr;
1375 ((struct sw_meta *)*meta)->list = list;
1376 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1377 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error-path cleanup (labels not visible in this chunk). */
1380 rte_free(sw_meta_ptr);
1383 rte_free(pattern_match_item);
1389 rte_free(sw_meta_ptr);
1390 rte_free(pattern_match_item);
/*
 * ice_switch_query - rte_flow COUNT query hook for the switch engine.
 *
 * The switch filter has no flow counters, so this unconditionally
 * reports the query as unsupported via rte_flow_error_set() (the
 * elided lines presumably return the resulting negative errno).
 */
1396 ice_switch_query(struct ice_adapter *ad __rte_unused,
1397 struct rte_flow *flow __rte_unused,
1398 struct rte_flow_query_count *count __rte_unused,
1399 struct rte_flow_error *error)
1401 rte_flow_error_set(error, EINVAL,
1402 RTE_FLOW_ERROR_TYPE_HANDLE,
1404 "count action not supported by switch filter");
/*
 * ice_switch_init - engine init hook: pick the distributor parser that
 * matches the loaded DDP package (comms vs. OS default; an elided
 * branch presumably fails for other package types), then register
 * either the permission-stage parser (pipe mode) or the chosen
 * distributor parser.
 */
1410 ice_switch_init(struct ice_adapter *ad)
1413 struct ice_flow_parser *dist_parser;
1414 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1416 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1417 dist_parser = &ice_switch_dist_parser_comms;
1418 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1419 dist_parser = &ice_switch_dist_parser_os;
/* pipe_mode_support devarg selects the permission stage parser. */
1423 if (ad->devargs.pipe_mode_support)
1424 ret = ice_register_parser(perm_parser, ad);
1426 ret = ice_register_parser(dist_parser, ad);
/*
 * ice_switch_uninit - engine teardown hook: mirror of ice_switch_init.
 * Selects the same package-dependent distributor parser and
 * unregisters whichever parser (permission or distributor) was
 * registered at init time.
 */
1431 ice_switch_uninit(struct ice_adapter *ad)
1433 struct ice_flow_parser *dist_parser;
1434 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1436 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1437 dist_parser = &ice_switch_dist_parser_comms;
1439 dist_parser = &ice_switch_dist_parser_os;
1441 if (ad->devargs.pipe_mode_support)
1442 ice_unregister_parser(perm_parser, ad);
1444 ice_unregister_parser(dist_parser, ad);
/*
 * Switch filter engine ops table, registered with the generic flow
 * framework at constructor time (see RTE_INIT below in the full file).
 */
1448 ice_flow_engine ice_switch_engine = {
1449 .init = ice_switch_init,
1450 .uninit = ice_switch_uninit,
1451 .create = ice_switch_create,
1452 .destroy = ice_switch_destroy,
1453 .query_count = ice_switch_query,
1454 .free = ice_switch_filter_rule_free,
1455 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser used with the OS-default DDP package. */
1459 ice_flow_parser ice_switch_dist_parser_os = {
1460 .engine = &ice_switch_engine,
1461 .array = ice_switch_pattern_dist_os,
1462 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1463 .parse_pattern_action = ice_switch_parse_pattern_action,
1464 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser used with the comms DDP package. */
1468 ice_flow_parser ice_switch_dist_parser_comms = {
1469 .engine = &ice_switch_engine,
1470 .array = ice_switch_pattern_dist_comms,
1471 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1472 .parse_pattern_action = ice_switch_parse_pattern_action,
1473 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser, selected when pipe_mode_support is set. */
1477 ice_flow_parser ice_switch_perm_parser = {
1478 .engine = &ice_switch_engine,
1479 .array = ice_switch_pattern_perm,
1480 .array_len = RTE_DIM(ice_switch_pattern_perm),
1481 .parse_pattern_action = ice_switch_parse_pattern_action,
1482 .stage = ICE_FLOW_STAGE_PERMISSION,
1485 RTE_INIT(ice_sw_engine_init)
1487 struct ice_flow_engine *engine = &ice_switch_engine;
1488 ice_register_flow_engine(engine);