1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
/* Number of entries in the valid RSS queue-group-size table
 * (see valid_qgrop_number[] in ice_switch_parse_action).
 */
28 #define MAX_QGRP_NUM_TYPE 7
/* Input-set bitmasks: each ICE_SW_INSET_* macro describes which header
 * fields a given pattern template may match on.  Non-tunnel (outer)
 * field bits first, then tunnel (inner) field bits below.
 */
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_IPV4 ( \
33 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
34 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
35 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
38 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
39 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
40 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
41 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
42 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
43 #define ICE_SW_INSET_MAC_IPV6 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
45 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
46 ICE_INSET_IPV6_NEXT_HDR)
47 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
48 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
49 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
50 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
52 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
54 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* "DIST" insets (distributor stage): inner tunnel fields plus the outer
 * IPv4 destination, keyed by tunnel kind (NVGRE TNI / VXLAN VNI).
 */
55 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
56 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
57 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
58 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
64 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
65 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
66 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
67 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
68 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
70 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* "PERM" insets (permission/pipeline stage): inner tunnel fields only,
 * no outer-header match bits.
 */
77 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
78 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
83 ICE_INSET_TUN_IPV4_TOS)
84 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
87 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns match on VLAN tags, destination MAC and ethertype. */
88 #define ICE_SW_INSET_MAC_PPPOE ( \
89 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
90 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
/* NOTE(review): the two members below appear to belong to a `struct
 * sw_meta { ... };` definition whose opening/closing lines (and the
 * lkups_num member referenced elsewhere in this file) are not visible
 * in this dump — confirm against the full source.
 * sw_meta is the parser output handed from parse_pattern_action to
 * ice_switch_create: the lookup-element list plus the rule info.
 */
93 struct ice_adv_lkup_elem *list;
95 struct ice_adv_rule_info rule_info;
/* Forward declarations so ice_switch_init/uninit can pick a parser
 * before the descriptors are defined at the bottom of the file.
 */
98 static struct ice_flow_parser ice_switch_dist_parser_os;
99 static struct ice_flow_parser ice_switch_dist_parser_comms;
100 static struct ice_flow_parser ice_switch_perm_parser;
/* Supported pattern templates for the distributor stage with the COMMS
 * DDP package: plain L2/L3/L4, VXLAN/NVGRE tunnels and PPPoE.
 * Each entry: {pattern, allowed input set, metadata input set}.
 * NOTE(review): some entries' pattern-name lines (e.g. for the first
 * two entries) are missing from this dump — confirm against the full
 * source.
 */
103 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
105 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
107 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
108 {pattern_eth_ipv4_udp,
109 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
110 {pattern_eth_ipv4_tcp,
111 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
113 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
114 {pattern_eth_ipv6_udp,
115 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
116 {pattern_eth_ipv6_tcp,
117 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
118 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
119 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
120 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
121 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
122 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
123 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
124 {pattern_eth_ipv4_nvgre_eth_ipv4,
125 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
126 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
127 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
128 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
129 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
131 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
132 {pattern_eth_vlan_pppoed,
133 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
135 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
136 {pattern_eth_vlan_pppoes,
137 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
/* Supported pattern templates for the distributor stage with the OS
 * default DDP package: same as the COMMS table minus the PPPoE entries.
 * NOTE(review): some entries' pattern-name lines are missing from this
 * dump — confirm against the full source.
 */
141 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
143 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
145 ICE_INSET_NONE, ICE_INSET_NONE},
147 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
148 {pattern_eth_ipv4_udp,
149 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
150 {pattern_eth_ipv4_tcp,
151 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
153 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
154 {pattern_eth_ipv6_udp,
155 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
156 {pattern_eth_ipv6_tcp,
157 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
158 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
159 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
160 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
161 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
162 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
163 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
164 {pattern_eth_ipv4_nvgre_eth_ipv4,
165 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
166 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
167 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
168 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
169 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Supported pattern templates for the permission (pipeline-mode) stage:
 * plain L3/L4 plus tunnel patterns that use the PERM (inner-only)
 * input sets.  NOTE(review): some entries' pattern-name lines are
 * missing from this dump — confirm against the full source.
 */
173 ice_pattern_match_item ice_switch_pattern_perm[] = {
175 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
176 {pattern_eth_ipv4_udp,
177 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
178 {pattern_eth_ipv4_tcp,
179 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
181 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
182 {pattern_eth_ipv6_udp,
183 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
184 {pattern_eth_ipv6_tcp,
185 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
186 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
187 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
188 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
189 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
190 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
191 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
192 {pattern_eth_ipv4_nvgre_eth_ipv4,
193 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
194 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
195 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
196 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
197 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Engine .create callback: program the HW switch rule described by the
 * parser meta (lookup list + rule info) and attach the returned rule id
 * to the rte_flow handle.
 * NOTE(review): this dump is missing interior lines (the return-type
 * line, several braces, `goto`/`return` statements, the NULL check that
 * the "lookup list should not be NULL" error presumably guards, and the
 * declarations of `ret`/`lkups_cnt`/`meta`) — confirm control flow
 * against the full source.
 */
201 ice_switch_create(struct ice_adapter *ad,
202 struct rte_flow *flow,
204 struct rte_flow_error *error)
207 struct ice_pf *pf = &ad->pf;
208 struct ice_hw *hw = ICE_PF_TO_HW(pf);
209 struct ice_rule_query_data rule_added = {0};
210 struct ice_rule_query_data *filter_ptr;
/* Unpack the parser output produced by ice_switch_parse_pattern_action. */
211 struct ice_adv_lkup_elem *list =
212 ((struct sw_meta *)meta)->list;
214 ((struct sw_meta *)meta)->lkups_num;
215 struct ice_adv_rule_info *rule_info =
216 &((struct sw_meta *)meta)->rule_info;
/* HW limit: a rule chain cannot hold more lookup words than this. */
218 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
219 rte_flow_error_set(error, EINVAL,
220 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
221 "item number too large for rule");
225 rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
227 "lookup list should not be NULL");
/* Program the rule into hardware; rule_added receives its id. */
230 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule id so destroy() can remove it later. */
232 filter_ptr = rte_zmalloc("ice_switch_filter",
233 sizeof(struct ice_rule_query_data), 0);
235 rte_flow_error_set(error, EINVAL,
236 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
237 "No memory for ice_switch_filter");
240 flow->rule = filter_ptr;
241 rte_memcpy(filter_ptr,
243 sizeof(struct ice_rule_query_data));
245 rte_flow_error_set(error, EINVAL,
246 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
247 "switch filter create flow fail");
/* Engine .destroy callback: remove the HW rule recorded in flow->rule
 * and free the bookkeeping struct allocated by ice_switch_create.
 * NOTE(review): interior lines (return type, braces, the assignment of
 * flow->rule into filter_ptr, the `ret` declaration and the return
 * statements) are missing from this dump — confirm against the full
 * source.
 */
263 ice_switch_destroy(struct ice_adapter *ad,
264 struct rte_flow *flow,
265 struct rte_flow_error *error)
267 struct ice_hw *hw = &ad->hw;
269 struct ice_rule_query_data *filter_ptr;
271 filter_ptr = (struct ice_rule_query_data *)
/* Error path: the flow handle carries no switch-filter rule data. */
275 rte_flow_error_set(error, EINVAL,
276 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
278 " create by switch filter");
/* Remove the rule from hardware by the id saved at create time. */
282 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
284 rte_flow_error_set(error, EINVAL,
285 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
286 "fail to destroy switch filter rule");
290 rte_free(filter_ptr);
/* Engine .free callback: release the per-flow rule data without
 * touching hardware (rte_free(NULL) is a no-op).
 * NOTE(review): the return-type line and braces are missing from this
 * dump.
 */
295 ice_switch_filter_rule_free(struct rte_flow *flow)
297 rte_free(flow->rule);
/* Walk the rte_flow pattern and translate each item into an
 * ice_adv_lkup_elem entry in `list`, accumulating the matched-field
 * bitmask in `input_set` (presumably the return value — the return
 * statement is not visible in this dump).
 * For each item type: spec+mask present -> fill header/mask unions and
 * set input-set bits; both absent -> emit a type-only element (protocol
 * selector).  `tunnel_valid` flips outer/inner element types
 * (OFOS vs IL) once a tunnel item has been seen.
 * NOTE(review): many interior lines are missing from this dump
 * (the return type, the `t`/`j` counters and `t` increments, several
 * mask-comparison right-hand sides, break statements and closing
 * braces) — confirm all control flow against the full source.
 */
301 ice_switch_inset_get(const struct rte_flow_item pattern[],
302 struct rte_flow_error *error,
303 struct ice_adv_lkup_elem *list,
305 enum ice_sw_tunnel_type tun_type)
307 const struct rte_flow_item *item = pattern;
308 enum rte_flow_item_type item_type;
309 const struct rte_flow_item_eth *eth_spec, *eth_mask;
310 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
311 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
312 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
313 const struct rte_flow_item_udp *udp_spec, *udp_mask;
314 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
315 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
316 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
317 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
318 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
/* All-ones mask used to test for fully-masked IPv6 addresses. */
319 uint8_t ipv6_addr_mask[16] = {
320 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
321 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
322 uint64_t input_set = ICE_INSET_NONE;
324 uint16_t tunnel_valid = 0;
327 for (item = pattern; item->type !=
328 RTE_FLOW_ITEM_TYPE_END; item++) {
/* Ranged matches are not supported by the switch filter. */
330 rte_flow_error_set(error, EINVAL,
331 RTE_FLOW_ERROR_TYPE_ITEM,
333 "Not support range");
336 item_type = item->type;
/* Ethernet: src/dst MAC and ethertype; a fully-masked ethertype
 * additionally emits a separate ICE_ETYPE_OL element.
 */
339 case RTE_FLOW_ITEM_TYPE_ETH:
340 eth_spec = item->spec;
341 eth_mask = item->mask;
342 if (eth_spec && eth_mask) {
344 rte_is_broadcast_ether_addr(&eth_mask->src))
345 input_set |= ICE_INSET_TUN_SMAC;
347 rte_is_broadcast_ether_addr(&eth_mask->src))
348 input_set |= ICE_INSET_SMAC;
350 rte_is_broadcast_ether_addr(&eth_mask->dst))
351 input_set |= ICE_INSET_TUN_DMAC;
353 rte_is_broadcast_ether_addr(&eth_mask->dst))
354 input_set |= ICE_INSET_DMAC;
355 if (eth_mask->type == RTE_BE16(0xffff))
356 input_set |= ICE_INSET_ETHERTYPE;
357 list[t].type = (tunnel_valid == 0) ?
358 ICE_MAC_OFOS : ICE_MAC_IL;
359 struct ice_ether_hdr *h;
360 struct ice_ether_hdr *m;
362 h = &list[t].h_u.eth_hdr;
363 m = &list[t].m_u.eth_hdr;
/* Copy only the fully-masked address bytes into header/mask. */
364 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
365 if (eth_mask->src.addr_bytes[j] ==
368 eth_spec->src.addr_bytes[j];
370 eth_mask->src.addr_bytes[j];
373 if (eth_mask->dst.addr_bytes[j] ==
376 eth_spec->dst.addr_bytes[j];
378 eth_mask->dst.addr_bytes[j];
/* Fully-masked ethertype consumes a second lookup element. */
384 if (eth_mask->type == UINT16_MAX) {
385 list[t].type = ICE_ETYPE_OL;
386 list[t].h_u.ethertype.ethtype_id =
388 list[t].m_u.ethertype.ethtype_id =
392 } else if (!eth_spec && !eth_mask) {
393 list[t].type = (tun_type == ICE_NON_TUN) ?
394 ICE_MAC_OFOS : ICE_MAC_IL;
/* IPv4: src/dst, TTL, protocol, TOS; version/length/id/checksum
 * in the mask are rejected.
 */
398 case RTE_FLOW_ITEM_TYPE_IPV4:
399 ipv4_spec = item->spec;
400 ipv4_mask = item->mask;
401 if (ipv4_spec && ipv4_mask) {
402 /* Check IPv4 mask and update input set */
403 if (ipv4_mask->hdr.version_ihl ||
404 ipv4_mask->hdr.total_length ||
405 ipv4_mask->hdr.packet_id ||
406 ipv4_mask->hdr.hdr_checksum) {
407 rte_flow_error_set(error, EINVAL,
408 RTE_FLOW_ERROR_TYPE_ITEM,
410 "Invalid IPv4 mask.");
/* Tunnel branch: record inner (TUN_*) input-set bits. */
415 if (ipv4_mask->hdr.type_of_service ==
418 ICE_INSET_TUN_IPV4_TOS;
419 if (ipv4_mask->hdr.src_addr ==
422 ICE_INSET_TUN_IPV4_SRC;
423 if (ipv4_mask->hdr.dst_addr ==
426 ICE_INSET_TUN_IPV4_DST;
427 if (ipv4_mask->hdr.time_to_live ==
430 ICE_INSET_TUN_IPV4_TTL;
431 if (ipv4_mask->hdr.next_proto_id ==
434 ICE_INSET_TUN_IPV4_PROTO;
/* Non-tunnel branch: record outer input-set bits. */
436 if (ipv4_mask->hdr.src_addr ==
438 input_set |= ICE_INSET_IPV4_SRC;
439 if (ipv4_mask->hdr.dst_addr ==
441 input_set |= ICE_INSET_IPV4_DST;
442 if (ipv4_mask->hdr.time_to_live ==
444 input_set |= ICE_INSET_IPV4_TTL;
445 if (ipv4_mask->hdr.next_proto_id ==
448 ICE_INSET_IPV4_PROTO;
449 if (ipv4_mask->hdr.type_of_service ==
454 list[t].type = (tunnel_valid == 0) ?
455 ICE_IPV4_OFOS : ICE_IPV4_IL;
456 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
457 list[t].h_u.ipv4_hdr.src_addr =
458 ipv4_spec->hdr.src_addr;
459 list[t].m_u.ipv4_hdr.src_addr =
462 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
463 list[t].h_u.ipv4_hdr.dst_addr =
464 ipv4_spec->hdr.dst_addr;
465 list[t].m_u.ipv4_hdr.dst_addr =
468 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
469 list[t].h_u.ipv4_hdr.time_to_live =
470 ipv4_spec->hdr.time_to_live;
471 list[t].m_u.ipv4_hdr.time_to_live =
474 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
475 list[t].h_u.ipv4_hdr.protocol =
476 ipv4_spec->hdr.next_proto_id;
477 list[t].m_u.ipv4_hdr.protocol =
480 if (ipv4_mask->hdr.type_of_service ==
482 list[t].h_u.ipv4_hdr.tos =
483 ipv4_spec->hdr.type_of_service;
484 list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
487 } else if (!ipv4_spec && !ipv4_mask) {
488 list[t].type = (tunnel_valid == 0) ?
489 ICE_IPV4_OFOS : ICE_IPV4_IL;
/* IPv6: src/dst, next header, hop limit, traffic class;
 * payload_len in the mask is rejected.
 */
493 case RTE_FLOW_ITEM_TYPE_IPV6:
494 ipv6_spec = item->spec;
495 ipv6_mask = item->mask;
496 if (ipv6_spec && ipv6_mask) {
497 if (ipv6_mask->hdr.payload_len) {
498 rte_flow_error_set(error, EINVAL,
499 RTE_FLOW_ERROR_TYPE_ITEM,
501 "Invalid IPv6 mask");
506 if (!memcmp(ipv6_mask->hdr.src_addr,
508 RTE_DIM(ipv6_mask->hdr.src_addr)))
510 ICE_INSET_TUN_IPV6_SRC;
511 if (!memcmp(ipv6_mask->hdr.dst_addr,
513 RTE_DIM(ipv6_mask->hdr.dst_addr)))
515 ICE_INSET_TUN_IPV6_DST;
516 if (ipv6_mask->hdr.proto == UINT8_MAX)
518 ICE_INSET_TUN_IPV6_NEXT_HDR;
519 if (ipv6_mask->hdr.hop_limits ==
522 ICE_INSET_TUN_IPV6_HOP_LIMIT;
523 if ((ipv6_mask->hdr.vtc_flow &
525 (RTE_IPV6_HDR_TC_MASK))
527 (RTE_IPV6_HDR_TC_MASK))
529 ICE_INSET_TUN_IPV6_TC;
531 if (!memcmp(ipv6_mask->hdr.src_addr,
533 RTE_DIM(ipv6_mask->hdr.src_addr)))
534 input_set |= ICE_INSET_IPV6_SRC;
535 if (!memcmp(ipv6_mask->hdr.dst_addr,
537 RTE_DIM(ipv6_mask->hdr.dst_addr)))
538 input_set |= ICE_INSET_IPV6_DST;
539 if (ipv6_mask->hdr.proto == UINT8_MAX)
541 ICE_INSET_IPV6_NEXT_HDR;
542 if (ipv6_mask->hdr.hop_limits ==
545 ICE_INSET_IPV6_HOP_LIMIT;
546 if ((ipv6_mask->hdr.vtc_flow &
548 (RTE_IPV6_HDR_TC_MASK))
550 (RTE_IPV6_HDR_TC_MASK))
551 input_set |= ICE_INSET_IPV6_TC;
553 list[t].type = (tunnel_valid == 0) ?
554 ICE_IPV6_OFOS : ICE_IPV6_IL;
555 struct ice_ipv6_hdr *f;
556 struct ice_ipv6_hdr *s;
557 f = &list[t].h_u.ipv6_hdr;
558 s = &list[t].m_u.ipv6_hdr;
/* Copy only the fully-masked IPv6 address bytes. */
559 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
560 if (ipv6_mask->hdr.src_addr[j] ==
563 ipv6_spec->hdr.src_addr[j];
565 ipv6_mask->hdr.src_addr[j];
567 if (ipv6_mask->hdr.dst_addr[j] ==
570 ipv6_spec->hdr.dst_addr[j];
572 ipv6_mask->hdr.dst_addr[j];
575 if (ipv6_mask->hdr.proto == UINT8_MAX) {
577 ipv6_spec->hdr.proto;
578 s->next_hdr = UINT8_MAX;
580 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
582 ipv6_spec->hdr.hop_limits;
583 s->hop_limit = UINT8_MAX;
/* Traffic class is extracted from the CPU-order vtc_flow word. */
585 if ((ipv6_mask->hdr.vtc_flow &
587 (RTE_IPV6_HDR_TC_MASK))
589 (RTE_IPV6_HDR_TC_MASK)) {
590 f->tc = (rte_be_to_cpu_32
591 (ipv6_spec->hdr.vtc_flow) &
592 RTE_IPV6_HDR_TC_MASK) >>
593 RTE_IPV6_HDR_TC_SHIFT;
597 } else if (!ipv6_spec && !ipv6_mask) {
/* NOTE(review): this type-only IPv6 branch sets ICE_IPV4_OFOS/
 * ICE_IPV4_IL rather than the IPv6 element types — looks like a
 * copy-paste defect; verify against the upstream source before
 * changing (the surrounding lines are incomplete here).
 */
598 list[t].type = (tun_type == ICE_NON_TUN) ?
599 ICE_IPV4_OFOS : ICE_IPV4_IL;
/* UDP: src/dst ports; len/checksum in the mask are rejected.
 * For a VXLAN tunnel the outer UDP is ICE_UDP_OF, else ICE_UDP_ILOS.
 */
603 case RTE_FLOW_ITEM_TYPE_UDP:
604 udp_spec = item->spec;
605 udp_mask = item->mask;
606 if (udp_spec && udp_mask) {
607 /* Check UDP mask and update input set*/
608 if (udp_mask->hdr.dgram_len ||
609 udp_mask->hdr.dgram_cksum) {
610 rte_flow_error_set(error, EINVAL,
611 RTE_FLOW_ERROR_TYPE_ITEM,
618 if (udp_mask->hdr.src_port ==
621 ICE_INSET_TUN_UDP_SRC_PORT;
622 if (udp_mask->hdr.dst_port ==
625 ICE_INSET_TUN_UDP_DST_PORT;
627 if (udp_mask->hdr.src_port ==
630 ICE_INSET_UDP_SRC_PORT;
631 if (udp_mask->hdr.dst_port ==
634 ICE_INSET_UDP_DST_PORT;
636 if (tun_type == ICE_SW_TUN_VXLAN &&
638 list[t].type = ICE_UDP_OF;
640 list[t].type = ICE_UDP_ILOS;
641 if (udp_mask->hdr.src_port == UINT16_MAX) {
642 list[t].h_u.l4_hdr.src_port =
643 udp_spec->hdr.src_port;
644 list[t].m_u.l4_hdr.src_port =
645 udp_mask->hdr.src_port;
647 if (udp_mask->hdr.dst_port == UINT16_MAX) {
648 list[t].h_u.l4_hdr.dst_port =
649 udp_spec->hdr.dst_port;
650 list[t].m_u.l4_hdr.dst_port =
651 udp_mask->hdr.dst_port;
654 } else if (!udp_spec && !udp_mask) {
655 list[t].type = ICE_UDP_ILOS;
/* TCP: src/dst ports only; all other TCP header fields in the
 * mask are rejected.
 */
659 case RTE_FLOW_ITEM_TYPE_TCP:
660 tcp_spec = item->spec;
661 tcp_mask = item->mask;
662 if (tcp_spec && tcp_mask) {
663 /* Check TCP mask and update input set */
664 if (tcp_mask->hdr.sent_seq ||
665 tcp_mask->hdr.recv_ack ||
666 tcp_mask->hdr.data_off ||
667 tcp_mask->hdr.tcp_flags ||
668 tcp_mask->hdr.rx_win ||
669 tcp_mask->hdr.cksum ||
670 tcp_mask->hdr.tcp_urp) {
671 rte_flow_error_set(error, EINVAL,
672 RTE_FLOW_ERROR_TYPE_ITEM,
679 if (tcp_mask->hdr.src_port ==
682 ICE_INSET_TUN_TCP_SRC_PORT;
683 if (tcp_mask->hdr.dst_port ==
686 ICE_INSET_TUN_TCP_DST_PORT;
688 if (tcp_mask->hdr.src_port ==
691 ICE_INSET_TCP_SRC_PORT;
692 if (tcp_mask->hdr.dst_port ==
695 ICE_INSET_TCP_DST_PORT;
697 list[t].type = ICE_TCP_IL;
698 if (tcp_mask->hdr.src_port == UINT16_MAX) {
699 list[t].h_u.l4_hdr.src_port =
700 tcp_spec->hdr.src_port;
701 list[t].m_u.l4_hdr.src_port =
702 tcp_mask->hdr.src_port;
704 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
705 list[t].h_u.l4_hdr.dst_port =
706 tcp_spec->hdr.dst_port;
707 list[t].m_u.l4_hdr.dst_port =
708 tcp_mask->hdr.dst_port;
711 } else if (!tcp_spec && !tcp_mask) {
712 list[t].type = ICE_TCP_IL;
/* SCTP: src/dst ports; checksum in the mask is rejected. */
716 case RTE_FLOW_ITEM_TYPE_SCTP:
717 sctp_spec = item->spec;
718 sctp_mask = item->mask;
719 if (sctp_spec && sctp_mask) {
720 /* Check SCTP mask and update input set */
721 if (sctp_mask->hdr.cksum) {
722 rte_flow_error_set(error, EINVAL,
723 RTE_FLOW_ERROR_TYPE_ITEM,
725 "Invalid SCTP mask");
730 if (sctp_mask->hdr.src_port ==
733 ICE_INSET_TUN_SCTP_SRC_PORT;
734 if (sctp_mask->hdr.dst_port ==
737 ICE_INSET_TUN_SCTP_DST_PORT;
739 if (sctp_mask->hdr.src_port ==
742 ICE_INSET_SCTP_SRC_PORT;
743 if (sctp_mask->hdr.dst_port ==
746 ICE_INSET_SCTP_DST_PORT;
748 list[t].type = ICE_SCTP_IL;
749 if (sctp_mask->hdr.src_port == UINT16_MAX) {
750 list[t].h_u.sctp_hdr.src_port =
751 sctp_spec->hdr.src_port;
752 list[t].m_u.sctp_hdr.src_port =
753 sctp_mask->hdr.src_port;
755 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
756 list[t].h_u.sctp_hdr.dst_port =
757 sctp_spec->hdr.dst_port;
758 list[t].m_u.sctp_hdr.dst_port =
759 sctp_mask->hdr.dst_port;
762 } else if (!sctp_spec && !sctp_mask) {
763 list[t].type = ICE_SCTP_IL;
/* VXLAN: spec and mask must be both present or both absent;
 * a fully-masked 24-bit VNI is packed into tnl_hdr.vni.
 */
767 case RTE_FLOW_ITEM_TYPE_VXLAN:
768 vxlan_spec = item->spec;
769 vxlan_mask = item->mask;
770 /* Check if VXLAN item is used to describe protocol.
771 * If yes, both spec and mask should be NULL.
772 * If no, both spec and mask shouldn't be NULL.
774 if ((!vxlan_spec && vxlan_mask) ||
775 (vxlan_spec && !vxlan_mask)) {
776 rte_flow_error_set(error, EINVAL,
777 RTE_FLOW_ERROR_TYPE_ITEM,
779 "Invalid VXLAN item");
784 if (vxlan_spec && vxlan_mask) {
785 list[t].type = ICE_VXLAN;
786 if (vxlan_mask->vni[0] == UINT8_MAX &&
787 vxlan_mask->vni[1] == UINT8_MAX &&
788 vxlan_mask->vni[2] == UINT8_MAX) {
789 list[t].h_u.tnl_hdr.vni =
790 (vxlan_spec->vni[2] << 16) |
791 (vxlan_spec->vni[1] << 8) |
793 list[t].m_u.tnl_hdr.vni =
796 ICE_INSET_TUN_VXLAN_VNI;
799 } else if (!vxlan_spec && !vxlan_mask) {
800 list[t].type = ICE_VXLAN;
/* NVGRE: same spec/mask pairing rule; fully-masked 24-bit TNI
 * is packed into nvgre_hdr.tni_flow.
 */
804 case RTE_FLOW_ITEM_TYPE_NVGRE:
805 nvgre_spec = item->spec;
806 nvgre_mask = item->mask;
807 /* Check if NVGRE item is used to describe protocol.
808 * If yes, both spec and mask should be NULL.
809 * If no, both spec and mask shouldn't be NULL.
811 if ((!nvgre_spec && nvgre_mask) ||
812 (nvgre_spec && !nvgre_mask)) {
813 rte_flow_error_set(error, EINVAL,
814 RTE_FLOW_ERROR_TYPE_ITEM,
816 "Invalid NVGRE item");
820 if (nvgre_spec && nvgre_mask) {
821 list[t].type = ICE_NVGRE;
822 if (nvgre_mask->tni[0] == UINT8_MAX &&
823 nvgre_mask->tni[1] == UINT8_MAX &&
824 nvgre_mask->tni[2] == UINT8_MAX) {
825 list[t].h_u.nvgre_hdr.tni_flow =
826 (nvgre_spec->tni[2] << 16) |
827 (nvgre_spec->tni[1] << 8) |
829 list[t].m_u.nvgre_hdr.tni_flow =
832 ICE_INSET_TUN_NVGRE_TNI;
835 } else if (!nvgre_spec && !nvgre_mask) {
836 list[t].type = ICE_NVGRE;
/* VLAN: TCI and inner ethertype; both match cases set
 * ICE_INSET_VLAN_OUTER.
 */
840 case RTE_FLOW_ITEM_TYPE_VLAN:
841 vlan_spec = item->spec;
842 vlan_mask = item->mask;
843 /* Check if VLAN item is used to describe protocol.
844 * If yes, both spec and mask should be NULL.
845 * If no, both spec and mask shouldn't be NULL.
847 if ((!vlan_spec && vlan_mask) ||
848 (vlan_spec && !vlan_mask)) {
849 rte_flow_error_set(error, EINVAL,
850 RTE_FLOW_ERROR_TYPE_ITEM,
852 "Invalid VLAN item");
855 if (vlan_spec && vlan_mask) {
856 list[t].type = ICE_VLAN_OFOS;
857 if (vlan_mask->tci == UINT16_MAX) {
858 list[t].h_u.vlan_hdr.vlan =
860 list[t].m_u.vlan_hdr.vlan =
862 input_set |= ICE_INSET_VLAN_OUTER;
864 if (vlan_mask->inner_type == UINT16_MAX) {
865 list[t].h_u.vlan_hdr.type =
866 vlan_spec->inner_type;
867 list[t].m_u.vlan_hdr.type =
869 input_set |= ICE_INSET_VLAN_OUTER;
872 } else if (!vlan_spec && !vlan_mask) {
873 list[t].type = ICE_VLAN_OFOS;
/* PPPoE items are protocol selectors only: any spec or mask on
 * them is rejected.
 */
877 case RTE_FLOW_ITEM_TYPE_PPPOED:
878 case RTE_FLOW_ITEM_TYPE_PPPOES:
879 pppoe_spec = item->spec;
880 pppoe_mask = item->mask;
881 /* Check if PPPoE item is used to describe protocol.
882 * If yes, both spec and mask should be NULL.
884 if (pppoe_spec || pppoe_mask) {
885 rte_flow_error_set(error, EINVAL,
886 RTE_FLOW_ERROR_TYPE_ITEM,
888 "Invalid pppoe item");
893 case RTE_FLOW_ITEM_TYPE_VOID:
/* Any other item type is unsupported by this parser. */
897 rte_flow_error_set(error, EINVAL,
898 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
899 "Invalid pattern item.");
/* Translate rte_flow actions into the switch-rule action fields of
 * rule_info.  Supports RSS (interpreted as a queue group), QUEUE,
 * DROP and VOID.  Queue indices are offset by the PF's base queue.
 * NOTE(review): interior lines (return type, braces, the goto/error
 * labels the bare statements after failed checks presumably jump to,
 * and the final return) are missing from this dump — confirm control
 * flow against the full source.
 */
913 ice_switch_parse_action(struct ice_pf *pf,
914 const struct rte_flow_action *actions,
915 struct rte_flow_error *error,
916 struct ice_adv_rule_info *rule_info)
918 struct ice_vsi *vsi = pf->main_vsi;
919 struct rte_eth_dev *dev = pf->adapter->eth_dev;
920 const struct rte_flow_action_queue *act_q;
921 const struct rte_flow_action_rss *act_qgrop;
922 uint16_t base_queue, i;
923 const struct rte_flow_action *action;
924 enum rte_flow_action_type action_type;
/* Only power-of-two group sizes from 2 to 128 are accepted. */
925 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
926 2, 4, 8, 16, 32, 64, 128};
928 base_queue = pf->base_queue;
929 for (action = actions; action->type !=
930 RTE_FLOW_ACTION_TYPE_END; action++) {
931 action_type = action->type;
932 switch (action_type) {
/* RSS action: forward to a queue group starting at queue[0]. */
933 case RTE_FLOW_ACTION_TYPE_RSS:
934 act_qgrop = action->conf;
935 rule_info->sw_act.fltr_act =
937 rule_info->sw_act.fwd_id.q_id =
938 base_queue + act_qgrop->queue[0];
/* queue_num must be one of the valid group sizes above. */
939 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
940 if (act_qgrop->queue_num ==
941 valid_qgrop_number[i])
944 if (i == MAX_QGRP_NUM_TYPE)
/* The group must fit within the configured Rx queues ... */
946 if ((act_qgrop->queue[0] +
947 act_qgrop->queue_num) >
948 dev->data->nb_rx_queues)
/* ... and the listed queues must be contiguous. */
950 for (i = 0; i < act_qgrop->queue_num - 1; i++)
951 if (act_qgrop->queue[i + 1] !=
952 act_qgrop->queue[i] + 1)
954 rule_info->sw_act.qgrp_size =
955 act_qgrop->queue_num;
/* QUEUE action: forward to one queue, bounds-checked. */
957 case RTE_FLOW_ACTION_TYPE_QUEUE:
958 act_q = action->conf;
959 if (act_q->index >= dev->data->nb_rx_queues)
961 rule_info->sw_act.fltr_act =
963 rule_info->sw_act.fwd_id.q_id =
964 base_queue + act_q->index;
967 case RTE_FLOW_ACTION_TYPE_DROP:
968 rule_info->sw_act.fltr_act =
972 case RTE_FLOW_ACTION_TYPE_VOID:
/* Common rule attributes: bind to the main VSI. */
980 rule_info->sw_act.vsi_handle = vsi->idx;
982 rule_info->sw_act.src = vsi->idx;
/* NOTE(review): meaning of priority value 5 is not visible here —
 * confirm against ice_adv_rule_info documentation.
 */
983 rule_info->priority = 5;
988 rte_flow_error_set(error,
989 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
991 "Invalid action type or queue number");
/* Parser entry point: validate a (pattern, actions) pair against the
 * parser's pattern table, build the lookup list and rule info, and
 * hand them back to the flow framework through *meta as a sw_meta.
 * On success the caller owns sw_meta (freed later by the engine); on
 * failure the local allocations are released on the (not fully
 * visible) error path.
 * NOTE(review): interior lines (return type, braces, the `meta`
 * parameter declaration, item_num counting, goto labels/returns, and
 * some null checks) are missing from this dump — confirm against the
 * full source.
 */
996 ice_switch_parse_pattern_action(struct ice_adapter *ad,
997 struct ice_pattern_match_item *array,
999 const struct rte_flow_item pattern[],
1000 const struct rte_flow_action actions[],
1002 struct rte_flow_error *error)
1004 struct ice_pf *pf = &ad->pf;
1005 uint64_t inputset = 0;
1007 struct sw_meta *sw_meta_ptr = NULL;
1008 struct ice_adv_rule_info rule_info;
1009 struct ice_adv_lkup_elem *list = NULL;
1010 uint16_t lkups_num = 0;
1011 const struct rte_flow_item *item = pattern;
1012 uint16_t item_num = 0;
1013 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1014 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass: count items and classify the tunnel type. */
1016 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1018 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1019 tun_type = ICE_SW_TUN_VXLAN;
1020 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1021 tun_type = ICE_SW_TUN_NVGRE;
1022 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1023 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1024 tun_type = ICE_SW_TUN_PPPOE;
1025 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1026 const struct rte_flow_item_eth *eth_mask;
1028 eth_mask = item->mask;
/* A fully-masked ethertype means the rule must hit both
 * tunnel and non-tunnel traffic.
 */
1031 if (eth_mask->type == UINT16_MAX)
1032 tun_type = ICE_SW_TUN_AND_NON_TUN;
1034 /* reserve one more memory slot for ETH which may
1035 * consume 2 lookup items.
/* (ETH can emit both a MAC and an ETYPE_OL element — see
 * ice_switch_inset_get.)
 */
1037 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1041 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1043 rte_flow_error_set(error, EINVAL,
1044 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1045 "No memory for PMD internal items");
1049 rule_info.tun_type = tun_type;
1052 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1054 rte_flow_error_set(error, EINVAL,
1055 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1056 "No memory for sw_pattern_meta_ptr");
/* Look up the pattern in this parser's supported-pattern table. */
1060 pattern_match_item =
1061 ice_search_pattern_match_item(pattern, array, array_len, error);
1062 if (!pattern_match_item) {
1063 rte_flow_error_set(error, EINVAL,
1064 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1065 "Invalid input pattern");
/* Second pass: build the lookup list and input-set bitmask, then
 * reject fields outside the pattern's allowed input set.
 */
1069 inputset = ice_switch_inset_get
1070 (pattern, error, list, &lkups_num, tun_type);
1071 if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
1072 rte_flow_error_set(error, EINVAL,
1073 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1075 "Invalid input set");
1079 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1081 rte_flow_error_set(error, EINVAL,
1082 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1083 "Invalid input action");
/* Success: transfer list/lkups_num/rule_info to the caller. */
1086 *meta = sw_meta_ptr;
1087 ((struct sw_meta *)*meta)->list = list;
1088 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1089 ((struct sw_meta *)*meta)->rule_info = rule_info;
1090 rte_free(pattern_match_item);
/* Error path: release local allocations (rte_free(NULL) is a no-op). */
1096 rte_free(sw_meta_ptr);
1097 rte_free(pattern_match_item);
/* Engine .query_count callback: the switch filter has no flow
 * counters, so a COUNT query always fails with EINVAL.
 * NOTE(review): the return-type line, braces and return statement are
 * missing from this dump.
 */
1103 ice_switch_query(struct ice_adapter *ad __rte_unused,
1104 struct rte_flow *flow __rte_unused,
1105 struct rte_flow_query_count *count __rte_unused,
1106 struct rte_flow_error *error)
1108 rte_flow_error_set(error, EINVAL,
1109 RTE_FLOW_ERROR_TYPE_HANDLE,
1111 "count action not supported by switch filter");
/* Engine .init callback: register exactly one parser — the permission
 * parser when devargs pipe-mode is enabled, otherwise the distributor
 * parser matching the loaded DDP package (COMMS vs OS default).
 * NOTE(review): return-type line, braces, `ret` declaration and return
 * are missing from this dump.
 */
1117 ice_switch_init(struct ice_adapter *ad)
1120 struct ice_flow_parser *dist_parser;
1121 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1123 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1124 dist_parser = &ice_switch_dist_parser_comms;
1126 dist_parser = &ice_switch_dist_parser_os;
1128 if (ad->devargs.pipe_mode_support)
1129 ret = ice_register_parser(perm_parser, ad);
1131 ret = ice_register_parser(dist_parser, ad);
/* Engine .uninit callback: mirror of ice_switch_init — unregister the
 * same parser that init selected for this adapter.
 * NOTE(review): return-type line and braces are missing from this dump.
 */
1136 ice_switch_uninit(struct ice_adapter *ad)
1138 struct ice_flow_parser *dist_parser;
1139 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1141 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1142 dist_parser = &ice_switch_dist_parser_comms;
1144 dist_parser = &ice_switch_dist_parser_os;
1146 if (ad->devargs.pipe_mode_support)
1147 ice_unregister_parser(perm_parser, ad);
1149 ice_unregister_parser(dist_parser, ad);
/* Flow-engine descriptor binding the switch-filter callbacks defined
 * above into the generic ice flow framework.
 */
1153 ice_flow_engine ice_switch_engine = {
1154 .init = ice_switch_init,
1155 .uninit = ice_switch_uninit,
1156 .create = ice_switch_create,
1157 .destroy = ice_switch_destroy,
1158 .query_count = ice_switch_query,
1159 .free = ice_switch_filter_rule_free,
1160 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser for the OS default DDP package. */
1164 ice_flow_parser ice_switch_dist_parser_os = {
1165 .engine = &ice_switch_engine,
1166 .array = ice_switch_pattern_dist_os,
1167 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1168 .parse_pattern_action = ice_switch_parse_pattern_action,
1169 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser for the COMMS DDP package (adds PPPoE). */
1173 ice_flow_parser ice_switch_dist_parser_comms = {
1174 .engine = &ice_switch_engine,
1175 .array = ice_switch_pattern_dist_comms,
1176 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1177 .parse_pattern_action = ice_switch_parse_pattern_action,
1178 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser used when pipe-mode devargs is enabled. */
1182 ice_flow_parser ice_switch_perm_parser = {
1183 .engine = &ice_switch_engine,
1184 .array = ice_switch_pattern_perm,
1185 .array_len = RTE_DIM(ice_switch_pattern_perm),
1186 .parse_pattern_action = ice_switch_parse_pattern_action,
1187 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the flow framework at
 * shared-library/program load time.
 * NOTE(review): the closing brace is not visible in this dump.
 */
1190 RTE_INIT(ice_sw_engine_init)
1192 struct ice_flow_engine *engine = &ice_switch_engine;
1193 ice_register_flow_engine(engine);