1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
/* Number of valid queue-group sizes accepted by the RSS queue-group
 * action (matches the entry count of valid_qgrop_number[] below).
 */
28 #define MAX_QGRP_NUM_TYPE 7
/* Input-set bitmaps: for each supported flow pattern, the set of
 * protocol fields the switch filter is allowed to match on.
 * NOTE(review): some macro continuation lines are elided in this
 * listing (original line numbers are non-contiguous).
 */
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Distributor-stage tunnel input sets: inner (TUN_*) fields plus the
 * tunnel id (VNI/TNI) and an outer IPv4 destination.
 */
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Permission-stage tunnel input sets: inner (TUN_*) fields only — no
 * tunnel id and no outer fields.
 */
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets: VLAN tags, DMAC, ethertype and PPPoE session id,
 * optionally extended with the PPP protocol id.
 */
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
/* NOTE(review): the two lines below are fields of the per-flow parser
 * metadata struct (struct sw_meta, used via casts throughout this
 * file); its opening declaration and the lkups_num field are elided in
 * this listing.  `list` is the lookup-element array handed to
 * ice_add_adv_rule(), `rule_info` the accompanying rule description.
 */
100 struct ice_adv_lkup_elem *list;
102 struct ice_adv_rule_info rule_info;
/* Forward declarations — the three parser variants are defined at the
 * bottom of this file.
 */
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
/* Patterns accepted by the distributor-stage parser when the COMMS DDP
 * package is active: plain L2/L3/L4, VXLAN/NVGRE tunnels and PPPoE.
 * Each entry pairs a pattern with its allowed input-set mask.
 * NOTE(review): several `{pattern_...,` opener lines are elided in this
 * listing, so some mask lines below appear without their pattern line.
 */
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
112 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113 {pattern_ethertype_vlan,
114 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
116 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp,
118 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_tcp,
120 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
122 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123 {pattern_eth_ipv6_udp,
124 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv6_tcp,
126 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_nvgre_eth_ipv4,
134 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
140 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141 {pattern_eth_vlan_pppoed,
142 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
144 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145 {pattern_eth_vlan_pppoes,
146 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147 {pattern_eth_pppoes_proto,
148 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149 {pattern_eth_vlan_pppoes_proto,
150 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
/* Patterns accepted by the distributor-stage parser with the OS-default
 * DDP package: same as the COMMS table minus the PPPoE entries.
 * NOTE(review): some `{pattern_...,` opener lines are elided in this
 * listing.
 */
154 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
156 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
157 {pattern_ethertype_vlan,
158 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
160 ICE_INSET_NONE, ICE_INSET_NONE},
162 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
163 {pattern_eth_ipv4_udp,
164 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
165 {pattern_eth_ipv4_tcp,
166 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
168 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
169 {pattern_eth_ipv6_udp,
170 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
171 {pattern_eth_ipv6_tcp,
172 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
173 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
174 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
175 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
176 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
177 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
178 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
179 {pattern_eth_ipv4_nvgre_eth_ipv4,
180 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
181 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
182 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
183 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
184 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* Patterns accepted by the permission-stage parser (pipe-mode): tunnel
 * patterns here use the PERM_TUNNEL input sets (inner fields only).
 * NOTE(review): some `{pattern_...,` opener lines are elided in this
 * listing.
 */
188 ice_pattern_match_item ice_switch_pattern_perm[] = {
189 {pattern_ethertype_vlan,
190 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
192 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
193 {pattern_eth_ipv4_udp,
194 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
195 {pattern_eth_ipv4_tcp,
196 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
198 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
199 {pattern_eth_ipv6_udp,
200 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
201 {pattern_eth_ipv6_tcp,
202 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
203 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
204 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
205 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
206 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
207 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
208 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
209 {pattern_eth_ipv4_nvgre_eth_ipv4,
210 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
211 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
212 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
214 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Engine `create` hook: program the rule parsed into `meta` (a struct
 * sw_meta produced by ice_switch_parse_pattern_action) into hardware
 * via ice_add_adv_rule(), then attach a heap-allocated copy of the
 * returned rule id to flow->rule so destroy/free can find it later.
 * NOTE(review): return statements, goto/error labels and several other
 * lines are elided in this listing; only the visible statements are
 * documented.
 */
218 ice_switch_create(struct ice_adapter *ad,
219 struct rte_flow *flow,
221 struct rte_flow_error *error)
224 struct ice_pf *pf = &ad->pf;
225 struct ice_hw *hw = ICE_PF_TO_HW(pf);
226 struct ice_rule_query_data rule_added = {0};
227 struct ice_rule_query_data *filter_ptr;
/* Unpack the parser metadata prepared during the parse step. */
228 struct ice_adv_lkup_elem *list =
229 ((struct sw_meta *)meta)->list;
231 ((struct sw_meta *)meta)->lkups_num;
232 struct ice_adv_rule_info *rule_info =
233 &((struct sw_meta *)meta)->rule_info;
/* Hardware limit on lookup words per advanced rule. */
235 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
236 rte_flow_error_set(error, EINVAL,
237 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
238 "item number too large for rule");
242 rte_flow_error_set(error, EINVAL,
243 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
244 "lookup list should not be NULL");
/* Program the rule; rule_added receives the hardware rule id. */
247 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule id on the flow handle (freed in
 * ice_switch_filter_rule_free / destroy).
 */
249 filter_ptr = rte_zmalloc("ice_switch_filter",
250 sizeof(struct ice_rule_query_data), 0);
252 rte_flow_error_set(error, EINVAL,
253 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
254 "No memory for ice_switch_filter");
257 flow->rule = filter_ptr;
258 rte_memcpy(filter_ptr,
260 sizeof(struct ice_rule_query_data));
262 rte_flow_error_set(error, EINVAL,
263 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
264 "switch filter create flow fail");
/* Engine `destroy` hook: remove the hardware rule identified by the
 * ice_rule_query_data stored on flow->rule, then free that storage.
 * NOTE(review): return statements and some lines are elided in this
 * listing.
 */
280 ice_switch_destroy(struct ice_adapter *ad,
281 struct rte_flow *flow,
282 struct rte_flow_error *error)
284 struct ice_hw *hw = &ad->hw;
286 struct ice_rule_query_data *filter_ptr;
288 filter_ptr = (struct ice_rule_query_data *)
/* Error path: flow->rule was not populated by this engine. */
292 rte_flow_error_set(error, EINVAL,
293 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
295 " create by switch filter");
/* Remove the rule from hardware by its recorded id. */
299 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
301 rte_flow_error_set(error, EINVAL,
302 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
303 "fail to destroy switch filter rule");
307 rte_free(filter_ptr);
/* Engine `free` hook: release the rule-id copy attached to the flow by
 * ice_switch_create (rte_free(NULL) is a no-op).
 */
312 ice_switch_filter_rule_free(struct rte_flow *flow)
314 rte_free(flow->rule);
/* Walk an rte_flow pattern and translate it into the `list` of
 * ice_adv_lkup_elem lookup entries consumed by ice_add_adv_rule(),
 * while accumulating the matched-field bitmap (input_set) that the
 * caller validates against the pattern table's input_set_mask.
 * Outer vs inner headers are distinguished via `tunnel_valid`, which
 * is set once a tunnel item (VXLAN/NVGRE) has been seen; field bits
 * then switch to their ICE_INSET_TUN_* counterparts and list types to
 * the *_IL (inner-layer) variants.
 * NOTE(review): many lines (range checks, `t` increments, assignments,
 * break statements, closing braces, final return) are elided in this
 * listing; comments below describe only the visible statements.
 */
318 ice_switch_inset_get(const struct rte_flow_item pattern[],
319 struct rte_flow_error *error,
320 struct ice_adv_lkup_elem *list,
322 enum ice_sw_tunnel_type tun_type)
324 const struct rte_flow_item *item = pattern;
325 enum rte_flow_item_type item_type;
326 const struct rte_flow_item_eth *eth_spec, *eth_mask;
327 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
328 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
329 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
330 const struct rte_flow_item_udp *udp_spec, *udp_mask;
331 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
332 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
333 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
334 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
335 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
336 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
338 uint64_t input_set = ICE_INSET_NONE;
340 uint16_t tunnel_valid = 0;
341 uint16_t pppoe_valid = 0;
344 for (item = pattern; item->type !=
345 RTE_FLOW_ITEM_TYPE_END; item++) {
/* Ranged matches (item->last) are not supported. */
347 rte_flow_error_set(error, EINVAL,
348 RTE_FLOW_ERROR_TYPE_ITEM,
350 "Not support range");
353 item_type = item->type;
/* --- Ethernet: MAC addresses and ethertype. --- */
356 case RTE_FLOW_ITEM_TYPE_ETH:
357 eth_spec = item->spec;
358 eth_mask = item->mask;
359 if (eth_spec && eth_mask) {
/* First pass over mask bytes: decide tunnel vs non-tunnel
 * SMAC/DMAC input-set bits (bit-setting lines elided).
 */
360 const uint8_t *a = eth_mask->src.addr_bytes;
361 const uint8_t *b = eth_mask->dst.addr_bytes;
362 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
363 if (a[j] && tunnel_valid) {
373 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
374 if (b[j] && tunnel_valid) {
385 input_set |= ICE_INSET_ETHERTYPE;
/* Outer (OFOS) vs inner (IL) MAC header lookup entry. */
386 list[t].type = (tunnel_valid == 0) ?
387 ICE_MAC_OFOS : ICE_MAC_IL;
388 struct ice_ether_hdr *h;
389 struct ice_ether_hdr *m;
391 h = &list[t].h_u.eth_hdr;
392 m = &list[t].m_u.eth_hdr;
/* Copy masked MAC bytes into the lookup header/mask. */
393 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
394 if (eth_mask->src.addr_bytes[j]) {
396 eth_spec->src.addr_bytes[j];
398 eth_mask->src.addr_bytes[j];
401 if (eth_mask->dst.addr_bytes[j]) {
403 eth_spec->dst.addr_bytes[j];
405 eth_mask->dst.addr_bytes[j];
/* Ethertype consumes a second lookup entry (ICE_ETYPE_OL) —
 * this is why the caller reserves an extra slot per ETH item.
 */
411 if (eth_mask->type) {
412 list[t].type = ICE_ETYPE_OL;
413 list[t].h_u.ethertype.ethtype_id =
415 list[t].m_u.ethertype.ethtype_id =
/* --- IPv4: addresses, TTL, protocol, TOS. --- */
422 case RTE_FLOW_ITEM_TYPE_IPV4:
423 ipv4_spec = item->spec;
424 ipv4_mask = item->mask;
425 if (ipv4_spec && ipv4_mask) {
426 /* Check IPv4 mask and update input set */
427 if (ipv4_mask->hdr.version_ihl ||
428 ipv4_mask->hdr.total_length ||
429 ipv4_mask->hdr.packet_id ||
430 ipv4_mask->hdr.hdr_checksum) {
431 rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ITEM,
434 "Invalid IPv4 mask.");
/* Tunnel branch: record inner-IPv4 (TUN_*) input-set bits. */
439 if (ipv4_mask->hdr.type_of_service)
441 ICE_INSET_TUN_IPV4_TOS;
442 if (ipv4_mask->hdr.src_addr)
444 ICE_INSET_TUN_IPV4_SRC;
445 if (ipv4_mask->hdr.dst_addr)
447 ICE_INSET_TUN_IPV4_DST;
448 if (ipv4_mask->hdr.time_to_live)
450 ICE_INSET_TUN_IPV4_TTL;
451 if (ipv4_mask->hdr.next_proto_id)
453 ICE_INSET_TUN_IPV4_PROTO;
/* Non-tunnel branch: outer IPv4 input-set bits. */
455 if (ipv4_mask->hdr.src_addr)
456 input_set |= ICE_INSET_IPV4_SRC;
457 if (ipv4_mask->hdr.dst_addr)
458 input_set |= ICE_INSET_IPV4_DST;
459 if (ipv4_mask->hdr.time_to_live)
460 input_set |= ICE_INSET_IPV4_TTL;
461 if (ipv4_mask->hdr.next_proto_id)
463 ICE_INSET_IPV4_PROTO;
464 if (ipv4_mask->hdr.type_of_service)
/* Fill the IPv4 lookup entry with the masked fields. */
468 list[t].type = (tunnel_valid == 0) ?
469 ICE_IPV4_OFOS : ICE_IPV4_IL;
470 if (ipv4_mask->hdr.src_addr) {
471 list[t].h_u.ipv4_hdr.src_addr =
472 ipv4_spec->hdr.src_addr;
473 list[t].m_u.ipv4_hdr.src_addr =
474 ipv4_mask->hdr.src_addr;
476 if (ipv4_mask->hdr.dst_addr) {
477 list[t].h_u.ipv4_hdr.dst_addr =
478 ipv4_spec->hdr.dst_addr;
479 list[t].m_u.ipv4_hdr.dst_addr =
480 ipv4_mask->hdr.dst_addr;
482 if (ipv4_mask->hdr.time_to_live) {
483 list[t].h_u.ipv4_hdr.time_to_live =
484 ipv4_spec->hdr.time_to_live;
485 list[t].m_u.ipv4_hdr.time_to_live =
486 ipv4_mask->hdr.time_to_live;
488 if (ipv4_mask->hdr.next_proto_id) {
489 list[t].h_u.ipv4_hdr.protocol =
490 ipv4_spec->hdr.next_proto_id;
491 list[t].m_u.ipv4_hdr.protocol =
492 ipv4_mask->hdr.next_proto_id;
494 if (ipv4_mask->hdr.type_of_service) {
495 list[t].h_u.ipv4_hdr.tos =
496 ipv4_spec->hdr.type_of_service;
497 list[t].m_u.ipv4_hdr.tos =
498 ipv4_mask->hdr.type_of_service;
/* --- IPv6: addresses, next header, hop limit, traffic class. --- */
504 case RTE_FLOW_ITEM_TYPE_IPV6:
505 ipv6_spec = item->spec;
506 ipv6_mask = item->mask;
507 if (ipv6_spec && ipv6_mask) {
508 if (ipv6_mask->hdr.payload_len) {
509 rte_flow_error_set(error, EINVAL,
510 RTE_FLOW_ERROR_TYPE_ITEM,
512 "Invalid IPv6 mask");
/* Per-byte scan of src/dst masks selects tunnel vs outer bits. */
516 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
517 if (ipv6_mask->hdr.src_addr[j] &&
520 ICE_INSET_TUN_IPV6_SRC;
522 } else if (ipv6_mask->hdr.src_addr[j]) {
523 input_set |= ICE_INSET_IPV6_SRC;
527 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
528 if (ipv6_mask->hdr.dst_addr[j] &&
531 ICE_INSET_TUN_IPV6_DST;
533 } else if (ipv6_mask->hdr.dst_addr[j]) {
534 input_set |= ICE_INSET_IPV6_DST;
538 if (ipv6_mask->hdr.proto &&
541 ICE_INSET_TUN_IPV6_NEXT_HDR;
542 else if (ipv6_mask->hdr.proto)
544 ICE_INSET_IPV6_NEXT_HDR;
545 if (ipv6_mask->hdr.hop_limits &&
548 ICE_INSET_TUN_IPV6_HOP_LIMIT;
549 else if (ipv6_mask->hdr.hop_limits)
551 ICE_INSET_IPV6_HOP_LIMIT;
552 if ((ipv6_mask->hdr.vtc_flow &
554 (RTE_IPV6_HDR_TC_MASK)) &&
557 ICE_INSET_TUN_IPV6_TC;
558 else if (ipv6_mask->hdr.vtc_flow &
560 (RTE_IPV6_HDR_TC_MASK))
561 input_set |= ICE_INSET_IPV6_TC;
/* Fill the IPv6 lookup entry (f = value, s = mask). */
563 list[t].type = (tunnel_valid == 0) ?
564 ICE_IPV6_OFOS : ICE_IPV6_IL;
565 struct ice_ipv6_hdr *f;
566 struct ice_ipv6_hdr *s;
567 f = &list[t].h_u.ipv6_hdr;
568 s = &list[t].m_u.ipv6_hdr;
569 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
570 if (ipv6_mask->hdr.src_addr[j]) {
572 ipv6_spec->hdr.src_addr[j];
574 ipv6_mask->hdr.src_addr[j];
576 if (ipv6_mask->hdr.dst_addr[j]) {
578 ipv6_spec->hdr.dst_addr[j];
580 ipv6_mask->hdr.dst_addr[j];
583 if (ipv6_mask->hdr.proto) {
585 ipv6_spec->hdr.proto;
587 ipv6_mask->hdr.proto;
589 if (ipv6_mask->hdr.hop_limits) {
591 ipv6_spec->hdr.hop_limits;
593 ipv6_mask->hdr.hop_limits;
/* Traffic class lives inside the big-endian vtc_flow word:
 * extract TC from spec and mask, rebuild the ver/tc/flow word
 * with version and flow-label zeroed.
 */
595 if (ipv6_mask->hdr.vtc_flow &
597 (RTE_IPV6_HDR_TC_MASK)) {
598 struct ice_le_ver_tc_flow vtf;
599 vtf.u.fld.version = 0;
600 vtf.u.fld.flow_label = 0;
601 vtf.u.fld.tc = (rte_be_to_cpu_32
602 (ipv6_spec->hdr.vtc_flow) &
603 RTE_IPV6_HDR_TC_MASK) >>
604 RTE_IPV6_HDR_TC_SHIFT;
605 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
606 vtf.u.fld.tc = (rte_be_to_cpu_32
607 (ipv6_mask->hdr.vtc_flow) &
608 RTE_IPV6_HDR_TC_MASK) >>
609 RTE_IPV6_HDR_TC_SHIFT;
610 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
/* --- UDP: src/dst ports; ICE_UDP_OF marks the VXLAN outer UDP. --- */
616 case RTE_FLOW_ITEM_TYPE_UDP:
617 udp_spec = item->spec;
618 udp_mask = item->mask;
619 if (udp_spec && udp_mask) {
620 /* Check UDP mask and update input set*/
621 if (udp_mask->hdr.dgram_len ||
622 udp_mask->hdr.dgram_cksum) {
623 rte_flow_error_set(error, EINVAL,
624 RTE_FLOW_ERROR_TYPE_ITEM,
631 if (udp_mask->hdr.src_port)
633 ICE_INSET_TUN_UDP_SRC_PORT;
634 if (udp_mask->hdr.dst_port)
636 ICE_INSET_TUN_UDP_DST_PORT;
638 if (udp_mask->hdr.src_port)
640 ICE_INSET_UDP_SRC_PORT;
641 if (udp_mask->hdr.dst_port)
643 ICE_INSET_UDP_DST_PORT;
/* Outer UDP preceding a VXLAN item uses the OF (outer fields)
 * lookup type; everything else is ILOS.
 */
645 if (tun_type == ICE_SW_TUN_VXLAN &&
647 list[t].type = ICE_UDP_OF;
649 list[t].type = ICE_UDP_ILOS;
650 if (udp_mask->hdr.src_port) {
651 list[t].h_u.l4_hdr.src_port =
652 udp_spec->hdr.src_port;
653 list[t].m_u.l4_hdr.src_port =
654 udp_mask->hdr.src_port;
656 if (udp_mask->hdr.dst_port) {
657 list[t].h_u.l4_hdr.dst_port =
658 udp_spec->hdr.dst_port;
659 list[t].m_u.l4_hdr.dst_port =
660 udp_mask->hdr.dst_port;
/* --- TCP: src/dst ports only; all other header fields rejected. --- */
666 case RTE_FLOW_ITEM_TYPE_TCP:
667 tcp_spec = item->spec;
668 tcp_mask = item->mask;
669 if (tcp_spec && tcp_mask) {
670 /* Check TCP mask and update input set */
671 if (tcp_mask->hdr.sent_seq ||
672 tcp_mask->hdr.recv_ack ||
673 tcp_mask->hdr.data_off ||
674 tcp_mask->hdr.tcp_flags ||
675 tcp_mask->hdr.rx_win ||
676 tcp_mask->hdr.cksum ||
677 tcp_mask->hdr.tcp_urp) {
678 rte_flow_error_set(error, EINVAL,
679 RTE_FLOW_ERROR_TYPE_ITEM,
686 if (tcp_mask->hdr.src_port)
688 ICE_INSET_TUN_TCP_SRC_PORT;
689 if (tcp_mask->hdr.dst_port)
691 ICE_INSET_TUN_TCP_DST_PORT;
693 if (tcp_mask->hdr.src_port)
695 ICE_INSET_TCP_SRC_PORT;
696 if (tcp_mask->hdr.dst_port)
698 ICE_INSET_TCP_DST_PORT;
700 list[t].type = ICE_TCP_IL;
701 if (tcp_mask->hdr.src_port) {
702 list[t].h_u.l4_hdr.src_port =
703 tcp_spec->hdr.src_port;
704 list[t].m_u.l4_hdr.src_port =
705 tcp_mask->hdr.src_port;
707 if (tcp_mask->hdr.dst_port) {
708 list[t].h_u.l4_hdr.dst_port =
709 tcp_spec->hdr.dst_port;
710 list[t].m_u.l4_hdr.dst_port =
711 tcp_mask->hdr.dst_port;
/* --- SCTP: src/dst ports; checksum match rejected. --- */
717 case RTE_FLOW_ITEM_TYPE_SCTP:
718 sctp_spec = item->spec;
719 sctp_mask = item->mask;
720 if (sctp_spec && sctp_mask) {
721 /* Check SCTP mask and update input set */
722 if (sctp_mask->hdr.cksum) {
723 rte_flow_error_set(error, EINVAL,
724 RTE_FLOW_ERROR_TYPE_ITEM,
726 "Invalid SCTP mask");
731 if (sctp_mask->hdr.src_port)
733 ICE_INSET_TUN_SCTP_SRC_PORT;
734 if (sctp_mask->hdr.dst_port)
736 ICE_INSET_TUN_SCTP_DST_PORT;
738 if (sctp_mask->hdr.src_port)
740 ICE_INSET_SCTP_SRC_PORT;
741 if (sctp_mask->hdr.dst_port)
743 ICE_INSET_SCTP_DST_PORT;
745 list[t].type = ICE_SCTP_IL;
746 if (sctp_mask->hdr.src_port) {
747 list[t].h_u.sctp_hdr.src_port =
748 sctp_spec->hdr.src_port;
749 list[t].m_u.sctp_hdr.src_port =
750 sctp_mask->hdr.src_port;
752 if (sctp_mask->hdr.dst_port) {
753 list[t].h_u.sctp_hdr.dst_port =
754 sctp_spec->hdr.dst_port;
755 list[t].m_u.sctp_hdr.dst_port =
756 sctp_mask->hdr.dst_port;
/* --- VXLAN tunnel: 24-bit VNI assembled from the 3 mask/spec bytes. --- */
762 case RTE_FLOW_ITEM_TYPE_VXLAN:
763 vxlan_spec = item->spec;
764 vxlan_mask = item->mask;
765 /* Check if VXLAN item is used to describe protocol.
766 * If yes, both spec and mask should be NULL.
767 * If no, both spec and mask shouldn't be NULL.
769 if ((!vxlan_spec && vxlan_mask) ||
770 (vxlan_spec && !vxlan_mask)) {
771 rte_flow_error_set(error, EINVAL,
772 RTE_FLOW_ERROR_TYPE_ITEM,
774 "Invalid VXLAN item");
779 if (vxlan_spec && vxlan_mask) {
780 list[t].type = ICE_VXLAN;
781 if (vxlan_mask->vni[0] ||
782 vxlan_mask->vni[1] ||
783 vxlan_mask->vni[2]) {
784 list[t].h_u.tnl_hdr.vni =
785 (vxlan_spec->vni[2] << 16) |
786 (vxlan_spec->vni[1] << 8) |
788 list[t].m_u.tnl_hdr.vni =
789 (vxlan_mask->vni[2] << 16) |
790 (vxlan_mask->vni[1] << 8) |
793 ICE_INSET_TUN_VXLAN_VNI;
/* --- NVGRE tunnel: 24-bit TNI assembled the same way. --- */
799 case RTE_FLOW_ITEM_TYPE_NVGRE:
800 nvgre_spec = item->spec;
801 nvgre_mask = item->mask;
802 /* Check if NVGRE item is used to describe protocol.
803 * If yes, both spec and mask should be NULL.
804 * If no, both spec and mask shouldn't be NULL.
806 if ((!nvgre_spec && nvgre_mask) ||
807 (nvgre_spec && !nvgre_mask)) {
808 rte_flow_error_set(error, EINVAL,
809 RTE_FLOW_ERROR_TYPE_ITEM,
811 "Invalid NVGRE item");
815 if (nvgre_spec && nvgre_mask) {
816 list[t].type = ICE_NVGRE;
817 if (nvgre_mask->tni[0] ||
818 nvgre_mask->tni[1] ||
819 nvgre_mask->tni[2]) {
820 list[t].h_u.nvgre_hdr.tni_flow =
821 (nvgre_spec->tni[2] << 16) |
822 (nvgre_spec->tni[1] << 8) |
824 list[t].m_u.nvgre_hdr.tni_flow =
825 (nvgre_mask->tni[2] << 16) |
826 (nvgre_mask->tni[1] << 8) |
829 ICE_INSET_TUN_NVGRE_TNI;
/* --- VLAN: outer TCI and inner ethertype. --- */
835 case RTE_FLOW_ITEM_TYPE_VLAN:
836 vlan_spec = item->spec;
837 vlan_mask = item->mask;
838 /* Check if VLAN item is used to describe protocol.
839 * If yes, both spec and mask should be NULL.
840 * If no, both spec and mask shouldn't be NULL.
842 if ((!vlan_spec && vlan_mask) ||
843 (vlan_spec && !vlan_mask)) {
844 rte_flow_error_set(error, EINVAL,
845 RTE_FLOW_ERROR_TYPE_ITEM,
847 "Invalid VLAN item");
850 if (vlan_spec && vlan_mask) {
851 list[t].type = ICE_VLAN_OFOS;
852 if (vlan_mask->tci) {
853 list[t].h_u.vlan_hdr.vlan =
855 list[t].m_u.vlan_hdr.vlan =
857 input_set |= ICE_INSET_VLAN_OUTER;
859 if (vlan_mask->inner_type) {
860 list[t].h_u.vlan_hdr.type =
861 vlan_spec->inner_type;
862 list[t].m_u.vlan_hdr.type =
863 vlan_mask->inner_type;
864 input_set |= ICE_INSET_VLAN_OUTER;
/* --- PPPoE discovery/session: only the session id may be matched. --- */
870 case RTE_FLOW_ITEM_TYPE_PPPOED:
871 case RTE_FLOW_ITEM_TYPE_PPPOES:
872 pppoe_spec = item->spec;
873 pppoe_mask = item->mask;
874 /* Check if PPPoE item is used to describe protocol.
875 * If yes, both spec and mask should be NULL.
876 * If no, both spec and mask shouldn't be NULL.
878 if ((!pppoe_spec && pppoe_mask) ||
879 (pppoe_spec && !pppoe_mask)) {
880 rte_flow_error_set(error, EINVAL,
881 RTE_FLOW_ERROR_TYPE_ITEM,
883 "Invalid pppoe item");
886 if (pppoe_spec && pppoe_mask) {
887 /* Check pppoe mask and update input set */
888 if (pppoe_mask->length ||
890 pppoe_mask->version_type) {
891 rte_flow_error_set(error, EINVAL,
892 RTE_FLOW_ERROR_TYPE_ITEM,
894 "Invalid pppoe mask");
897 list[t].type = ICE_PPPOE;
898 if (pppoe_mask->session_id) {
899 list[t].h_u.pppoe_hdr.session_id =
900 pppoe_spec->session_id;
901 list[t].m_u.pppoe_hdr.session_id =
902 pppoe_mask->session_id;
903 input_set |= ICE_INSET_PPPOE_SESSION;
/* --- Optional PPPoE PPP protocol id (shares the ICE_PPPOE entry). --- */
910 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
911 pppoe_proto_spec = item->spec;
912 pppoe_proto_mask = item->mask;
913 /* Check if PPPoE optional proto_id item
914 * is used to describe protocol.
915 * If yes, both spec and mask should be NULL.
916 * If no, both spec and mask shouldn't be NULL.
918 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
919 (pppoe_proto_spec && !pppoe_proto_mask)) {
920 rte_flow_error_set(error, EINVAL,
921 RTE_FLOW_ERROR_TYPE_ITEM,
923 "Invalid pppoe proto item");
926 if (pppoe_proto_spec && pppoe_proto_mask) {
929 list[t].type = ICE_PPPOE;
930 if (pppoe_proto_mask->proto_id) {
931 list[t].h_u.pppoe_hdr.ppp_prot_id =
932 pppoe_proto_spec->proto_id;
933 list[t].m_u.pppoe_hdr.ppp_prot_id =
934 pppoe_proto_mask->proto_id;
935 input_set |= ICE_INSET_PPPOE_PROTO;
/* VOID items are skipped; anything else is rejected. */
941 case RTE_FLOW_ITEM_TYPE_VOID:
945 rte_flow_error_set(error, EINVAL,
946 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
947 "Invalid pattern item.");
/* Parse the action list in DCF (device-config-function) mode: only the
 * VF action is accepted, translated into a forward-to-VSI rule whose
 * vsi_handle is the target VF id.  Source is set to the same handle
 * and the rule priority is fixed at 5.
 * NOTE(review): break/return statements and some lines are elided in
 * this listing.
 */
960 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
961 struct rte_flow_error *error,
962 struct ice_adv_rule_info *rule_info)
964 const struct rte_flow_action_vf *act_vf;
965 const struct rte_flow_action *action;
966 enum rte_flow_action_type action_type;
968 for (action = actions; action->type !=
969 RTE_FLOW_ACTION_TYPE_END; action++) {
970 action_type = action->type;
971 switch (action_type) {
972 case RTE_FLOW_ACTION_TYPE_VF:
973 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
974 act_vf = action->conf;
975 rule_info->sw_act.vsi_handle = act_vf->id;
/* Any other action type is rejected in DCF mode. */
978 rte_flow_error_set(error,
979 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
981 "Invalid action type or queue number");
986 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
988 rule_info->priority = 5;
/* Parse the action list in normal (PF) mode.  Supports RSS queue-group,
 * single queue, drop and void actions; fills rule_info->sw_act with the
 * filter action, destination queue(s) and the main VSI as source.
 * NOTE(review): break statements, goto-error jumps, some assignments
 * and the final return are elided in this listing.
 */
994 ice_switch_parse_action(struct ice_pf *pf,
995 const struct rte_flow_action *actions,
996 struct rte_flow_error *error,
997 struct ice_adv_rule_info *rule_info)
999 struct ice_vsi *vsi = pf->main_vsi;
1000 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1001 const struct rte_flow_action_queue *act_q;
1002 const struct rte_flow_action_rss *act_qgrop;
1003 uint16_t base_queue, i;
1004 const struct rte_flow_action *action;
1005 enum rte_flow_action_type action_type;
/* Queue-group sizes the hardware accepts (powers of two, 2..128). */
1006 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1007 2, 4, 8, 16, 32, 64, 128};
/* Queue ids in the action are relative; offset them by the VSI base. */
1009 base_queue = pf->base_queue + vsi->base_queue;
1010 for (action = actions; action->type !=
1011 RTE_FLOW_ACTION_TYPE_END; action++) {
1012 action_type = action->type;
1013 switch (action_type) {
/* RSS action is interpreted as a queue-group forward: the group
 * must have a valid size, fit within nb_rx_queues and be a run
 * of consecutive queue ids.
 */
1014 case RTE_FLOW_ACTION_TYPE_RSS:
1015 act_qgrop = action->conf;
1016 rule_info->sw_act.fltr_act =
1018 rule_info->sw_act.fwd_id.q_id =
1019 base_queue + act_qgrop->queue[0];
1020 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1021 if (act_qgrop->queue_num ==
1022 valid_qgrop_number[i])
1025 if (i == MAX_QGRP_NUM_TYPE)
1027 if ((act_qgrop->queue[0] +
1028 act_qgrop->queue_num) >
1029 dev->data->nb_rx_queues)
1031 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1032 if (act_qgrop->queue[i + 1] !=
1033 act_qgrop->queue[i] + 1)
1035 rule_info->sw_act.qgrp_size =
1036 act_qgrop->queue_num;
/* Single-queue forward; index validated against nb_rx_queues. */
1038 case RTE_FLOW_ACTION_TYPE_QUEUE:
1039 act_q = action->conf;
1040 if (act_q->index >= dev->data->nb_rx_queues)
1042 rule_info->sw_act.fltr_act =
1044 rule_info->sw_act.fwd_id.q_id =
1045 base_queue + act_q->index;
1048 case RTE_FLOW_ACTION_TYPE_DROP:
1049 rule_info->sw_act.fltr_act =
1053 case RTE_FLOW_ACTION_TYPE_VOID:
1061 rule_info->sw_act.vsi_handle = vsi->idx;
1063 rule_info->sw_act.src = vsi->idx;
1064 rule_info->priority = 5;
/* Error label: unsupported action or invalid queue selection. */
1069 rte_flow_error_set(error,
1070 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1072 "Invalid action type or queue number");
/* Parser entry point (parse_pattern_action callback): pre-scans the
 * pattern to pick the tunnel type, allocates the lookup list and the
 * sw_meta carrier, matches the pattern against the supplied table,
 * translates items via ice_switch_inset_get(), validates the resulting
 * input set, parses the actions (DCF or PF variant) and hands the
 * filled sw_meta back through *meta.  Ownership of `list` and
 * `sw_meta_ptr` transfers to the caller on success; the error paths
 * free them.
 * NOTE(review): item_num counting, goto labels, return statements and
 * several lines are elided in this listing.
 */
1077 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1078 struct ice_pattern_match_item *array,
1080 const struct rte_flow_item pattern[],
1081 const struct rte_flow_action actions[],
1083 struct rte_flow_error *error)
1085 struct ice_pf *pf = &ad->pf;
1086 uint64_t inputset = 0;
1088 struct sw_meta *sw_meta_ptr = NULL;
1089 struct ice_adv_rule_info rule_info;
1090 struct ice_adv_lkup_elem *list = NULL;
1091 uint16_t lkups_num = 0;
1092 const struct rte_flow_item *item = pattern;
1093 uint16_t item_num = 0;
1094 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1095 struct ice_pattern_match_item *pattern_match_item = NULL;
/* Pre-scan: count items and derive the tunnel type from the pattern. */
1097 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1099 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1100 tun_type = ICE_SW_TUN_VXLAN;
1101 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1102 tun_type = ICE_SW_TUN_NVGRE;
1103 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1104 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1105 tun_type = ICE_SW_TUN_PPPOE;
/* An exact ethertype match selects the combined tun/non-tun type. */
1106 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1107 const struct rte_flow_item_eth *eth_mask;
1109 eth_mask = item->mask;
1112 if (eth_mask->type == UINT16_MAX)
1113 tun_type = ICE_SW_TUN_AND_NON_TUN;
1115 /* reserve one more memory slot for ETH which may
1116 * consume 2 lookup items.
1118 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1122 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1124 rte_flow_error_set(error, EINVAL,
1125 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1126 "No memory for PMD internal items");
1130 rule_info.tun_type = tun_type;
1133 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1135 rte_flow_error_set(error, EINVAL,
1136 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1137 "No memory for sw_pattern_meta_ptr");
/* Match against the stage-specific pattern table. */
1141 pattern_match_item =
1142 ice_search_pattern_match_item(pattern, array, array_len, error);
1143 if (!pattern_match_item) {
1144 rte_flow_error_set(error, EINVAL,
1145 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1146 "Invalid input pattern");
1150 inputset = ice_switch_inset_get
1151 (pattern, error, list, &lkups_num, tun_type);
/* Every matched field must be allowed by the pattern's mask. */
1152 if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
1153 rte_flow_error_set(error, EINVAL,
1154 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1156 "Invalid input set");
/* DCF mode uses the restricted VF-only action parser. */
1160 if (ad->hw.dcf_enabled)
1161 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1163 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1166 rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1168 "Invalid input action");
/* Success: hand the filled metadata to the flow framework. */
1173 *meta = sw_meta_ptr;
1174 ((struct sw_meta *)*meta)->list = list;
1175 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1176 ((struct sw_meta *)*meta)->rule_info = rule_info;
1179 rte_free(sw_meta_ptr);
1182 rte_free(pattern_match_item);
/* Error label: release everything allocated above. */
1188 rte_free(sw_meta_ptr);
1189 rte_free(pattern_match_item);
/* Engine `query_count` hook: flow counters are not implemented for the
 * switch filter — always reports an error.
 */
1195 ice_switch_query(struct ice_adapter *ad __rte_unused,
1196 struct rte_flow *flow __rte_unused,
1197 struct rte_flow_query_count *count __rte_unused,
1198 struct rte_flow_error *error)
1200 rte_flow_error_set(error, EINVAL,
1201 RTE_FLOW_ERROR_TYPE_HANDLE,
1203 "count action not supported by switch filter");
/* Engine `init` hook: choose the distributor parser matching the active
 * DDP package (COMMS vs OS-default) and register either the permission
 * parser (pipe-mode devarg set) or the distributor parser.
 * NOTE(review): the else branch for unknown package types and the
 * return are elided in this listing.
 */
1209 ice_switch_init(struct ice_adapter *ad)
1212 struct ice_flow_parser *dist_parser;
1213 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1215 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1216 dist_parser = &ice_switch_dist_parser_comms;
1217 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1218 dist_parser = &ice_switch_dist_parser_os;
1222 if (ad->devargs.pipe_mode_support)
1223 ret = ice_register_parser(perm_parser, ad);
1225 ret = ice_register_parser(dist_parser, ad);
/* Engine `uninit` hook: mirror of ice_switch_init — unregister the
 * parser that was registered for this adapter.
 */
1230 ice_switch_uninit(struct ice_adapter *ad)
1232 struct ice_flow_parser *dist_parser;
1233 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1235 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1236 dist_parser = &ice_switch_dist_parser_comms;
1238 dist_parser = &ice_switch_dist_parser_os;
1240 if (ad->devargs.pipe_mode_support)
1241 ice_unregister_parser(perm_parser, ad);
1243 ice_unregister_parser(dist_parser, ad);
/* Switch filter flow engine: wires the hooks above into the generic
 * ice flow framework.
 */
1247 ice_flow_engine ice_switch_engine = {
1248 .init = ice_switch_init,
1249 .uninit = ice_switch_uninit,
1250 .create = ice_switch_create,
1251 .destroy = ice_switch_destroy,
1252 .query_count = ice_switch_query,
1253 .free = ice_switch_filter_rule_free,
1254 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser used with the OS-default DDP package. */
1258 ice_flow_parser ice_switch_dist_parser_os = {
1259 .engine = &ice_switch_engine,
1260 .array = ice_switch_pattern_dist_os,
1261 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1262 .parse_pattern_action = ice_switch_parse_pattern_action,
1263 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Distributor-stage parser used with the COMMS DDP package. */
1267 ice_flow_parser ice_switch_dist_parser_comms = {
1268 .engine = &ice_switch_engine,
1269 .array = ice_switch_pattern_dist_comms,
1270 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1271 .parse_pattern_action = ice_switch_parse_pattern_action,
1272 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser, selected when pipe-mode is enabled. */
1276 ice_flow_parser ice_switch_perm_parser = {
1277 .engine = &ice_switch_engine,
1278 .array = ice_switch_pattern_perm,
1279 .array_len = RTE_DIM(ice_switch_pattern_perm),
1280 .parse_pattern_action = ice_switch_parse_pattern_action,
1281 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the generic flow
 * framework at DPDK initialization time.
 */
1284 RTE_INIT(ice_sw_engine_init)
1286 struct ice_flow_engine *engine = &ice_switch_engine;
1287 ice_register_flow_engine(engine);