1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
/* Max number of supported RSS queue-group sizes (2..128, powers of two). */
28 #define MAX_QGRP_NUM_TYPE 7

/*
 * Input-set bitmaps: each ICE_SW_INSET_* macro lists the fields a given
 * pattern is allowed to match on.  They are checked against the input set
 * actually collected from the user pattern in ice_switch_inset_get().
 * NOTE(review): this chunk is visibly truncated (non-contiguous original
 * line numbers); some macro bodies may have lines missing from view.
 */
30 #define ICE_SW_INSET_ETHER ( \
31 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
/* Non-tunnel IPv4 / IPv4+L4 input sets. */
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Non-tunnel IPv6 / IPv6+L4 input sets. */
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49 ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Tunnel (NVGRE/VXLAN) input sets for the "distributor" stage: inner
 * 5-tuple fields plus the tunnel id (TNI/VNI) and outer IPv4 dst.
 */
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/*
 * Tunnel input sets for the "permission" (pipeline-mode) stage: inner
 * fields only, no tunnel-id requirement.
 */
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86 ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90 ICE_INSET_TUN_IPV4_TOS)
/* PPPoE input sets: session id, optionally the PPP protocol id. */
91 #define ICE_SW_INSET_MAC_PPPOE ( \
92 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
95 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97 ICE_INSET_PPPOE_PROTO)
100 struct ice_adv_lkup_elem *list;
102 struct ice_adv_rule_info rule_info;
/*
 * Forward declarations of the three parser instances defined at the
 * bottom of the file: OS-default-package distributor, comms-package
 * distributor, and the permission (pipeline-mode) parser.
 */
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Supported pattern -> allowed-input-set table for the distributor stage
 * with the comms DDP package loaded.  Each entry pairs a pattern template
 * with the ICE_SW_INSET_* mask it may match on (second mask is the outer
 * input set, unused here).
 * NOTE(review): array opener/closer and some entries are truncated in
 * this view (non-contiguous original line numbers).
 */
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
112 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113 {pattern_ethertype_vlan,
114 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
116 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp,
118 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_tcp,
120 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
122 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123 {pattern_eth_ipv6_udp,
124 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv6_tcp,
126 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* VXLAN / NVGRE tunnel patterns use the DIST tunnel input sets. */
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_nvgre_eth_ipv4,
134 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/* PPPoE discovery/session, with and without VLAN and proto id. */
140 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141 {pattern_eth_vlan_pppoed,
142 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
144 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145 {pattern_eth_vlan_pppoes,
146 ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147 {pattern_eth_pppoes_proto,
148 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149 {pattern_eth_vlan_pppoes_proto,
150 ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
/* ESP/AH/L2TP are profile rules: matched by profile, no input set. */
151 {pattern_eth_ipv6_esp,
152 ICE_INSET_NONE, ICE_INSET_NONE},
153 {pattern_eth_ipv6_ah,
154 ICE_INSET_NONE, ICE_INSET_NONE},
155 {pattern_eth_ipv6_l2tp,
156 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Pattern table for the distributor stage with the OS-default DDP
 * package: same L2/L3/L4 and tunnel entries as the comms table, but no
 * PPPoE or ESP/AH/L2TP support.
 * NOTE(review): array opener/closer and some entries truncated in view.
 */
160 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
162 ICE_SW_INSET_ETHER, ICE_INSET_NONE},
163 {pattern_ethertype_vlan,
164 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
166 ICE_INSET_NONE, ICE_INSET_NONE},
168 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
169 {pattern_eth_ipv4_udp,
170 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
171 {pattern_eth_ipv4_tcp,
172 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
174 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
175 {pattern_eth_ipv6_udp,
176 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
177 {pattern_eth_ipv6_tcp,
178 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
/* Tunnel patterns (distributor input sets require tunnel id). */
179 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
180 ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
181 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
182 ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
183 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
184 ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
185 {pattern_eth_ipv4_nvgre_eth_ipv4,
186 ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
187 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
188 ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
189 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
190 ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
/*
 * Pattern table for the permission (pipeline-mode) stage: tunnel entries
 * use the PERM input sets (inner fields only, tunnel id not required).
 * NOTE(review): array opener/closer and some entries truncated in view.
 */
194 ice_pattern_match_item ice_switch_pattern_perm[] = {
195 {pattern_ethertype_vlan,
196 ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
198 ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
199 {pattern_eth_ipv4_udp,
200 ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
201 {pattern_eth_ipv4_tcp,
202 ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
204 ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
205 {pattern_eth_ipv6_udp,
206 ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
207 {pattern_eth_ipv6_tcp,
208 ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
209 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
210 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
211 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
212 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
213 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
214 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
215 {pattern_eth_ipv4_nvgre_eth_ipv4,
216 ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
217 {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
218 ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
219 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
220 ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
/* Profile rules: matched by profile id, no field-level input set. */
221 {pattern_eth_ipv6_esp,
222 ICE_INSET_NONE, ICE_INSET_NONE},
223 {pattern_eth_ipv6_ah,
224 ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_eth_ipv6_l2tp,
226 ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Engine "create" callback: program the parsed rule (lookup list +
 * rule_info carried in @meta, produced by the parse stage) into hardware
 * via ice_add_adv_rule(), then allocate a private copy of the returned
 * rule id and attach it to @flow->rule for later destroy/query.
 * Errors are reported through rte_flow_error_set().
 * NOTE(review): this chunk is visibly truncated (missing lines between
 * non-contiguous original numbers — returns, gotos, cleanup paths are
 * not all visible); comments describe only the visible code.
 */
230 ice_switch_create(struct ice_adapter *ad,
231 struct rte_flow *flow,
233 struct rte_flow_error *error)
236 struct ice_pf *pf = &ad->pf;
237 struct ice_hw *hw = ICE_PF_TO_HW(pf);
238 struct ice_rule_query_data rule_added = {0};
239 struct ice_rule_query_data *filter_ptr;
240 struct ice_adv_lkup_elem *list =
241 ((struct sw_meta *)meta)->list;
243 ((struct sw_meta *)meta)->lkups_num;
244 struct ice_adv_rule_info *rule_info =
245 &((struct sw_meta *)meta)->rule_info;
/* Reject rules with more lookup words than the hardware chain allows. */
247 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
248 rte_flow_error_set(error, EINVAL,
249 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
250 "item number too large for rule");
254 rte_flow_error_set(error, EINVAL,
255 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
256 "lookup list should not be NULL");
/* Program the advanced rule into the switch. */
259 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
261 filter_ptr = rte_zmalloc("ice_switch_filter",
262 sizeof(struct ice_rule_query_data), 0);
264 rte_flow_error_set(error, EINVAL,
265 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
266 "No memory for ice_switch_filter");
/* Keep the rule id on the flow handle so destroy/query can find it. */
269 flow->rule = filter_ptr;
270 rte_memcpy(filter_ptr,
272 sizeof(struct ice_rule_query_data));
274 rte_flow_error_set(error, EINVAL,
275 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
276 "switch filter create flow fail");
/*
 * Engine "destroy" callback: remove the hardware rule identified by the
 * ice_rule_query_data stashed in @flow->rule, then free that private
 * copy.  Errors are reported through rte_flow_error_set().
 * NOTE(review): chunk is truncated — returns and the NULL-check branch
 * body are not fully visible.
 */
292 ice_switch_destroy(struct ice_adapter *ad,
293 struct rte_flow *flow,
294 struct rte_flow_error *error)
296 struct ice_hw *hw = &ad->hw;
298 struct ice_rule_query_data *filter_ptr;
300 filter_ptr = (struct ice_rule_query_data *)
/* No rule id attached: the flow was not created by this engine. */
304 rte_flow_error_set(error, EINVAL,
305 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
307 " create by switch filter");
311 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
313 rte_flow_error_set(error, EINVAL,
314 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
315 "fail to destroy switch filter rule");
319 rte_free(filter_ptr);
/* Engine "free" callback: release the per-flow rule-id copy. */
324 ice_switch_filter_rule_free(struct rte_flow *flow)
326 rte_free(flow->rule);
/*
 * Walk the rte_flow item @pattern and translate it into the ice advanced
 * lookup list @list, returning (via the visible code paths) the collected
 * input-set bitmap and, through @tun_type, any tunnel/profile type implied
 * by the items (ESP/AH/L2TPv3 profiles).  For each supported item the
 * spec/mask pair is validated, the matching ICE_INSET_* bits are ORed into
 * input_set, and a lookup element (header value + mask) is appended.
 * Tunnel context switches field selection between outer (ICE_*_OFOS) and
 * inner (ICE_*_IL) variants via tunnel_valid.
 * Errors are reported through rte_flow_error_set().
 *
 * NOTE(review): this chunk is visibly truncated — many lines (returns,
 * t++ increments, some branch bodies, tunnel_valid updates) are missing
 * from view; comments below describe only the visible code.
 */
330 ice_switch_inset_get(const struct rte_flow_item pattern[],
331 struct rte_flow_error *error,
332 struct ice_adv_lkup_elem *list,
334 enum ice_sw_tunnel_type *tun_type)
336 const struct rte_flow_item *item = pattern;
337 enum rte_flow_item_type item_type;
338 const struct rte_flow_item_eth *eth_spec, *eth_mask;
339 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
340 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
341 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
342 const struct rte_flow_item_udp *udp_spec, *udp_mask;
343 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
344 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
345 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
346 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
347 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
348 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
350 const struct rte_flow_item_esp *esp_spec, *esp_mask;
351 const struct rte_flow_item_ah *ah_spec, *ah_mask;
352 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
353 uint64_t input_set = ICE_INSET_NONE;
/* tunnel_valid flips outer->inner field selection after a tunnel item.
 * NOTE(review): "ipv6_valiad" is a typo for "ipv6_valid" in the original
 * source; left as-is since renaming is out of scope for a doc pass. */
355 uint16_t tunnel_valid = 0;
356 uint16_t pppoe_valid = 0;
357 uint16_t ipv6_valiad = 0;
360 for (item = pattern; item->type !=
361 RTE_FLOW_ITEM_TYPE_END; item++) {
/* Range specs (item->last) are not supported by this engine. */
363 rte_flow_error_set(error, EINVAL,
364 RTE_FLOW_ERROR_TYPE_ITEM,
366 "Not support range");
369 item_type = item->type;
/* --- Ethernet: src/dst MAC and optional ethertype --- */
372 case RTE_FLOW_ITEM_TYPE_ETH:
373 eth_spec = item->spec;
374 eth_mask = item->mask;
375 if (eth_spec && eth_mask) {
376 const uint8_t *a = eth_mask->src.addr_bytes;
377 const uint8_t *b = eth_mask->dst.addr_bytes;
/* Any masked SMAC/DMAC byte after a tunnel item selects the
 * TUN_* input-set bits (branch bodies truncated in view). */
378 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
379 if (a[j] && tunnel_valid) {
389 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
390 if (b[j] && tunnel_valid) {
401 input_set |= ICE_INSET_ETHERTYPE;
402 list[t].type = (tunnel_valid == 0) ?
403 ICE_MAC_OFOS : ICE_MAC_IL;
404 struct ice_ether_hdr *h;
405 struct ice_ether_hdr *m;
407 h = &list[t].h_u.eth_hdr;
408 m = &list[t].m_u.eth_hdr;
/* Copy only the masked MAC bytes into header/mask. */
409 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
410 if (eth_mask->src.addr_bytes[j]) {
412 eth_spec->src.addr_bytes[j];
414 eth_mask->src.addr_bytes[j];
417 if (eth_mask->dst.addr_bytes[j]) {
419 eth_spec->dst.addr_bytes[j];
421 eth_mask->dst.addr_bytes[j];
/* A masked ethertype consumes a second lookup element (ICE_ETYPE_OL). */
427 if (eth_mask->type) {
428 list[t].type = ICE_ETYPE_OL;
429 list[t].h_u.ethertype.ethtype_id =
431 list[t].m_u.ethertype.ethtype_id =
/* --- IPv4: validate mask, collect input set, fill lookup --- */
438 case RTE_FLOW_ITEM_TYPE_IPV4:
439 ipv4_spec = item->spec;
440 ipv4_mask = item->mask;
441 if (ipv4_spec && ipv4_mask) {
442 /* Check IPv4 mask and update input set */
443 if (ipv4_mask->hdr.version_ihl ||
444 ipv4_mask->hdr.total_length ||
445 ipv4_mask->hdr.packet_id ||
446 ipv4_mask->hdr.hdr_checksum) {
447 rte_flow_error_set(error, EINVAL,
448 RTE_FLOW_ERROR_TYPE_ITEM,
450 "Invalid IPv4 mask.");
/* Inside a tunnel: use the TUN_ variants of the input-set bits. */
455 if (ipv4_mask->hdr.type_of_service)
457 ICE_INSET_TUN_IPV4_TOS;
458 if (ipv4_mask->hdr.src_addr)
460 ICE_INSET_TUN_IPV4_SRC;
461 if (ipv4_mask->hdr.dst_addr)
463 ICE_INSET_TUN_IPV4_DST;
464 if (ipv4_mask->hdr.time_to_live)
466 ICE_INSET_TUN_IPV4_TTL;
467 if (ipv4_mask->hdr.next_proto_id)
469 ICE_INSET_TUN_IPV4_PROTO;
/* Outside a tunnel: plain IPv4 input-set bits. */
471 if (ipv4_mask->hdr.src_addr)
472 input_set |= ICE_INSET_IPV4_SRC;
473 if (ipv4_mask->hdr.dst_addr)
474 input_set |= ICE_INSET_IPV4_DST;
475 if (ipv4_mask->hdr.time_to_live)
476 input_set |= ICE_INSET_IPV4_TTL;
477 if (ipv4_mask->hdr.next_proto_id)
479 ICE_INSET_IPV4_PROTO;
480 if (ipv4_mask->hdr.type_of_service)
484 list[t].type = (tunnel_valid == 0) ?
485 ICE_IPV4_OFOS : ICE_IPV4_IL;
486 if (ipv4_mask->hdr.src_addr) {
487 list[t].h_u.ipv4_hdr.src_addr =
488 ipv4_spec->hdr.src_addr;
489 list[t].m_u.ipv4_hdr.src_addr =
490 ipv4_mask->hdr.src_addr;
492 if (ipv4_mask->hdr.dst_addr) {
493 list[t].h_u.ipv4_hdr.dst_addr =
494 ipv4_spec->hdr.dst_addr;
495 list[t].m_u.ipv4_hdr.dst_addr =
496 ipv4_mask->hdr.dst_addr;
498 if (ipv4_mask->hdr.time_to_live) {
499 list[t].h_u.ipv4_hdr.time_to_live =
500 ipv4_spec->hdr.time_to_live;
501 list[t].m_u.ipv4_hdr.time_to_live =
502 ipv4_mask->hdr.time_to_live;
504 if (ipv4_mask->hdr.next_proto_id) {
505 list[t].h_u.ipv4_hdr.protocol =
506 ipv4_spec->hdr.next_proto_id;
507 list[t].m_u.ipv4_hdr.protocol =
508 ipv4_mask->hdr.next_proto_id;
510 if (ipv4_mask->hdr.type_of_service) {
511 list[t].h_u.ipv4_hdr.tos =
512 ipv4_spec->hdr.type_of_service;
513 list[t].m_u.ipv4_hdr.tos =
514 ipv4_mask->hdr.type_of_service;
/* --- IPv6: per-byte addr masks, proto, hop limit, TC field --- */
520 case RTE_FLOW_ITEM_TYPE_IPV6:
521 ipv6_spec = item->spec;
522 ipv6_mask = item->mask;
524 if (ipv6_spec && ipv6_mask) {
525 if (ipv6_mask->hdr.payload_len) {
526 rte_flow_error_set(error, EINVAL,
527 RTE_FLOW_ERROR_TYPE_ITEM,
529 "Invalid IPv6 mask");
/* Any masked src/dst byte sets the (TUN_)IPV6_SRC/DST bit once. */
533 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
534 if (ipv6_mask->hdr.src_addr[j] &&
537 ICE_INSET_TUN_IPV6_SRC;
539 } else if (ipv6_mask->hdr.src_addr[j]) {
540 input_set |= ICE_INSET_IPV6_SRC;
544 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
545 if (ipv6_mask->hdr.dst_addr[j] &&
548 ICE_INSET_TUN_IPV6_DST;
550 } else if (ipv6_mask->hdr.dst_addr[j]) {
551 input_set |= ICE_INSET_IPV6_DST;
555 if (ipv6_mask->hdr.proto &&
558 ICE_INSET_TUN_IPV6_NEXT_HDR;
559 else if (ipv6_mask->hdr.proto)
561 ICE_INSET_IPV6_NEXT_HDR;
562 if (ipv6_mask->hdr.hop_limits &&
565 ICE_INSET_TUN_IPV6_HOP_LIMIT;
566 else if (ipv6_mask->hdr.hop_limits)
568 ICE_INSET_IPV6_HOP_LIMIT;
/* Traffic class is extracted from the vtc_flow word. */
569 if ((ipv6_mask->hdr.vtc_flow &
571 (RTE_IPV6_HDR_TC_MASK)) &&
574 ICE_INSET_TUN_IPV6_TC;
575 else if (ipv6_mask->hdr.vtc_flow &
577 (RTE_IPV6_HDR_TC_MASK))
578 input_set |= ICE_INSET_IPV6_TC;
580 list[t].type = (tunnel_valid == 0) ?
581 ICE_IPV6_OFOS : ICE_IPV6_IL;
582 struct ice_ipv6_hdr *f;
583 struct ice_ipv6_hdr *s;
584 f = &list[t].h_u.ipv6_hdr;
585 s = &list[t].m_u.ipv6_hdr;
586 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
587 if (ipv6_mask->hdr.src_addr[j]) {
589 ipv6_spec->hdr.src_addr[j];
591 ipv6_mask->hdr.src_addr[j];
593 if (ipv6_mask->hdr.dst_addr[j]) {
595 ipv6_spec->hdr.dst_addr[j];
597 ipv6_mask->hdr.dst_addr[j];
600 if (ipv6_mask->hdr.proto) {
602 ipv6_spec->hdr.proto;
604 ipv6_mask->hdr.proto;
606 if (ipv6_mask->hdr.hop_limits) {
608 ipv6_spec->hdr.hop_limits;
610 ipv6_mask->hdr.hop_limits;
/* Re-encode only the TC bits into the hw ver/tc/flow word (version
 * and flow label deliberately zeroed in both value and mask). */
612 if (ipv6_mask->hdr.vtc_flow &
614 (RTE_IPV6_HDR_TC_MASK)) {
615 struct ice_le_ver_tc_flow vtf;
616 vtf.u.fld.version = 0;
617 vtf.u.fld.flow_label = 0;
618 vtf.u.fld.tc = (rte_be_to_cpu_32
619 (ipv6_spec->hdr.vtc_flow) &
620 RTE_IPV6_HDR_TC_MASK) >>
621 RTE_IPV6_HDR_TC_SHIFT;
622 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
623 vtf.u.fld.tc = (rte_be_to_cpu_32
624 (ipv6_mask->hdr.vtc_flow) &
625 RTE_IPV6_HDR_TC_MASK) >>
626 RTE_IPV6_HDR_TC_SHIFT;
627 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
/* --- UDP: src/dst ports only; len/cksum may not be masked --- */
633 case RTE_FLOW_ITEM_TYPE_UDP:
634 udp_spec = item->spec;
635 udp_mask = item->mask;
636 if (udp_spec && udp_mask) {
637 /* Check UDP mask and update input set*/
638 if (udp_mask->hdr.dgram_len ||
639 udp_mask->hdr.dgram_cksum) {
640 rte_flow_error_set(error, EINVAL,
641 RTE_FLOW_ERROR_TYPE_ITEM,
648 if (udp_mask->hdr.src_port)
650 ICE_INSET_TUN_UDP_SRC_PORT;
651 if (udp_mask->hdr.dst_port)
653 ICE_INSET_TUN_UDP_DST_PORT;
655 if (udp_mask->hdr.src_port)
657 ICE_INSET_UDP_SRC_PORT;
658 if (udp_mask->hdr.dst_port)
660 ICE_INSET_UDP_DST_PORT;
/* For VXLAN the outer UDP is the tunnel header (ICE_UDP_OF);
 * otherwise treat it as an inner/last UDP (ICE_UDP_ILOS). */
662 if (*tun_type == ICE_SW_TUN_VXLAN &&
664 list[t].type = ICE_UDP_OF;
666 list[t].type = ICE_UDP_ILOS;
667 if (udp_mask->hdr.src_port) {
668 list[t].h_u.l4_hdr.src_port =
669 udp_spec->hdr.src_port;
670 list[t].m_u.l4_hdr.src_port =
671 udp_mask->hdr.src_port;
673 if (udp_mask->hdr.dst_port) {
674 list[t].h_u.l4_hdr.dst_port =
675 udp_spec->hdr.dst_port;
676 list[t].m_u.l4_hdr.dst_port =
677 udp_mask->hdr.dst_port;
/* --- TCP: src/dst ports only; all other header fields rejected --- */
683 case RTE_FLOW_ITEM_TYPE_TCP:
684 tcp_spec = item->spec;
685 tcp_mask = item->mask;
686 if (tcp_spec && tcp_mask) {
687 /* Check TCP mask and update input set */
688 if (tcp_mask->hdr.sent_seq ||
689 tcp_mask->hdr.recv_ack ||
690 tcp_mask->hdr.data_off ||
691 tcp_mask->hdr.tcp_flags ||
692 tcp_mask->hdr.rx_win ||
693 tcp_mask->hdr.cksum ||
694 tcp_mask->hdr.tcp_urp) {
695 rte_flow_error_set(error, EINVAL,
696 RTE_FLOW_ERROR_TYPE_ITEM,
703 if (tcp_mask->hdr.src_port)
705 ICE_INSET_TUN_TCP_SRC_PORT;
706 if (tcp_mask->hdr.dst_port)
708 ICE_INSET_TUN_TCP_DST_PORT;
710 if (tcp_mask->hdr.src_port)
712 ICE_INSET_TCP_SRC_PORT;
713 if (tcp_mask->hdr.dst_port)
715 ICE_INSET_TCP_DST_PORT;
717 list[t].type = ICE_TCP_IL;
718 if (tcp_mask->hdr.src_port) {
719 list[t].h_u.l4_hdr.src_port =
720 tcp_spec->hdr.src_port;
721 list[t].m_u.l4_hdr.src_port =
722 tcp_mask->hdr.src_port;
724 if (tcp_mask->hdr.dst_port) {
725 list[t].h_u.l4_hdr.dst_port =
726 tcp_spec->hdr.dst_port;
727 list[t].m_u.l4_hdr.dst_port =
728 tcp_mask->hdr.dst_port;
/* --- SCTP: src/dst ports only; checksum may not be masked --- */
734 case RTE_FLOW_ITEM_TYPE_SCTP:
735 sctp_spec = item->spec;
736 sctp_mask = item->mask;
737 if (sctp_spec && sctp_mask) {
738 /* Check SCTP mask and update input set */
739 if (sctp_mask->hdr.cksum) {
740 rte_flow_error_set(error, EINVAL,
741 RTE_FLOW_ERROR_TYPE_ITEM,
743 "Invalid SCTP mask");
748 if (sctp_mask->hdr.src_port)
750 ICE_INSET_TUN_SCTP_SRC_PORT;
751 if (sctp_mask->hdr.dst_port)
753 ICE_INSET_TUN_SCTP_DST_PORT;
755 if (sctp_mask->hdr.src_port)
757 ICE_INSET_SCTP_SRC_PORT;
758 if (sctp_mask->hdr.dst_port)
760 ICE_INSET_SCTP_DST_PORT;
762 list[t].type = ICE_SCTP_IL;
763 if (sctp_mask->hdr.src_port) {
764 list[t].h_u.sctp_hdr.src_port =
765 sctp_spec->hdr.src_port;
766 list[t].m_u.sctp_hdr.src_port =
767 sctp_mask->hdr.src_port;
769 if (sctp_mask->hdr.dst_port) {
770 list[t].h_u.sctp_hdr.dst_port =
771 sctp_spec->hdr.dst_port;
772 list[t].m_u.sctp_hdr.dst_port =
773 sctp_mask->hdr.dst_port;
/* --- VXLAN tunnel: optional 24-bit VNI match --- */
779 case RTE_FLOW_ITEM_TYPE_VXLAN:
780 vxlan_spec = item->spec;
781 vxlan_mask = item->mask;
782 /* Check if VXLAN item is used to describe protocol.
783 * If yes, both spec and mask should be NULL.
784 * If no, both spec and mask shouldn't be NULL.
786 if ((!vxlan_spec && vxlan_mask) ||
787 (vxlan_spec && !vxlan_mask)) {
788 rte_flow_error_set(error, EINVAL,
789 RTE_FLOW_ERROR_TYPE_ITEM,
791 "Invalid VXLAN item");
796 if (vxlan_spec && vxlan_mask) {
797 list[t].type = ICE_VXLAN;
/* Assemble the 3-byte VNI into the hw 32-bit field (byte 0's
 * contribution is on a truncated line in this view). */
798 if (vxlan_mask->vni[0] ||
799 vxlan_mask->vni[1] ||
800 vxlan_mask->vni[2]) {
801 list[t].h_u.tnl_hdr.vni =
802 (vxlan_spec->vni[2] << 16) |
803 (vxlan_spec->vni[1] << 8) |
805 list[t].m_u.tnl_hdr.vni =
806 (vxlan_mask->vni[2] << 16) |
807 (vxlan_mask->vni[1] << 8) |
810 ICE_INSET_TUN_VXLAN_VNI;
/* --- NVGRE tunnel: optional 24-bit TNI match --- */
816 case RTE_FLOW_ITEM_TYPE_NVGRE:
817 nvgre_spec = item->spec;
818 nvgre_mask = item->mask;
819 /* Check if NVGRE item is used to describe protocol.
820 * If yes, both spec and mask should be NULL.
821 * If no, both spec and mask shouldn't be NULL.
823 if ((!nvgre_spec && nvgre_mask) ||
824 (nvgre_spec && !nvgre_mask)) {
825 rte_flow_error_set(error, EINVAL,
826 RTE_FLOW_ERROR_TYPE_ITEM,
828 "Invalid NVGRE item");
832 if (nvgre_spec && nvgre_mask) {
833 list[t].type = ICE_NVGRE;
834 if (nvgre_mask->tni[0] ||
835 nvgre_mask->tni[1] ||
836 nvgre_mask->tni[2]) {
837 list[t].h_u.nvgre_hdr.tni_flow =
838 (nvgre_spec->tni[2] << 16) |
839 (nvgre_spec->tni[1] << 8) |
841 list[t].m_u.nvgre_hdr.tni_flow =
842 (nvgre_mask->tni[2] << 16) |
843 (nvgre_mask->tni[1] << 8) |
846 ICE_INSET_TUN_NVGRE_TNI;
/* --- VLAN: outer TCI and inner ethertype --- */
852 case RTE_FLOW_ITEM_TYPE_VLAN:
853 vlan_spec = item->spec;
854 vlan_mask = item->mask;
855 /* Check if VLAN item is used to describe protocol.
856 * If yes, both spec and mask should be NULL.
857 * If no, both spec and mask shouldn't be NULL.
859 if ((!vlan_spec && vlan_mask) ||
860 (vlan_spec && !vlan_mask)) {
861 rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ITEM,
864 "Invalid VLAN item");
867 if (vlan_spec && vlan_mask) {
868 list[t].type = ICE_VLAN_OFOS;
869 if (vlan_mask->tci) {
870 list[t].h_u.vlan_hdr.vlan =
872 list[t].m_u.vlan_hdr.vlan =
874 input_set |= ICE_INSET_VLAN_OUTER;
876 if (vlan_mask->inner_type) {
877 list[t].h_u.vlan_hdr.type =
878 vlan_spec->inner_type;
879 list[t].m_u.vlan_hdr.type =
880 vlan_mask->inner_type;
881 input_set |= ICE_INSET_VLAN_OUTER;
/* --- PPPoE discovery/session: session id match --- */
887 case RTE_FLOW_ITEM_TYPE_PPPOED:
888 case RTE_FLOW_ITEM_TYPE_PPPOES:
889 pppoe_spec = item->spec;
890 pppoe_mask = item->mask;
891 /* Check if PPPoE item is used to describe protocol.
892 * If yes, both spec and mask should be NULL.
893 * If no, both spec and mask shouldn't be NULL.
895 if ((!pppoe_spec && pppoe_mask) ||
896 (pppoe_spec && !pppoe_mask)) {
897 rte_flow_error_set(error, EINVAL,
898 RTE_FLOW_ERROR_TYPE_ITEM,
900 "Invalid pppoe item");
903 if (pppoe_spec && pppoe_mask) {
904 /* Check pppoe mask and update input set */
905 if (pppoe_mask->length ||
907 pppoe_mask->version_type) {
908 rte_flow_error_set(error, EINVAL,
909 RTE_FLOW_ERROR_TYPE_ITEM,
911 "Invalid pppoe mask");
914 list[t].type = ICE_PPPOE;
915 if (pppoe_mask->session_id) {
916 list[t].h_u.pppoe_hdr.session_id =
917 pppoe_spec->session_id;
918 list[t].m_u.pppoe_hdr.session_id =
919 pppoe_mask->session_id;
920 input_set |= ICE_INSET_PPPOE_SESSION;
/* --- PPPoE proto-id: optional PPP protocol match --- */
927 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
928 pppoe_proto_spec = item->spec;
929 pppoe_proto_mask = item->mask;
930 /* Check if PPPoE optional proto_id item
931 * is used to describe protocol.
932 * If yes, both spec and mask should be NULL.
933 * If no, both spec and mask shouldn't be NULL.
935 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
936 (pppoe_proto_spec && !pppoe_proto_mask)) {
937 rte_flow_error_set(error, EINVAL,
938 RTE_FLOW_ERROR_TYPE_ITEM,
940 "Invalid pppoe proto item");
943 if (pppoe_proto_spec && pppoe_proto_mask) {
946 list[t].type = ICE_PPPOE;
947 if (pppoe_proto_mask->proto_id) {
948 list[t].h_u.pppoe_hdr.ppp_prot_id =
949 pppoe_proto_spec->proto_id;
950 list[t].m_u.pppoe_hdr.ppp_prot_id =
951 pppoe_proto_mask->proto_id;
952 input_set |= ICE_INSET_PPPOE_PROTO;
/* --- ESP/AH/L2TPv3: profile rules; item must carry no spec/mask --- */
958 case RTE_FLOW_ITEM_TYPE_ESP:
959 esp_spec = item->spec;
960 esp_mask = item->mask;
961 if (esp_spec || esp_mask) {
962 rte_flow_error_set(error, EINVAL,
963 RTE_FLOW_ERROR_TYPE_ITEM,
969 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
972 case RTE_FLOW_ITEM_TYPE_AH:
973 ah_spec = item->spec;
974 ah_mask = item->mask;
975 if (ah_spec || ah_mask) {
976 rte_flow_error_set(error, EINVAL,
977 RTE_FLOW_ERROR_TYPE_ITEM,
983 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
986 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
987 l2tp_spec = item->spec;
988 l2tp_mask = item->mask;
989 if (l2tp_spec || l2tp_mask) {
990 rte_flow_error_set(error, EINVAL,
991 RTE_FLOW_ERROR_TYPE_ITEM,
993 "Invalid l2tp item");
997 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
/* VOID items are skipped; anything else is rejected. */
1000 case RTE_FLOW_ITEM_TYPE_VOID:
1004 rte_flow_error_set(error, EINVAL,
1005 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1006 "Invalid pattern item.");
/*
 * Parse rte_flow actions when running as a DCF (device control function):
 * only the VF action is visibly handled — forward to the VSI identified by
 * the VF id.  Fills @rule_info (fltr_act, vsi_handle, src, priority).
 * Errors are reported through rte_flow_error_set().
 * NOTE(review): chunk is truncated; returns/breaks and some rule_info
 * fields set between the visible lines are not shown.
 */
1019 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1020 struct rte_flow_error *error,
1021 struct ice_adv_rule_info *rule_info)
1023 const struct rte_flow_action_vf *act_vf;
1024 const struct rte_flow_action *action;
1025 enum rte_flow_action_type action_type;
1027 for (action = actions; action->type !=
1028 RTE_FLOW_ACTION_TYPE_END; action++) {
1029 action_type = action->type;
1030 switch (action_type) {
1031 case RTE_FLOW_ACTION_TYPE_VF:
1032 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1033 act_vf = action->conf;
/* VF id is used directly as the VSI handle. */
1034 rule_info->sw_act.vsi_handle = act_vf->id;
1037 rte_flow_error_set(error,
1038 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1040 "Invalid action type or queue number");
/* Source VSI = destination VSI for DCF-installed rules. */
1045 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1047 rule_info->priority = 5;
/*
 * Parse rte_flow actions for the normal (non-DCF) case and fill
 * @rule_info.  Visible handling:
 *  - RSS: treated as forward-to-queue-group; group size must be one of
 *    valid_qgrop_number[], queues must be consecutive and in range.
 *  - QUEUE: forward to a single in-range queue.
 *  - DROP / VOID.
 * Queue ids are offset by base_queue (PF base + VSI base).
 * Errors are reported through rte_flow_error_set().
 * NOTE(review): chunk is truncated — gotos to the error label, breaks and
 * some fltr_act enum values are on lines not visible here.
 */
1053 ice_switch_parse_action(struct ice_pf *pf,
1054 const struct rte_flow_action *actions,
1055 struct rte_flow_error *error,
1056 struct ice_adv_rule_info *rule_info)
1058 struct ice_vsi *vsi = pf->main_vsi;
1059 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1060 const struct rte_flow_action_queue *act_q;
1061 const struct rte_flow_action_rss *act_qgrop;
1062 uint16_t base_queue, i;
1063 const struct rte_flow_action *action;
1064 enum rte_flow_action_type action_type;
/* Hardware-supported queue-group sizes (powers of two, 2..128). */
1065 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1066 2, 4, 8, 16, 32, 64, 128};
1068 base_queue = pf->base_queue + vsi->base_queue;
1069 for (action = actions; action->type !=
1070 RTE_FLOW_ACTION_TYPE_END; action++) {
1071 action_type = action->type;
1072 switch (action_type) {
1073 case RTE_FLOW_ACTION_TYPE_RSS:
1074 act_qgrop = action->conf;
1075 rule_info->sw_act.fltr_act =
1077 rule_info->sw_act.fwd_id.q_id =
1078 base_queue + act_qgrop->queue[0];
/* Group size must exactly match one of the supported sizes. */
1079 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1080 if (act_qgrop->queue_num ==
1081 valid_qgrop_number[i])
1084 if (i == MAX_QGRP_NUM_TYPE)
/* Whole group must fit inside the configured Rx queues. */
1086 if ((act_qgrop->queue[0] +
1087 act_qgrop->queue_num) >
1088 dev->data->nb_rx_queues)
/* Queues in the group must be consecutive. */
1090 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1091 if (act_qgrop->queue[i + 1] !=
1092 act_qgrop->queue[i] + 1)
1094 rule_info->sw_act.qgrp_size =
1095 act_qgrop->queue_num;
1097 case RTE_FLOW_ACTION_TYPE_QUEUE:
1098 act_q = action->conf;
1099 if (act_q->index >= dev->data->nb_rx_queues)
1101 rule_info->sw_act.fltr_act =
1103 rule_info->sw_act.fwd_id.q_id =
1104 base_queue + act_q->index;
1107 case RTE_FLOW_ACTION_TYPE_DROP:
1108 rule_info->sw_act.fltr_act =
1112 case RTE_FLOW_ACTION_TYPE_VOID:
/* Rule is anchored to the main VSI. */
1120 rule_info->sw_act.vsi_handle = vsi->idx;
1122 rule_info->sw_act.src = vsi->idx;
1123 rule_info->priority = 5;
/* Shared error exit (label truncated in view). */
1128 rte_flow_error_set(error,
1129 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1131 "Invalid action type or queue number");
/*
 * Pre-validate the action list: only VF/RSS/QUEUE/DROP (counted) and
 * VOID (ignored) are accepted, and at most one counted action is allowed.
 * Errors are reported through rte_flow_error_set().
 * NOTE(review): chunk is truncated — the actions_num increment, breaks
 * and returns sit on lines not visible here.
 */
1136 ice_switch_check_action(const struct rte_flow_action *actions,
1137 struct rte_flow_error *error)
1139 const struct rte_flow_action *action;
1140 enum rte_flow_action_type action_type;
1141 uint16_t actions_num = 0;
1143 for (action = actions; action->type !=
1144 RTE_FLOW_ACTION_TYPE_END; action++) {
1145 action_type = action->type;
1146 switch (action_type) {
1147 case RTE_FLOW_ACTION_TYPE_VF:
1148 case RTE_FLOW_ACTION_TYPE_RSS:
1149 case RTE_FLOW_ACTION_TYPE_QUEUE:
1150 case RTE_FLOW_ACTION_TYPE_DROP:
1153 case RTE_FLOW_ACTION_TYPE_VOID:
1156 rte_flow_error_set(error,
1157 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1159 "Invalid action type");
/* Exactly one fate action is supported per rule. */
1164 if (actions_num > 1) {
1165 rte_flow_error_set(error,
1166 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1168 "Invalid action number");
/*
 * Return whether @tun_type denotes a profile rule (ESP/AH/L2TPv3),
 * i.e. a rule matched by profile id rather than by field input set.
 * NOTE(review): switch header/body lines are truncated in this view.
 */
1176 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1179 case ICE_SW_TUN_PROFID_IPV6_ESP:
1180 case ICE_SW_TUN_PROFID_IPV6_AH:
1181 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
/*
 * Parser entry point: validate @pattern/@actions against the pattern
 * table @array, build the lookup list and rule_info, and hand them back
 * via @meta (a heap-allocated struct sw_meta) for ice_switch_create().
 * Steps visible here:
 *  1. Pre-scan items to pick tun_type (VXLAN/NVGRE/PPPoE; an ETH item
 *     with a fully-masked ethertype reverts to TUN_AND_NON_TUN).
 *  2. Allocate the lookup list (one extra slot per ETH item, which may
 *     consume 2 lookup elements) and sw_meta.
 *  3. Match pattern against the table, collect the input set, and check
 *     it against the table's allowed mask (profile rules may have none).
 *  4. Validate and parse actions (DCF vs normal path).
 * On error all allocations are freed via the (truncated) error path.
 * NOTE(review): chunk is truncated — gotos, item_num counting and some
 * returns are on lines not visible here.
 */
1191 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1192 struct ice_pattern_match_item *array,
1194 const struct rte_flow_item pattern[],
1195 const struct rte_flow_action actions[],
1197 struct rte_flow_error *error)
1199 struct ice_pf *pf = &ad->pf;
1200 uint64_t inputset = 0;
1202 struct sw_meta *sw_meta_ptr = NULL;
1203 struct ice_adv_rule_info rule_info;
1204 struct ice_adv_lkup_elem *list = NULL;
1205 uint16_t lkups_num = 0;
1206 const struct rte_flow_item *item = pattern;
1207 uint16_t item_num = 0;
1208 enum ice_sw_tunnel_type tun_type =
1209 ICE_SW_TUN_AND_NON_TUN;
1210 struct ice_pattern_match_item *pattern_match_item = NULL;
1212 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1214 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1215 tun_type = ICE_SW_TUN_VXLAN;
1216 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1217 tun_type = ICE_SW_TUN_NVGRE;
1218 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1219 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1220 tun_type = ICE_SW_TUN_PPPOE;
1221 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1222 const struct rte_flow_item_eth *eth_mask;
1224 eth_mask = item->mask;
/* Fully-masked ethertype means match both tunneled and plain. */
1227 if (eth_mask->type == UINT16_MAX)
1228 tun_type = ICE_SW_TUN_AND_NON_TUN;
1230 /* reserve one more memory slot for ETH which may
1231 * consume 2 lookup items.
1233 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1237 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1239 rte_flow_error_set(error, EINVAL,
1240 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1241 "No memory for PMD internal items");
1246 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1248 rte_flow_error_set(error, EINVAL,
1249 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1250 "No memory for sw_pattern_meta_ptr");
1254 pattern_match_item =
1255 ice_search_pattern_match_item(pattern, array, array_len, error);
1256 if (!pattern_match_item) {
1257 rte_flow_error_set(error, EINVAL,
1258 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1259 "Invalid input pattern");
1263 inputset = ice_switch_inset_get
1264 (pattern, error, list, &lkups_num, &tun_type);
/* Empty input set is only legal for profile rules; fields outside the
 * table's allowed mask are rejected. */
1265 if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1266 (inputset & ~pattern_match_item->input_set_mask)) {
1267 rte_flow_error_set(error, EINVAL,
1268 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1270 "Invalid input set");
1274 rule_info.tun_type = tun_type;
1276 ret = ice_switch_check_action(actions, error);
1278 rte_flow_error_set(error, EINVAL,
1279 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1280 "Invalid input action number");
1284 if (ad->hw.dcf_enabled)
1285 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1287 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1290 rte_flow_error_set(error, EINVAL,
1291 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1292 "Invalid input action");
/* Hand ownership of list/lkups_num/rule_info to the caller via meta. */
1297 *meta = sw_meta_ptr;
1298 ((struct sw_meta *)*meta)->list = list;
1299 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1300 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error-path cleanup (labels truncated in view). */
1303 rte_free(sw_meta_ptr);
1306 rte_free(pattern_match_item);
1312 rte_free(sw_meta_ptr);
1313 rte_free(pattern_match_item);
/*
 * Engine "query_count" callback: the switch filter has no counters, so
 * this always reports the COUNT action as unsupported.
 */
1319 ice_switch_query(struct ice_adapter *ad __rte_unused,
1320 struct rte_flow *flow __rte_unused,
1321 struct rte_flow_query_count *count __rte_unused,
1322 struct rte_flow_error *error)
1324 rte_flow_error_set(error, EINVAL,
1325 RTE_FLOW_ERROR_TYPE_HANDLE,
1327 "count action not supported by switch filter");
/*
 * Engine init: pick the distributor parser matching the loaded DDP
 * package (comms vs OS-default), then register either the permission
 * parser (pipe_mode_support devarg) or the distributor parser.
 * NOTE(review): the branch for an unknown package type is truncated.
 */
1333 ice_switch_init(struct ice_adapter *ad)
1336 struct ice_flow_parser *dist_parser;
1337 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1339 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1340 dist_parser = &ice_switch_dist_parser_comms;
1341 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1342 dist_parser = &ice_switch_dist_parser_os;
1346 if (ad->devargs.pipe_mode_support)
1347 ret = ice_register_parser(perm_parser, ad);
1349 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine uninit: mirror of ice_switch_init() — unregister whichever
 * parser was registered for the active package / devarg combination.
 */
1354 ice_switch_uninit(struct ice_adapter *ad)
1356 struct ice_flow_parser *dist_parser;
1357 struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1359 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1360 dist_parser = &ice_switch_dist_parser_comms;
1362 dist_parser = &ice_switch_dist_parser_os;
1364 if (ad->devargs.pipe_mode_support)
1365 ice_unregister_parser(perm_parser, ad);
1367 ice_unregister_parser(dist_parser, ad);
/*
 * The switch-filter flow engine: vtable wiring this file's callbacks
 * into the generic ice flow framework.
 * NOTE(review): the declaration's leading storage-class/struct line is
 * truncated in this view.
 */
1371 ice_flow_engine ice_switch_engine = {
1372 .init = ice_switch_init,
1373 .uninit = ice_switch_uninit,
1374 .create = ice_switch_create,
1375 .destroy = ice_switch_destroy,
1376 .query_count = ice_switch_query,
1377 .free = ice_switch_filter_rule_free,
1378 .type = ICE_FLOW_ENGINE_SWITCH,
/*
 * Parser instances bound to the engine: two distributor-stage parsers
 * (per DDP package) and one permission-stage parser, each pairing its
 * pattern table with ice_switch_parse_pattern_action().
 */
1382 ice_flow_parser ice_switch_dist_parser_os = {
1383 .engine = &ice_switch_engine,
1384 .array = ice_switch_pattern_dist_os,
1385 .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1386 .parse_pattern_action = ice_switch_parse_pattern_action,
1387 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,

1391 ice_flow_parser ice_switch_dist_parser_comms = {
1392 .engine = &ice_switch_engine,
1393 .array = ice_switch_pattern_dist_comms,
1394 .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1395 .parse_pattern_action = ice_switch_parse_pattern_action,
1396 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,

1400 ice_flow_parser ice_switch_perm_parser = {
1401 .engine = &ice_switch_engine,
1402 .array = ice_switch_pattern_perm,
1403 .array_len = RTE_DIM(ice_switch_pattern_perm),
1404 .parse_pattern_action = ice_switch_parse_pattern_action,
1405 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the flow framework at
 * DPDK init time. */
1408 RTE_INIT(ice_sw_engine_init)
1410 struct ice_flow_engine *engine = &ice_switch_engine;
1411 ice_register_flow_engine(engine);