net/ice: support MAC VLAN rule
drivers/net/ice/ice_switch_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"


#define MAX_QGRP_NUM_TYPE 7

#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
        ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
        ICE_INSET_PPPOE_PROTO)
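
/*
 * Each ICE_SW_INSET_* bitmap above describes the header fields a switch
 * rule may match for one pattern. ICE_SW_INSET_MAC_VLAN is the set added
 * for the MAC VLAN rule: MAC addresses, EtherType and the outer VLAN tag.
 */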

struct sw_meta {
        struct ice_adv_lkup_elem *list;
        uint16_t lkups_num;
        struct ice_adv_rule_info rule_info;
};

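/*
 * Forward declarations of the switch-filter parsers: the "dist" parsers
 * serve the distributor stage and "perm" the permission stage; the
 * _os/_comms variants follow the DDP package (OS default or comms) in use.
 */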
static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;

static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
};

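/*
 * Program one parsed rule into the switch: hand the lookup list to
 * ice_add_adv_rule() and keep the returned rule id in flow->rule so the
 * rule can be removed later. The parser meta buffers are freed on both
 * the success and the error path.
 */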
static int
ice_switch_create(struct ice_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        int ret = 0;
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
        struct ice_rule_query_data *filter_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
                ((struct sw_meta *)meta)->lkups_num;
        struct ice_adv_rule_info *rule_info =
                &((struct sw_meta *)meta)->rule_info;

        if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "item number too large for rule");
                goto error;
        }
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "lookup list should not be NULL");
                goto error;
        }
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
                filter_ptr = rte_zmalloc("ice_switch_filter",
                        sizeof(struct ice_rule_query_data), 0);
                if (!filter_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
                flow->rule = filter_ptr;
                rte_memcpy(filter_ptr,
                        &rule_added,
                        sizeof(struct ice_rule_query_data));
        } else {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "switch filter create flow failed");
                goto error;
        }

        rte_free(list);
        rte_free(meta);
        return 0;

error:
        rte_free(list);
        rte_free(meta);

        return -rte_errno;
}

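/*
 * Remove a rule previously added by ice_switch_create(), using the rule
 * id kept in flow->rule, then release the bookkeeping memory.
 */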
static int
ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_hw *hw = &ad->hw;
        int ret;
        struct ice_rule_query_data *filter_ptr;

        filter_ptr = (struct ice_rule_query_data *)
                flow->rule;

        if (!filter_ptr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow created by switch filter");
                return -rte_errno;
        }

        ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to destroy switch filter rule");
                return -rte_errno;
        }

        rte_free(filter_ptr);
        return ret;
}

static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
        rte_free(flow->rule);
}

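/*
 * Translate each item of a flow pattern into an ice_adv_lkup_elem and
 * accumulate the matched fields into an input set bitmap. Returns the
 * bitmap (0 on error) and stores the number of lookup elements produced
 * in *lkups_num.
 */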
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
                struct ice_adv_lkup_elem *list,
                uint16_t *lkups_num,
                enum ice_sw_tunnel_type tun_type)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
        const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
                                *pppoe_proto_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint16_t j, t = 0;
        uint16_t tunnel_valid = 0;
        uint16_t pppoe_valid = 0;

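        /*
         * tunnel_valid is set once a VXLAN or NVGRE item has been seen;
         * ETH/IPv4/IPv6/L4 items after that point describe the inner
         * headers and are mapped to the *_IL lookup types and the
         * ICE_INSET_TUN_* input set bits.
         */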
        for (item = pattern; item->type !=
                        RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range not supported");
                        return 0;
                }
                item_type = item->type;

                switch (item_type) {
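                /*
                 * An ETH item may emit two lookup elements: one for the
                 * MAC addresses (ICE_MAC_OFOS or ICE_MAC_IL) and, when
                 * the EtherType is masked, an extra ICE_ETYPE_OL element.
                 */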
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        if (eth_spec && eth_mask) {
                                const uint8_t *a = eth_mask->src.addr_bytes;
                                const uint8_t *b = eth_mask->dst.addr_bytes;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (a[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_SMAC;
                                                break;
                                        } else if (a[j]) {
                                                input_set |=
                                                        ICE_INSET_SMAC;
                                                break;
                                        }
                                }
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (b[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_DMAC;
                                                break;
                                        } else if (b[j]) {
                                                input_set |=
                                                        ICE_INSET_DMAC;
                                                break;
                                        }
                                }
                                if (eth_mask->type)
                                        input_set |= ICE_INSET_ETHERTYPE;
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                                struct ice_ether_hdr *h;
                                struct ice_ether_hdr *m;
                                uint16_t i = 0;
                                h = &list[t].h_u.eth_hdr;
                                m = &list[t].m_u.eth_hdr;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (eth_mask->src.addr_bytes[j]) {
                                                h->src_addr[j] =
                                                eth_spec->src.addr_bytes[j];
                                                m->src_addr[j] =
                                                eth_mask->src.addr_bytes[j];
                                                i = 1;
                                        }
                                        if (eth_mask->dst.addr_bytes[j]) {
                                                h->dst_addr[j] =
                                                eth_spec->dst.addr_bytes[j];
                                                m->dst_addr[j] =
                                                eth_mask->dst.addr_bytes[j];
                                                i = 1;
                                        }
                                }
                                if (i)
                                        t++;
                                if (eth_mask->type) {
                                        list[t].type = ICE_ETYPE_OL;
                                        list[t].h_u.ethertype.ethtype_id =
                                                eth_spec->type;
                                        list[t].m_u.ethertype.ethtype_id =
                                                eth_mask->type;
                                        t++;
                                }
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TOS;
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_TUN_IPV4_PROTO;
                                } else {
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |= ICE_INSET_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |= ICE_INSET_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |= ICE_INSET_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_IPV4_PROTO;
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_IPV4_TOS;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                                if (ipv4_mask->hdr.src_addr) {
                                        list[t].h_u.ipv4_hdr.src_addr =
                                                ipv4_spec->hdr.src_addr;
                                        list[t].m_u.ipv4_hdr.src_addr =
                                                ipv4_mask->hdr.src_addr;
                                }
                                if (ipv4_mask->hdr.dst_addr) {
                                        list[t].h_u.ipv4_hdr.dst_addr =
                                                ipv4_spec->hdr.dst_addr;
                                        list[t].m_u.ipv4_hdr.dst_addr =
                                                ipv4_mask->hdr.dst_addr;
                                }
                                if (ipv4_mask->hdr.time_to_live) {
                                        list[t].h_u.ipv4_hdr.time_to_live =
                                                ipv4_spec->hdr.time_to_live;
                                        list[t].m_u.ipv4_hdr.time_to_live =
                                                ipv4_mask->hdr.time_to_live;
                                }
                                if (ipv4_mask->hdr.next_proto_id) {
                                        list[t].h_u.ipv4_hdr.protocol =
                                                ipv4_spec->hdr.next_proto_id;
                                        list[t].m_u.ipv4_hdr.protocol =
                                                ipv4_mask->hdr.next_proto_id;
                                }
                                if (ipv4_mask->hdr.type_of_service) {
                                        list[t].h_u.ipv4_hdr.tos =
                                                ipv4_spec->hdr.type_of_service;
                                        list[t].m_u.ipv4_hdr.tos =
                                                ipv4_mask->hdr.type_of_service;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                        return 0;
                                }

                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_SRC;
                                                break;
                                        } else if (ipv6_mask->hdr.src_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_SRC;
                                                break;
                                        }
                                }
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.dst_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_DST;
                                                break;
                                        } else if (ipv6_mask->hdr.dst_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_DST;
                                                break;
                                        }
                                }
                                if (ipv6_mask->hdr.proto &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_NEXT_HDR;
                                else if (ipv6_mask->hdr.proto)
                                        input_set |=
                                                ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_HOP_LIMIT;
                                else if (ipv6_mask->hdr.hop_limits)
                                        input_set |=
                                                ICE_INSET_IPV6_HOP_LIMIT;
                                if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) &&
                                        tunnel_valid)
                                        input_set |=
                                                        ICE_INSET_TUN_IPV6_TC;
                                else if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;

                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                                struct ice_ipv6_hdr *f;
                                struct ice_ipv6_hdr *s;
                                f = &list[t].h_u.ipv6_hdr;
                                s = &list[t].m_u.ipv6_hdr;
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j]) {
                                                f->src_addr[j] =
                                                ipv6_spec->hdr.src_addr[j];
                                                s->src_addr[j] =
                                                ipv6_mask->hdr.src_addr[j];
                                        }
                                        if (ipv6_mask->hdr.dst_addr[j]) {
                                                f->dst_addr[j] =
                                                ipv6_spec->hdr.dst_addr[j];
                                                s->dst_addr[j] =
                                                ipv6_mask->hdr.dst_addr[j];
                                        }
                                }
                                if (ipv6_mask->hdr.proto) {
                                        f->next_hdr =
                                                ipv6_spec->hdr.proto;
                                        s->next_hdr =
                                                ipv6_mask->hdr.proto;
                                }
                                if (ipv6_mask->hdr.hop_limits) {
                                        f->hop_limit =
                                                ipv6_spec->hdr.hop_limits;
                                        s->hop_limit =
                                                ipv6_mask->hdr.hop_limits;
                                }
                                if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) {
                                        struct ice_le_ver_tc_flow vtf;
                                        vtf.u.fld.version = 0;
                                        vtf.u.fld.flow_label = 0;
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_spec->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_mask->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_DST_PORT;
                                } else {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_UDP_DST_PORT;
                                }
                                if (tun_type == ICE_SW_TUN_VXLAN &&
                                                tunnel_valid == 0)
                                        list[t].type = ICE_UDP_OF;
                                else
                                        list[t].type = ICE_UDP_ILOS;
                                if (udp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                udp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                udp_mask->hdr.src_port;
                                }
                                if (udp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                udp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                udp_mask->hdr.dst_port;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_DST_PORT;
                                } else {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TCP_DST_PORT;
                                }
                                list[t].type = ICE_TCP_IL;
                                if (tcp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                tcp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                tcp_mask->hdr.src_port;
                                }
                                if (tcp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                tcp_mask->hdr.dst_port;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_DST_PORT;
                                } else {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_SCTP_DST_PORT;
                                }
                                list[t].type = ICE_SCTP_IL;
                                if (sctp_mask->hdr.src_port) {
                                        list[t].h_u.sctp_hdr.src_port =
                                                sctp_spec->hdr.src_port;
                                        list[t].m_u.sctp_hdr.src_port =
                                                sctp_mask->hdr.src_port;
                                }
                                if (sctp_mask->hdr.dst_port) {
                                        list[t].h_u.sctp_hdr.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        list[t].m_u.sctp_hdr.dst_port =
                                                sctp_mask->hdr.dst_port;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }

                        tunnel_valid = 1;
                        if (vxlan_spec && vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                                if (vxlan_mask->vni[0] ||
                                        vxlan_mask->vni[1] ||
                                        vxlan_mask->vni[2]) {
                                        list[t].h_u.tnl_hdr.vni =
                                                (vxlan_spec->vni[2] << 16) |
                                                (vxlan_spec->vni[1] << 8) |
                                                vxlan_spec->vni[0];
                                        list[t].m_u.tnl_hdr.vni =
                                                (vxlan_mask->vni[2] << 16) |
                                                (vxlan_mask->vni[1] << 8) |
                                                vxlan_mask->vni[0];
                                        input_set |=
                                                ICE_INSET_TUN_VXLAN_VNI;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        tunnel_valid = 1;
                        if (nvgre_spec && nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                                if (nvgre_mask->tni[0] ||
                                        nvgre_mask->tni[1] ||
                                        nvgre_mask->tni[2]) {
                                        list[t].h_u.nvgre_hdr.tni_flow =
                                                (nvgre_spec->tni[2] << 16) |
                                                (nvgre_spec->tni[1] << 8) |
                                                nvgre_spec->tni[0];
                                        list[t].m_u.nvgre_hdr.tni_flow =
                                                (nvgre_mask->tni[2] << 16) |
                                                (nvgre_mask->tni[1] << 8) |
                                                nvgre_mask->tni[0];
                                        input_set |=
                                                ICE_INSET_TUN_NVGRE_TNI;
                                }
                                t++;
                        }
                        break;

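                /*
                 * Only a single (outer) VLAN tag is matched here, which is
                 * what backs the MAC VLAN rule. For illustration only
                 * (testpmd syntax, not part of this file), such a rule
                 * might be created as:
                 *   flow create 0 ingress pattern eth dst is
                 *   00:11:22:33:44:55 / vlan tci is 100 / end
                 *   actions queue index 3 / end
                 */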
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        /* Check if VLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vlan_spec && vlan_mask) ||
                            (vlan_spec && !vlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VLAN item");
                                return 0;
                        }
                        if (vlan_spec && vlan_mask) {
                                list[t].type = ICE_VLAN_OFOS;
                                if (vlan_mask->tci) {
                                        list[t].h_u.vlan_hdr.vlan =
                                                vlan_spec->tci;
                                        list[t].m_u.vlan_hdr.vlan =
                                                vlan_mask->tci;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                if (vlan_mask->inner_type) {
                                        list[t].h_u.vlan_hdr.type =
                                                vlan_spec->inner_type;
                                        list[t].m_u.vlan_hdr.type =
                                                vlan_mask->inner_type;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_PPPOED:
                case RTE_FLOW_ITEM_TYPE_PPPOES:
                        pppoe_spec = item->spec;
                        pppoe_mask = item->mask;
                        /* Check if PPPoE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!pppoe_spec && pppoe_mask) ||
                                (pppoe_spec && !pppoe_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Invalid pppoe item");
                                return 0;
                        }
                        if (pppoe_spec && pppoe_mask) {
                                /* Check pppoe mask and update input set */
                                if (pppoe_mask->length ||
                                        pppoe_mask->code ||
                                        pppoe_mask->version_type) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Invalid pppoe mask");
                                        return 0;
                                }
                                list[t].type = ICE_PPPOE;
                                if (pppoe_mask->session_id) {
                                        list[t].h_u.pppoe_hdr.session_id =
                                                pppoe_spec->session_id;
                                        list[t].m_u.pppoe_hdr.session_id =
                                                pppoe_mask->session_id;
                                        input_set |= ICE_INSET_PPPOE_SESSION;
                                }
                                t++;
                                pppoe_valid = 1;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
                        pppoe_proto_spec = item->spec;
                        pppoe_proto_mask = item->mask;
                        /* Check if PPPoE optional proto_id item
                         * is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!pppoe_proto_spec && pppoe_proto_mask) ||
                                (pppoe_proto_spec && !pppoe_proto_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Invalid pppoe proto item");
                                return 0;
                        }
                        if (pppoe_proto_spec && pppoe_proto_mask) {
                                if (pppoe_valid)
                                        t--;
                                list[t].type = ICE_PPPOE;
                                if (pppoe_proto_mask->proto_id) {
                                        list[t].h_u.pppoe_hdr.ppp_prot_id =
                                                pppoe_proto_spec->proto_id;
                                        list[t].m_u.pppoe_hdr.ppp_prot_id =
                                                pppoe_proto_mask->proto_id;
                                        input_set |= ICE_INSET_PPPOE_PROTO;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                   "Invalid pattern item.");
                        goto out;
                }
        }

        *lkups_num = t;

        return input_set;
out:
        return 0;
}

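/*
 * Action parsing for a DCF (device config function) port: the only
 * supported action is VF, which forwards matched packets to the VSI of
 * the given VF id.
 */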
static int
ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct ice_adv_rule_info *rule_info)
{
        const struct rte_flow_action_vf *act_vf;
        const struct rte_flow_action *action;
        enum rte_flow_action_type action_type;

        for (action = actions; action->type !=
                                RTE_FLOW_ACTION_TYPE_END; action++) {
                action_type = action->type;
                switch (action_type) {
                case RTE_FLOW_ACTION_TYPE_VF:
                        rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
                        act_vf = action->conf;
                        rule_info->sw_act.vsi_handle = act_vf->id;
                        break;
                default:
                        rte_flow_error_set(error,
                                           EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions,
                                           "Invalid action type or queue number");
                        return -rte_errno;
                }
        }

        rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
        rule_info->rx = 1;
        rule_info->priority = 5;

        return 0;
}

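/*
 * Action parsing for a regular port: queue, queue group (expressed as an
 * RSS action) and drop are supported; anything else is rejected.
 */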
static int
ice_switch_parse_action(struct ice_pf *pf,
                const struct rte_flow_action *actions,
                struct rte_flow_error *error,
                struct ice_adv_rule_info *rule_info)
{
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_rss *act_qgrop;
        uint16_t base_queue, i;
        const struct rte_flow_action *action;
        enum rte_flow_action_type action_type;
        uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
                 2, 4, 8, 16, 32, 64, 128};
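        /*
         * A queue group must use one of the sizes listed above, and its
         * queues must be consecutive; both conditions are checked in the
         * RSS branch below.
         */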
1008
1009         base_queue = pf->base_queue + vsi->base_queue;
1010         for (action = actions; action->type !=
1011                         RTE_FLOW_ACTION_TYPE_END; action++) {
1012                 action_type = action->type;
1013                 switch (action_type) {
1014                 case RTE_FLOW_ACTION_TYPE_RSS:
1015                         act_qgrop = action->conf;
1016                         rule_info->sw_act.fltr_act =
1017                                 ICE_FWD_TO_QGRP;
1018                         rule_info->sw_act.fwd_id.q_id =
1019                                 base_queue + act_qgrop->queue[0];
1020                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1021                                 if (act_qgrop->queue_num ==
1022                                         valid_qgrop_number[i])
1023                                         break;
1024                         }
1025                         if (i == MAX_QGRP_NUM_TYPE)
1026                                 goto error;
1027                         if ((act_qgrop->queue[0] +
1028                                 act_qgrop->queue_num) >
1029                                 dev->data->nb_rx_queues)
1030                                 goto error;
1031                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1032                                 if (act_qgrop->queue[i + 1] !=
1033                                         act_qgrop->queue[i] + 1)
1034                                         goto error;
1035                         rule_info->sw_act.qgrp_size =
1036                                 act_qgrop->queue_num;
1037                         break;
1038                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1039                         act_q = action->conf;
1040                         if (act_q->index >= dev->data->nb_rx_queues)
1041                                 goto error;
1042                         rule_info->sw_act.fltr_act =
1043                                 ICE_FWD_TO_Q;
1044                         rule_info->sw_act.fwd_id.q_id =
1045                                 base_queue + act_q->index;
1046                         break;
1047
1048                 case RTE_FLOW_ACTION_TYPE_DROP:
1049                         rule_info->sw_act.fltr_act =
1050                                 ICE_DROP_PACKET;
1051                         break;
1052
1053                 case RTE_FLOW_ACTION_TYPE_VOID:
1054                         break;
1055
1056                 default:
1057                         goto error;
1058                 }
1059         }
1060
1061         rule_info->sw_act.vsi_handle = vsi->idx;
1062         rule_info->rx = 1;
1063         rule_info->sw_act.src = vsi->idx;
1064         rule_info->priority = 5;
1065
1066         return 0;
1067
1068 error:
1069         rte_flow_error_set(error,
1070                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1071                 actions,
1072                 "Invalid action type or queue number");
1073         return -rte_errno;
1074 }
1075
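/* Top-level parser for the switch engine: walk the pattern once to
 * size the lookup list and detect the tunnel type, validate the
 * pattern against the supported pattern/input-set table, build the
 * lookup elements, then parse the actions (DCF or regular PF path).
 * On success the allocated list and rule_info are handed back via
 * *meta for the later rule-create step.
 */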
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	/* Zero-init so unset sw_act fields cannot leak stack garbage
	 * into the rule.
	 */
	struct ice_adv_rule_info rule_info = {0};
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
				item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
			tun_type = ICE_SW_TUN_PPPOE;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;

			/* reserve one more memory slot for ETH which may
			 * consume 2 lookup items.
			 */
			item_num++;
			if (!item->mask)
				continue;
			eth_mask = item->mask;
			/* A fully masked EtherType means the rule must
			 * hit both tunneled and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	rule_info.tun_type = tun_type;

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Reject the rule if it matches fields outside the input set
	 * supported for this pattern.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, tun_type);
	if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action");
		goto error;
	}

	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}

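/* Flow query hook: the switch filter has no counter support, so any
 * COUNT query is rejected outright.
 */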
static int
ice_switch_query(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_query_count *count __rte_unused,
		struct rte_flow_error *error)
{
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_HANDLE,
		NULL,
		"count action not supported by switch filter");

	return -rte_errno;
}

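/* Pick the distributor parser matching the loaded DDP package (comms
 * vs. OS default); pipe mode registers the permission stage parser
 * instead.
 */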
static int
ice_switch_init(struct ice_adapter *ad)
{
	int ret = 0;
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		dist_parser = &ice_switch_dist_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		dist_parser = &ice_switch_dist_parser_os;
	else
		return -EINVAL;

	if (ad->devargs.pipe_mode_support)
		ret = ice_register_parser(perm_parser, ad);
	else
		ret = ice_register_parser(dist_parser, ad);
	return ret;
}

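/* Mirror of ice_switch_init(): unregister whichever parser was
 * registered for this adapter.
 */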
static void
ice_switch_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		dist_parser = &ice_switch_dist_parser_comms;
	else
		dist_parser = &ice_switch_dist_parser_os;

	if (ad->devargs.pipe_mode_support)
		ice_unregister_parser(perm_parser, ad);
	else
		ice_unregister_parser(dist_parser, ad);
}

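/* Engine vtable tying the switch filter into the generic flow
 * framework: the parsers below feed ice_switch_parse_pattern_action,
 * while create/destroy/query/free are the per-rule operations.
 */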
static struct ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};

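/* Three parser flavours share the same parse routine and differ only
 * in the pattern table they accept and the pipeline stage they bind
 * to: one distributor table per DDP package, plus the permission
 * table used in pipe mode.
 */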
static struct ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};

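/* Constructor: register the switch engine with the generic flow layer
 * at load time, before any adapter is probed.
 */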
RTE_INIT(ice_sw_engine_init)
{
	struct ice_flow_engine *engine = &ice_switch_engine;
	ice_register_flow_engine(engine);
}