/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"

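/* Number of queue group sizes the RSS action accepts (2..128, powers of 2). */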
#define MAX_QGRP_NUM_TYPE 7

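/* ICE_SW_INSET_* bitmaps list the fields each supported pattern may match.
 * A rule whose input set contains any bit outside the mask paired with its
 * pattern below is rejected at parse time.
 */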
#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)

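/* Parser output passed to ice_switch_create(): the lookup list, its element
 * count and the rule info consumed by ice_add_adv_rule().
 */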
struct sw_meta {
        struct ice_adv_lkup_elem *list;
        uint16_t lkups_num;
        struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;

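/* Pattern tables: {pattern, supported input set mask, meta (unused)}.
 * The "dist" tables serve the distributor stage and differ per DDP package
 * (comms vs. OS); the "perm" table serves the permission stage used when
 * pipe mode is enabled.
 */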
static struct ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_switch_pattern_dist_os[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_switch_pattern_perm[] = {
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
};

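/* Program one switch rule from the parsed meta. On success the rule id
 * returned by ice_add_adv_rule() is stored in flow->rule for later removal;
 * the parser-allocated lookup list and meta are freed on all paths.
 */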
static int
ice_switch_create(struct ice_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        int ret = 0;
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
        struct ice_rule_query_data *filter_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
                ((struct sw_meta *)meta)->lkups_num;
        struct ice_adv_rule_info *rule_info =
                &((struct sw_meta *)meta)->rule_info;

        if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "item number too large for rule");
                goto error;
        }
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "lookup list should not be NULL");
                goto error;
        }
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
                filter_ptr = rte_zmalloc("ice_switch_filter",
                        sizeof(struct ice_rule_query_data), 0);
                if (!filter_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
                flow->rule = filter_ptr;
                rte_memcpy(filter_ptr,
                        &rule_added,
                        sizeof(struct ice_rule_query_data));
        } else {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "switch filter create flow failed");
                goto error;
        }

        rte_free(list);
        rte_free(meta);
        return 0;

error:
        rte_free(list);
        rte_free(meta);

        return -rte_errno;
}

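/* Remove the switch rule referenced by flow->rule and free the rule data. */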
static int
ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_hw *hw = &ad->hw;
        int ret;
        struct ice_rule_query_data *filter_ptr;

        filter_ptr = (struct ice_rule_query_data *)flow->rule;

        if (!filter_ptr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow created by switch filter");
                return -rte_errno;
        }

        ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to destroy switch filter rule");
                return -rte_errno;
        }

        rte_free(filter_ptr);
        return ret;
}

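/* Engine callback: release the rule data attached to a flow. */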
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
        rte_free(flow->rule);
}

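/* Walk the pattern items, fill the advanced-rule lookup list and count the
 * elements used. Returns the accumulated input set bitmap, or 0 with the
 * flow error set when an item or mask is not supported.
 */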
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
                struct ice_adv_lkup_elem *list,
                uint16_t *lkups_num,
                enum ice_sw_tunnel_type tun_type)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
        uint64_t input_set = ICE_INSET_NONE;
        uint16_t j, t = 0;
        uint16_t tunnel_valid = 0;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range not supported");
                        return 0;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        if (eth_spec && eth_mask) {
                                if (tunnel_valid &&
                                    rte_is_broadcast_ether_addr(&eth_mask->src))
                                        input_set |= ICE_INSET_TUN_SMAC;
                                else if (rte_is_broadcast_ether_addr(
                                                &eth_mask->src))
                                        input_set |= ICE_INSET_SMAC;
                                if (tunnel_valid &&
                                    rte_is_broadcast_ether_addr(&eth_mask->dst))
                                        input_set |= ICE_INSET_TUN_DMAC;
                                else if (rte_is_broadcast_ether_addr(
                                                &eth_mask->dst))
                                        input_set |= ICE_INSET_DMAC;
                                if (eth_mask->type == RTE_BE16(0xffff))
                                        input_set |= ICE_INSET_ETHERTYPE;
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                                struct ice_ether_hdr *h;
                                struct ice_ether_hdr *m;
                                uint16_t i = 0;
                                h = &list[t].h_u.eth_hdr;
                                m = &list[t].m_u.eth_hdr;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (eth_mask->src.addr_bytes[j] ==
                                                                UINT8_MAX) {
                                                h->src_addr[j] =
                                                eth_spec->src.addr_bytes[j];
                                                m->src_addr[j] =
                                                eth_mask->src.addr_bytes[j];
                                                i = 1;
                                        }
                                        if (eth_mask->dst.addr_bytes[j] ==
                                                                UINT8_MAX) {
                                                h->dst_addr[j] =
                                                eth_spec->dst.addr_bytes[j];
                                                m->dst_addr[j] =
                                                eth_mask->dst.addr_bytes[j];
                                                i = 1;
                                        }
                                }
                                if (i)
                                        t++;
                                if (eth_mask->type == UINT16_MAX) {
                                        list[t].type = ICE_ETYPE_OL;
                                        list[t].h_u.ethertype.ethtype_id =
                                                eth_spec->type;
                                        list[t].m_u.ethertype.ethtype_id =
                                                UINT16_MAX;
                                        t++;
                                }
                        } else if (!eth_spec && !eth_mask) {
                                list[t].type = (tun_type == ICE_NON_TUN) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (ipv4_mask->hdr.type_of_service ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TOS;
                                        if (ipv4_mask->hdr.src_addr ==
                                                        UINT32_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr ==
                                                        UINT32_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV4_PROTO;
                                } else {
                                        if (ipv4_mask->hdr.src_addr ==
                                                        UINT32_MAX)
                                                input_set |= ICE_INSET_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr ==
                                                        UINT32_MAX)
                                                input_set |= ICE_INSET_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live ==
                                                        UINT8_MAX)
                                                input_set |= ICE_INSET_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV4_PROTO;
                                        if (ipv4_mask->hdr.type_of_service ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_IPV4_TOS;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                        list[t].h_u.ipv4_hdr.src_addr =
                                                ipv4_spec->hdr.src_addr;
                                        list[t].m_u.ipv4_hdr.src_addr =
                                                UINT32_MAX;
                                }
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                        list[t].h_u.ipv4_hdr.dst_addr =
                                                ipv4_spec->hdr.dst_addr;
                                        list[t].m_u.ipv4_hdr.dst_addr =
                                                UINT32_MAX;
                                }
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.time_to_live =
                                                ipv4_spec->hdr.time_to_live;
                                        list[t].m_u.ipv4_hdr.time_to_live =
                                                UINT8_MAX;
                                }
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.protocol =
                                                ipv4_spec->hdr.next_proto_id;
                                        list[t].m_u.ipv4_hdr.protocol =
                                                UINT8_MAX;
                                }
                                if (ipv4_mask->hdr.type_of_service ==
                                                UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.tos =
                                                ipv4_spec->hdr.type_of_service;
                                        list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
                                }
                                t++;
                        } else if (!ipv4_spec && !ipv4_mask) {
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (!memcmp(ipv6_mask->hdr.src_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_SRC;
                                        if (!memcmp(ipv6_mask->hdr.dst_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_DST;
                                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_NEXT_HDR;
                                        if (ipv6_mask->hdr.hop_limits ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_HOP_LIMIT;
                                        if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                        == rte_cpu_to_be_32
                                                        (RTE_IPV6_HDR_TC_MASK))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_TC;
                                } else {
                                        if (!memcmp(ipv6_mask->hdr.src_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                                input_set |= ICE_INSET_IPV6_SRC;
                                        if (!memcmp(ipv6_mask->hdr.dst_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                                input_set |= ICE_INSET_IPV6_DST;
                                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV6_NEXT_HDR;
                                        if (ipv6_mask->hdr.hop_limits ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV6_HOP_LIMIT;
                                        if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                        == rte_cpu_to_be_32
                                                        (RTE_IPV6_HDR_TC_MASK))
                                                input_set |= ICE_INSET_IPV6_TC;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                                struct ice_ipv6_hdr *f;
                                struct ice_ipv6_hdr *s;
                                f = &list[t].h_u.ipv6_hdr;
                                s = &list[t].m_u.ipv6_hdr;
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j] ==
                                                UINT8_MAX) {
                                                f->src_addr[j] =
                                                ipv6_spec->hdr.src_addr[j];
                                                s->src_addr[j] =
                                                ipv6_mask->hdr.src_addr[j];
                                        }
                                        if (ipv6_mask->hdr.dst_addr[j] ==
                                                                UINT8_MAX) {
                                                f->dst_addr[j] =
                                                ipv6_spec->hdr.dst_addr[j];
                                                s->dst_addr[j] =
                                                ipv6_mask->hdr.dst_addr[j];
                                        }
                                }
                                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                        f->next_hdr =
                                                ipv6_spec->hdr.proto;
                                        s->next_hdr = UINT8_MAX;
                                }
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                        f->hop_limit =
                                                ipv6_spec->hdr.hop_limits;
                                        s->hop_limit = UINT8_MAX;
                                }
                                if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                == rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) {
                                        f->tc = (rte_be_to_cpu_32
                                                (ipv6_spec->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        s->tc = UINT8_MAX;
                                }
                                t++;
                        } else if (!ipv6_spec && !ipv6_mask) {
                                /* Fix copy-paste bug: an empty IPv6 item must
                                 * yield IPv6 lookup types, not IPv4 ones.
                                 */
                                list[t].type = (tun_type == ICE_NON_TUN) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (udp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_DST_PORT;
                                } else {
                                        if (udp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_UDP_DST_PORT;
                                }
                                if (tun_type == ICE_SW_TUN_VXLAN &&
                                                tunnel_valid == 0)
                                        list[t].type = ICE_UDP_OF;
                                else
                                        list[t].type = ICE_UDP_ILOS;
                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.src_port =
                                                udp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                udp_mask->hdr.src_port;
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                udp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                udp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!udp_spec && !udp_mask) {
                                list[t].type = ICE_UDP_ILOS;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (tcp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_DST_PORT;
                                } else {
                                        if (tcp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TCP_DST_PORT;
                                }
                                list[t].type = ICE_TCP_IL;
                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.src_port =
                                                tcp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                tcp_mask->hdr.src_port;
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                tcp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!tcp_spec && !tcp_mask) {
                                list[t].type = ICE_TCP_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (sctp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_DST_PORT;
                                } else {
                                        if (sctp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_SCTP_DST_PORT;
                                }
                                list[t].type = ICE_SCTP_IL;
                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.sctp_hdr.src_port =
                                                sctp_spec->hdr.src_port;
                                        list[t].m_u.sctp_hdr.src_port =
                                                sctp_mask->hdr.src_port;
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.sctp_hdr.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        list[t].m_u.sctp_hdr.dst_port =
                                                sctp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!sctp_spec && !sctp_mask) {
                                list[t].type = ICE_SCTP_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }

                        tunnel_valid = 1;
                        if (vxlan_spec && vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                                if (vxlan_mask->vni[0] == UINT8_MAX &&
                                        vxlan_mask->vni[1] == UINT8_MAX &&
                                        vxlan_mask->vni[2] == UINT8_MAX) {
                                        list[t].h_u.tnl_hdr.vni =
                                                (vxlan_spec->vni[2] << 16) |
                                                (vxlan_spec->vni[1] << 8) |
                                                vxlan_spec->vni[0];
                                        list[t].m_u.tnl_hdr.vni =
                                                UINT32_MAX;
                                        input_set |=
                                                ICE_INSET_TUN_VXLAN_VNI;
                                }
                                t++;
                        } else if (!vxlan_spec && !vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        tunnel_valid = 1;
                        if (nvgre_spec && nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                                if (nvgre_mask->tni[0] == UINT8_MAX &&
                                        nvgre_mask->tni[1] == UINT8_MAX &&
                                        nvgre_mask->tni[2] == UINT8_MAX) {
                                        list[t].h_u.nvgre_hdr.tni_flow =
                                                (nvgre_spec->tni[2] << 16) |
                                                (nvgre_spec->tni[1] << 8) |
                                                nvgre_spec->tni[0];
                                        list[t].m_u.nvgre_hdr.tni_flow =
                                                UINT32_MAX;
                                        input_set |=
                                                ICE_INSET_TUN_NVGRE_TNI;
                                }
                                t++;
                        } else if (!nvgre_spec && !nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        /* Check if VLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vlan_spec && vlan_mask) ||
                            (vlan_spec && !vlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VLAN item");
                                return 0;
                        }
                        if (vlan_spec && vlan_mask) {
                                list[t].type = ICE_VLAN_OFOS;
                                if (vlan_mask->tci == UINT16_MAX) {
                                        list[t].h_u.vlan_hdr.vlan =
                                                vlan_spec->tci;
                                        list[t].m_u.vlan_hdr.vlan =
                                                UINT16_MAX;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                if (vlan_mask->inner_type == UINT16_MAX) {
                                        list[t].h_u.vlan_hdr.type =
                                                vlan_spec->inner_type;
                                        list[t].m_u.vlan_hdr.type =
                                                UINT16_MAX;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                t++;
                        } else if (!vlan_spec && !vlan_mask) {
                                list[t].type = ICE_VLAN_OFOS;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_PPPOED:
                case RTE_FLOW_ITEM_TYPE_PPPOES:
                        pppoe_spec = item->spec;
                        pppoe_mask = item->mask;
                        /* Check if PPPoE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         */
                        if (pppoe_spec || pppoe_mask) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid PPPoE item");
                                return 0;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                   "Invalid pattern item.");
                        goto out;
                }
        }

        *lkups_num = t;

        return input_set;
out:
        return 0;
}

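/* Translate the flow actions into a switch filter action: forward to a
 * single queue, forward to a queue group (expressed through the RSS action,
 * which must name a power-of-two number of consecutive queues) or drop.
 */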
static int
ice_switch_parse_action(struct ice_pf *pf,
                const struct rte_flow_action *actions,
                struct rte_flow_error *error,
                struct ice_adv_rule_info *rule_info)
{
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_rss *act_qgrop;
        uint16_t base_queue, i;
        const struct rte_flow_action *action;
        enum rte_flow_action_type action_type;
        uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
                2, 4, 8, 16, 32, 64, 128};

        base_queue = pf->base_queue + vsi->base_queue;
        for (action = actions; action->type !=
                        RTE_FLOW_ACTION_TYPE_END; action++) {
                action_type = action->type;
                switch (action_type) {
                case RTE_FLOW_ACTION_TYPE_RSS:
                        act_qgrop = action->conf;
                        rule_info->sw_act.fltr_act =
                                ICE_FWD_TO_QGRP;
                        rule_info->sw_act.fwd_id.q_id =
                                base_queue + act_qgrop->queue[0];
                        for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
                                if (act_qgrop->queue_num ==
                                        valid_qgrop_number[i])
                                        break;
                        }
                        if (i == MAX_QGRP_NUM_TYPE)
                                goto error;
                        if ((act_qgrop->queue[0] +
                                act_qgrop->queue_num) >
                                dev->data->nb_rx_queues)
                                goto error;
                        for (i = 0; i < act_qgrop->queue_num - 1; i++)
                                if (act_qgrop->queue[i + 1] !=
                                        act_qgrop->queue[i] + 1)
                                        goto error;
                        rule_info->sw_act.qgrp_size =
                                act_qgrop->queue_num;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        act_q = action->conf;
                        if (act_q->index >= dev->data->nb_rx_queues)
                                goto error;
                        rule_info->sw_act.fltr_act =
                                ICE_FWD_TO_Q;
                        rule_info->sw_act.fwd_id.q_id =
                                base_queue + act_q->index;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule_info->sw_act.fltr_act =
                                ICE_DROP_PACKET;
                        break;

                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                default:
                        goto error;
                }
        }

        rule_info->sw_act.vsi_handle = vsi->idx;
        rule_info->rx = 1;
        rule_info->sw_act.src = vsi->idx;
        rule_info->priority = 5;

        return 0;

error:
        rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                actions,
                "Invalid action type or queue number");
        return -rte_errno;
}

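/* Top-level parser callback: size and allocate the lookup list, derive the
 * tunnel type from the pattern, validate the input set against the matched
 * pattern entry, parse the actions, and return everything through *meta.
 */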
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
                struct ice_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        uint64_t inputset = 0;
        int ret = 0;
        struct sw_meta *sw_meta_ptr = NULL;
        struct ice_adv_rule_info rule_info = {0}; /* zero fields not set below */
        struct ice_adv_lkup_elem *list = NULL;
        uint16_t lkups_num = 0;
        const struct rte_flow_item *item = pattern;
        uint16_t item_num = 0;
        enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
        struct ice_pattern_match_item *pattern_match_item = NULL;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_num++;
                if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                        tun_type = ICE_SW_TUN_VXLAN;
                if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
                        tun_type = ICE_SW_TUN_NVGRE;
                if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
                                item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
                        tun_type = ICE_SW_TUN_PPPOE;
                if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        const struct rte_flow_item_eth *eth_mask;
                        if (item->mask)
                                eth_mask = item->mask;
                        else
                                continue;
                        if (eth_mask->type == UINT16_MAX)
                                tun_type = ICE_SW_TUN_AND_NON_TUN;
                }
                /* reserve one more memory slot for ETH which may
                 * consume 2 lookup items.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
                        item_num++;
        }

        list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for PMD internal items");
                return -rte_errno;
        }

        rule_info.tun_type = tun_type;

        sw_meta_ptr = rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
        if (!sw_meta_ptr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for sw_pattern_meta_ptr");
                goto error;
        }

        pattern_match_item =
                ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!pattern_match_item) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Invalid input pattern");
                goto error;
        }

        inputset = ice_switch_inset_get(pattern, error, list,
                        &lkups_num, tun_type);
        if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                goto error;
        }

        ret = ice_switch_parse_action(pf, actions, error, &rule_info);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Invalid input action");
                goto error;
        }
        *meta = sw_meta_ptr;
        ((struct sw_meta *)*meta)->list = list;
        ((struct sw_meta *)*meta)->lkups_num = lkups_num;
        ((struct sw_meta *)*meta)->rule_info = rule_info;
        rte_free(pattern_match_item);

        return 0;

error:
        rte_free(list);
        rte_free(sw_meta_ptr);
        rte_free(pattern_match_item);

        return -rte_errno;
}

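/* Flow query callback: the count action is not supported by this engine. */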
static int
ice_switch_query(struct ice_adapter *ad __rte_unused,
                struct rte_flow *flow __rte_unused,
                struct rte_flow_query_count *count __rte_unused,
                struct rte_flow_error *error)
{
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL,
                "count action not supported by switch filter");

        return -rte_errno;
}

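/* Register the distributor parser matching the active DDP package
 * (comms vs. OS), or the permission-stage parser when pipe mode is enabled.
 */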
static int
ice_switch_init(struct ice_adapter *ad)
{
        int ret = 0;
        struct ice_flow_parser *dist_parser;
        struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                dist_parser = &ice_switch_dist_parser_comms;
        else
                dist_parser = &ice_switch_dist_parser_os;

        if (ad->devargs.pipe_mode_support)
                ret = ice_register_parser(perm_parser, ad);
        else
                ret = ice_register_parser(dist_parser, ad);
        return ret;
}

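/* Unregister whichever parser ice_switch_init() registered. */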
static void
ice_switch_uninit(struct ice_adapter *ad)
{
        struct ice_flow_parser *dist_parser;
        struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                dist_parser = &ice_switch_dist_parser_comms;
        else
                dist_parser = &ice_switch_dist_parser_os;

        if (ad->devargs.pipe_mode_support)
                ice_unregister_parser(perm_parser, ad);
        else
                ice_unregister_parser(dist_parser, ad);
}

static struct ice_flow_engine ice_switch_engine = {
        .init = ice_switch_init,
        .uninit = ice_switch_uninit,
        .create = ice_switch_create,
        .destroy = ice_switch_destroy,
        .query_count = ice_switch_query,
        .free = ice_switch_filter_rule_free,
        .type = ICE_FLOW_ENGINE_SWITCH,
};

static struct ice_flow_parser ice_switch_dist_parser_os = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_os,
        .array_len = RTE_DIM(ice_switch_pattern_dist_os),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_switch_dist_parser_comms = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_comms,
        .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_switch_perm_parser = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_perm,
        .array_len = RTE_DIM(ice_switch_pattern_perm),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_PERMISSION,
};

RTE_INIT(ice_sw_engine_init)
{
        struct ice_flow_engine *engine = &ice_switch_engine;

        ice_register_flow_engine(engine);
}