net/ice: remove unnecessary variable
drivers/net/ice/ice_switch_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"

#define MAX_QGRP_NUM_TYPE 7

#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)

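/* Parser output handed to rule creation: the advanced lookup list,
 * its length, and the rule info (tunnel type and switch action).
 */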
struct sw_meta {
        struct ice_adv_lkup_elem *list;
        uint16_t lkups_num;
        struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;

static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoed,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
};

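/* Program a parsed rule into hardware through ice_add_adv_rule() and
 * keep the returned rule query data in flow->rule for later destroy.
 */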
static int
ice_switch_create(struct ice_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        int ret = 0;
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
        struct ice_rule_query_data *filter_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
                ((struct sw_meta *)meta)->lkups_num;
        struct ice_adv_rule_info *rule_info =
                &((struct sw_meta *)meta)->rule_info;

        if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "item number too large for rule");
                goto error;
        }
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "lookup list should not be NULL");
                goto error;
        }
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
                filter_ptr = rte_zmalloc("ice_switch_filter",
                        sizeof(struct ice_rule_query_data), 0);
                if (!filter_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
                flow->rule = filter_ptr;
                rte_memcpy(filter_ptr,
                        &rule_added,
                        sizeof(struct ice_rule_query_data));
        } else {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "switch filter create flow failed");
                goto error;
        }

        rte_free(list);
        rte_free(meta);
        return 0;

error:
        rte_free(list);
        rte_free(meta);

        return -rte_errno;
}

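/* Remove a rule previously added by ice_switch_create() and free the
 * rule query data kept in the flow handle.
 */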
static int
ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_hw *hw = &ad->hw;
        int ret;
        struct ice_rule_query_data *filter_ptr;

        filter_ptr = (struct ice_rule_query_data *)
                flow->rule;

        if (!filter_ptr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow"
                        " created by switch filter");
                return -rte_errno;
        }

        ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to destroy switch filter rule");
                return -rte_errno;
        }

        rte_free(filter_ptr);
        return ret;
}

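/* Release the rule query data attached to the flow handle. */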
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
        rte_free(flow->rule);
}

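/* Walk the pattern items, validate their masks, fill the advanced
 * lookup list and return the collected input set bitmap; returns 0
 * on any invalid item.
 */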
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
                struct ice_adv_lkup_elem *list,
                uint16_t *lkups_num,
                enum ice_sw_tunnel_type tun_type)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
        uint64_t input_set = ICE_INSET_NONE;
        uint16_t j, t = 0;
        uint16_t tunnel_valid = 0;

        for (item = pattern; item->type !=
                        RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range is not supported");
                        return 0;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        if (eth_spec && eth_mask) {
                                if (tunnel_valid &&
                                    rte_is_broadcast_ether_addr(&eth_mask->src))
                                        input_set |= ICE_INSET_TUN_SMAC;
                                else if (
                                rte_is_broadcast_ether_addr(&eth_mask->src))
                                        input_set |= ICE_INSET_SMAC;
                                if (tunnel_valid &&
                                    rte_is_broadcast_ether_addr(&eth_mask->dst))
                                        input_set |= ICE_INSET_TUN_DMAC;
                                else if (
                                rte_is_broadcast_ether_addr(&eth_mask->dst))
                                        input_set |= ICE_INSET_DMAC;
                                if (eth_mask->type == RTE_BE16(0xffff))
                                        input_set |= ICE_INSET_ETHERTYPE;
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                                struct ice_ether_hdr *h;
                                struct ice_ether_hdr *m;
                                uint16_t i = 0;
                                h = &list[t].h_u.eth_hdr;
                                m = &list[t].m_u.eth_hdr;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (eth_mask->src.addr_bytes[j] ==
                                                                UINT8_MAX) {
                                                h->src_addr[j] =
                                                eth_spec->src.addr_bytes[j];
                                                m->src_addr[j] =
                                                eth_mask->src.addr_bytes[j];
                                                i = 1;
                                        }
                                        if (eth_mask->dst.addr_bytes[j] ==
                                                                UINT8_MAX) {
                                                h->dst_addr[j] =
                                                eth_spec->dst.addr_bytes[j];
                                                m->dst_addr[j] =
                                                eth_mask->dst.addr_bytes[j];
                                                i = 1;
                                        }
                                }
                                if (i)
                                        t++;
                                if (eth_mask->type == UINT16_MAX) {
                                        list[t].type = ICE_ETYPE_OL;
                                        list[t].h_u.ethertype.ethtype_id =
                                                eth_spec->type;
                                        list[t].m_u.ethertype.ethtype_id =
                                                UINT16_MAX;
                                        t++;
                                }
                        } else if (!eth_spec && !eth_mask) {
                                list[t].type = (tun_type == ICE_NON_TUN) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (ipv4_mask->hdr.type_of_service ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TOS;
                                        if (ipv4_mask->hdr.src_addr ==
                                                        UINT32_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr ==
                                                        UINT32_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV4_PROTO;
                                } else {
                                        if (ipv4_mask->hdr.src_addr ==
                                                        UINT32_MAX)
                                                input_set |= ICE_INSET_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr ==
                                                        UINT32_MAX)
                                                input_set |= ICE_INSET_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live ==
                                                        UINT8_MAX)
                                                input_set |= ICE_INSET_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV4_PROTO;
                                        if (ipv4_mask->hdr.type_of_service ==
                                                        UINT8_MAX)
                                                input_set |=
                                                        ICE_INSET_IPV4_TOS;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                                        list[t].h_u.ipv4_hdr.src_addr =
                                                ipv4_spec->hdr.src_addr;
                                        list[t].m_u.ipv4_hdr.src_addr =
                                                UINT32_MAX;
                                }
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                                        list[t].h_u.ipv4_hdr.dst_addr =
                                                ipv4_spec->hdr.dst_addr;
                                        list[t].m_u.ipv4_hdr.dst_addr =
                                                UINT32_MAX;
                                }
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.time_to_live =
                                                ipv4_spec->hdr.time_to_live;
                                        list[t].m_u.ipv4_hdr.time_to_live =
                                                UINT8_MAX;
                                }
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.protocol =
                                                ipv4_spec->hdr.next_proto_id;
                                        list[t].m_u.ipv4_hdr.protocol =
                                                UINT8_MAX;
                                }
                                if (ipv4_mask->hdr.type_of_service ==
                                                UINT8_MAX) {
                                        list[t].h_u.ipv4_hdr.tos =
                                                ipv4_spec->hdr.type_of_service;
                                        list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
                                }
                                t++;
                        } else if (!ipv4_spec && !ipv4_mask) {
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (!memcmp(ipv6_mask->hdr.src_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_SRC;
                                        if (!memcmp(ipv6_mask->hdr.dst_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_DST;
                                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_NEXT_HDR;
                                        if (ipv6_mask->hdr.hop_limits ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_HOP_LIMIT;
                                        if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                        == rte_cpu_to_be_32
                                                        (RTE_IPV6_HDR_TC_MASK))
                                                input_set |=
                                                        ICE_INSET_TUN_IPV6_TC;
                                } else {
                                        if (!memcmp(ipv6_mask->hdr.src_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                                input_set |= ICE_INSET_IPV6_SRC;
                                        if (!memcmp(ipv6_mask->hdr.dst_addr,
                                                ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                                input_set |= ICE_INSET_IPV6_DST;
                                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV6_NEXT_HDR;
                                        if (ipv6_mask->hdr.hop_limits ==
                                                        UINT8_MAX)
                                                input_set |=
                                                ICE_INSET_IPV6_HOP_LIMIT;
                                        if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                        == rte_cpu_to_be_32
                                                        (RTE_IPV6_HDR_TC_MASK))
                                                input_set |= ICE_INSET_IPV6_TC;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                                struct ice_ipv6_hdr *f;
                                struct ice_ipv6_hdr *s;
                                f = &list[t].h_u.ipv6_hdr;
                                s = &list[t].m_u.ipv6_hdr;
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j] ==
                                                UINT8_MAX) {
                                                f->src_addr[j] =
                                                ipv6_spec->hdr.src_addr[j];
                                                s->src_addr[j] =
                                                ipv6_mask->hdr.src_addr[j];
                                        }
                                        if (ipv6_mask->hdr.dst_addr[j] ==
                                                                UINT8_MAX) {
                                                f->dst_addr[j] =
                                                ipv6_spec->hdr.dst_addr[j];
                                                s->dst_addr[j] =
                                                ipv6_mask->hdr.dst_addr[j];
                                        }
                                }
                                if (ipv6_mask->hdr.proto == UINT8_MAX) {
                                        f->next_hdr =
                                                ipv6_spec->hdr.proto;
                                        s->next_hdr = UINT8_MAX;
                                }
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                                        f->hop_limit =
                                                ipv6_spec->hdr.hop_limits;
                                        s->hop_limit = UINT8_MAX;
                                }
                                if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                                == rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) {
                                        struct ice_le_ver_tc_flow vtf;
                                        vtf.u.fld.version = 0;
                                        vtf.u.fld.flow_label = 0;
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_spec->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        vtf.u.fld.tc = UINT8_MAX;
                                        s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                }
                                t++;
                        } else if (!ipv6_spec && !ipv6_mask) {
                                list[t].type = (tun_type == ICE_NON_TUN) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (udp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_DST_PORT;
                                } else {
                                        if (udp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_UDP_DST_PORT;
                                }
                                if (tun_type == ICE_SW_TUN_VXLAN &&
                                                tunnel_valid == 0)
                                        list[t].type = ICE_UDP_OF;
                                else
                                        list[t].type = ICE_UDP_ILOS;
                                if (udp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.src_port =
                                                udp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                udp_mask->hdr.src_port;
                                }
                                if (udp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                udp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                udp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!udp_spec && !udp_mask) {
                                list[t].type = ICE_UDP_ILOS;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (tcp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_DST_PORT;
                                } else {
                                        if (tcp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TCP_DST_PORT;
                                }
                                list[t].type = ICE_TCP_IL;
                                if (tcp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.src_port =
                                                tcp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                tcp_mask->hdr.src_port;
                                }
                                if (tcp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                tcp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!tcp_spec && !tcp_mask) {
                                list[t].type = ICE_TCP_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (sctp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_DST_PORT;
                                } else {
                                        if (sctp_mask->hdr.src_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port ==
                                                        UINT16_MAX)
                                                input_set |=
                                                ICE_INSET_SCTP_DST_PORT;
                                }
                                list[t].type = ICE_SCTP_IL;
                                if (sctp_mask->hdr.src_port == UINT16_MAX) {
                                        list[t].h_u.sctp_hdr.src_port =
                                                sctp_spec->hdr.src_port;
                                        list[t].m_u.sctp_hdr.src_port =
                                                sctp_mask->hdr.src_port;
                                }
                                if (sctp_mask->hdr.dst_port == UINT16_MAX) {
                                        list[t].h_u.sctp_hdr.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        list[t].m_u.sctp_hdr.dst_port =
                                                sctp_mask->hdr.dst_port;
                                }
                                t++;
                        } else if (!sctp_spec && !sctp_mask) {
                                list[t].type = ICE_SCTP_IL;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }

                        tunnel_valid = 1;
                        if (vxlan_spec && vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                                if (vxlan_mask->vni[0] == UINT8_MAX &&
                                        vxlan_mask->vni[1] == UINT8_MAX &&
                                        vxlan_mask->vni[2] == UINT8_MAX) {
                                        list[t].h_u.tnl_hdr.vni =
                                                (vxlan_spec->vni[2] << 16) |
                                                (vxlan_spec->vni[1] << 8) |
                                                vxlan_spec->vni[0];
                                        list[t].m_u.tnl_hdr.vni =
                                                UINT32_MAX;
                                        input_set |=
                                                ICE_INSET_TUN_VXLAN_VNI;
                                }
                                t++;
                        } else if (!vxlan_spec && !vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        tunnel_valid = 1;
                        if (nvgre_spec && nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                                if (nvgre_mask->tni[0] == UINT8_MAX &&
                                        nvgre_mask->tni[1] == UINT8_MAX &&
                                        nvgre_mask->tni[2] == UINT8_MAX) {
                                        list[t].h_u.nvgre_hdr.tni_flow =
                                                (nvgre_spec->tni[2] << 16) |
                                                (nvgre_spec->tni[1] << 8) |
                                                nvgre_spec->tni[0];
                                        list[t].m_u.nvgre_hdr.tni_flow =
                                                UINT32_MAX;
                                        input_set |=
                                                ICE_INSET_TUN_NVGRE_TNI;
                                }
                                t++;
                        } else if (!nvgre_spec && !nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        /* Check if VLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vlan_spec && vlan_mask) ||
                            (vlan_spec && !vlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VLAN item");
                                return 0;
                        }
                        if (vlan_spec && vlan_mask) {
                                list[t].type = ICE_VLAN_OFOS;
                                if (vlan_mask->tci == UINT16_MAX) {
                                        list[t].h_u.vlan_hdr.vlan =
                                                vlan_spec->tci;
                                        list[t].m_u.vlan_hdr.vlan =
                                                UINT16_MAX;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                if (vlan_mask->inner_type == UINT16_MAX) {
                                        list[t].h_u.vlan_hdr.type =
                                                vlan_spec->inner_type;
                                        list[t].m_u.vlan_hdr.type =
                                                UINT16_MAX;
                                        input_set |= ICE_INSET_VLAN_OUTER;
                                }
                                t++;
                        } else if (!vlan_spec && !vlan_mask) {
                                list[t].type = ICE_VLAN_OFOS;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_PPPOED:
                case RTE_FLOW_ITEM_TYPE_PPPOES:
                        pppoe_spec = item->spec;
                        pppoe_mask = item->mask;
                        /* Check if PPPoE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         */
                        if (pppoe_spec || pppoe_mask) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid pppoe item");
                                return 0;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;

                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                   "Invalid pattern item.");
                        goto out;
                }
        }

        *lkups_num = t;

        return input_set;
out:
        return 0;
}

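/* Translate flow actions (queue, queue group via RSS, drop) into the
 * switch rule action info. A queue group must have a power-of-two size
 * from 2 to 128 and its queues must be contiguous.
 */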
static int
ice_switch_parse_action(struct ice_pf *pf,
                const struct rte_flow_action *actions,
                struct rte_flow_error *error,
                struct ice_adv_rule_info *rule_info)
{
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_rss *act_qgrop;
        uint16_t base_queue, i;
        const struct rte_flow_action *action;
        enum rte_flow_action_type action_type;
        uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
                 2, 4, 8, 16, 32, 64, 128};

        base_queue = pf->base_queue + vsi->base_queue;
        for (action = actions; action->type !=
                        RTE_FLOW_ACTION_TYPE_END; action++) {
                action_type = action->type;
                switch (action_type) {
                case RTE_FLOW_ACTION_TYPE_RSS:
                        act_qgrop = action->conf;
                        rule_info->sw_act.fltr_act =
                                ICE_FWD_TO_QGRP;
                        rule_info->sw_act.fwd_id.q_id =
                                base_queue + act_qgrop->queue[0];
                        for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
                                if (act_qgrop->queue_num ==
                                        valid_qgrop_number[i])
                                        break;
                        }
                        if (i == MAX_QGRP_NUM_TYPE)
                                goto error;
                        if ((act_qgrop->queue[0] +
                                act_qgrop->queue_num) >
                                dev->data->nb_rx_queues)
                                goto error;
                        for (i = 0; i < act_qgrop->queue_num - 1; i++)
                                if (act_qgrop->queue[i + 1] !=
                                        act_qgrop->queue[i] + 1)
                                        goto error;
                        rule_info->sw_act.qgrp_size =
                                act_qgrop->queue_num;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        act_q = action->conf;
                        if (act_q->index >= dev->data->nb_rx_queues)
                                goto error;
                        rule_info->sw_act.fltr_act =
                                ICE_FWD_TO_Q;
                        rule_info->sw_act.fwd_id.q_id =
                                base_queue + act_q->index;
                        break;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule_info->sw_act.fltr_act =
                                ICE_DROP_PACKET;
                        break;

                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                default:
                        goto error;
                }
        }

        rule_info->sw_act.vsi_handle = vsi->idx;
        rule_info->rx = 1;
        rule_info->sw_act.src = vsi->idx;
        rule_info->priority = 5;

        return 0;

error:
        rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                actions,
                "Invalid action type or queue number");
        return -rte_errno;
}

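/* Parser entry point: match the pattern against the supported array,
 * build the lookup list and rule info, and pass them back through
 * *meta for ice_switch_create() to consume.
 */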
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
                struct ice_pattern_match_item *array,
                uint32_t array_len,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        uint64_t inputset = 0;
        int ret = 0;
        struct sw_meta *sw_meta_ptr = NULL;
        struct ice_adv_rule_info rule_info = {0};
        struct ice_adv_lkup_elem *list = NULL;
        uint16_t lkups_num = 0;
        const struct rte_flow_item *item = pattern;
        uint16_t item_num = 0;
        enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
        struct ice_pattern_match_item *pattern_match_item = NULL;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_num++;
                if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                        tun_type = ICE_SW_TUN_VXLAN;
                if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
                        tun_type = ICE_SW_TUN_NVGRE;
                if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
                                item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
                        tun_type = ICE_SW_TUN_PPPOE;
                if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        const struct rte_flow_item_eth *eth_mask;
                        if (item->mask)
                                eth_mask = item->mask;
                        else
                                continue;
                        if (eth_mask->type == UINT16_MAX)
                                tun_type = ICE_SW_TUN_AND_NON_TUN;
                }
                /* reserve one more memory slot for ETH which may
                 * consume 2 lookup items.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
                        item_num++;
        }

        list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for PMD internal items");
                return -rte_errno;
        }

        rule_info.tun_type = tun_type;

        sw_meta_ptr =
                rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
        if (!sw_meta_ptr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for sw_pattern_meta_ptr");
                goto error;
        }

        pattern_match_item =
                ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!pattern_match_item) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Invalid input pattern");
                goto error;
        }

        inputset = ice_switch_inset_get
                (pattern, error, list, &lkups_num, tun_type);
        if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                goto error;
        }

        ret = ice_switch_parse_action(pf, actions, error, &rule_info);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Invalid input action");
                goto error;
        }

        if (meta) {
                *meta = sw_meta_ptr;
                ((struct sw_meta *)*meta)->list = list;
                ((struct sw_meta *)*meta)->lkups_num = lkups_num;
                ((struct sw_meta *)*meta)->rule_info = rule_info;
        } else {
                rte_free(list);
                rte_free(sw_meta_ptr);
        }

        rte_free(pattern_match_item);

        return 0;

error:
        rte_free(list);
        rte_free(sw_meta_ptr);
        rte_free(pattern_match_item);

        return -rte_errno;
}

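/* Query is not supported: the switch filter has no count action. */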
static int
ice_switch_query(struct ice_adapter *ad __rte_unused,
                struct rte_flow *flow __rte_unused,
                struct rte_flow_query_count *count __rte_unused,
                struct rte_flow_error *error)
{
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL,
                "count action not supported by switch filter");

        return -rte_errno;
}

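/* Register the distributor parser matching the active DDP package, or
 * the permission-stage parser when pipeline mode is enabled.
 */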
static int
ice_switch_init(struct ice_adapter *ad)
{
        int ret = 0;
        struct ice_flow_parser *dist_parser;
        struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                dist_parser = &ice_switch_dist_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                dist_parser = &ice_switch_dist_parser_os;
        else
                return -EINVAL;

        if (ad->devargs.pipe_mode_support)
                ret = ice_register_parser(perm_parser, ad);
        else
                ret = ice_register_parser(dist_parser, ad);
        return ret;
}

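/* Unregister the parser registered by ice_switch_init(). */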
static void
ice_switch_uninit(struct ice_adapter *ad)
{
        struct ice_flow_parser *dist_parser;
        struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                dist_parser = &ice_switch_dist_parser_comms;
        else
                dist_parser = &ice_switch_dist_parser_os;

        if (ad->devargs.pipe_mode_support)
                ice_unregister_parser(perm_parser, ad);
        else
                ice_unregister_parser(dist_parser, ad);
}

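/* Switch filter engine ops registered with the generic flow framework. */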
static struct
ice_flow_engine ice_switch_engine = {
        .init = ice_switch_init,
        .uninit = ice_switch_uninit,
        .create = ice_switch_create,
        .destroy = ice_switch_destroy,
        .query_count = ice_switch_query,
        .free = ice_switch_filter_rule_free,
        .type = ICE_FLOW_ENGINE_SWITCH,
};

static struct
ice_flow_parser ice_switch_dist_parser_os = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_os,
        .array_len = RTE_DIM(ice_switch_pattern_dist_os),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct
ice_flow_parser ice_switch_dist_parser_comms = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_comms,
        .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct
ice_flow_parser ice_switch_perm_parser = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_perm,
        .array_len = RTE_DIM(ice_switch_pattern_perm),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_PERMISSION,
};

RTE_INIT(ice_sw_engine_init)
{
        struct ice_flow_engine *engine = &ice_switch_engine;
        ice_register_flow_engine(engine);
}