net/ice: support more PPPoE input set
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
/* Maximum number of queue-group types supported per rule action. */
#define MAX_QGRP_NUM_TYPE 7

/* Input-set masks: each ICE_SW_INSET_* value lists the protocol fields a
 * switch-filter rule of the corresponding pattern is allowed to match on.
 * These are OR-combinations of the ICE_INSET_* field bits defined in
 * ice_generic_flow.h.
 */

/* Plain Ethernet: MAC addresses and ethertype. */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
/* Non-tunnel IPv4 over Ethernet. */
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Non-tunnel IPv6 over Ethernet. */
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Distributor-mode tunnel patterns: match inner (TUN_*) fields plus the
 * outer IPv4 destination and the tunnel identifier (TNI/VNI).
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Permission-mode tunnel patterns: inner fields only, no outer match. */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns: MAC/VLAN/ethertype plus the PPPoE session id, and
 * optionally the PPP protocol id carried in the session payload.
 */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
95
/* Parser output handed from rule parsing/validation to rule creation:
 * everything ice_add_adv_rule() needs to program the rule into hardware.
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;		/* lookup elements (match fields) */
	uint16_t lkups_num;			/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule action/direction info */
};
101
/* Forward declarations of the parsers defined later in this file:
 * distributor mode (OS and comms DDP packages) and permission mode.
 */
static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;
105
/* Patterns supported in distributor mode with the comms DDP package,
 * each paired with the input-set bits a rule of that pattern may use.
 * Third field is the unsupported-input mask (none for all entries).
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	/* PPPoE discovery/session stages, with and without VLAN. */
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
147
/* Patterns supported in distributor mode with the OS-default DDP package.
 * Same as the comms table minus the PPPoE entries, plus bare ARP.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};
179
/* Patterns supported in permission mode; tunnel entries use the
 * inner-field-only PERM_TUNNEL input sets.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
};
207
208 static int
209 ice_switch_create(struct ice_adapter *ad,
210                 struct rte_flow *flow,
211                 void *meta,
212                 struct rte_flow_error *error)
213 {
214         int ret = 0;
215         struct ice_pf *pf = &ad->pf;
216         struct ice_hw *hw = ICE_PF_TO_HW(pf);
217         struct ice_rule_query_data rule_added = {0};
218         struct ice_rule_query_data *filter_ptr;
219         struct ice_adv_lkup_elem *list =
220                 ((struct sw_meta *)meta)->list;
221         uint16_t lkups_cnt =
222                 ((struct sw_meta *)meta)->lkups_num;
223         struct ice_adv_rule_info *rule_info =
224                 &((struct sw_meta *)meta)->rule_info;
225
226         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
227                 rte_flow_error_set(error, EINVAL,
228                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
229                         "item number too large for rule");
230                 goto error;
231         }
232         if (!list) {
233                 rte_flow_error_set(error, EINVAL,
234                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
235                         "lookup list should not be NULL");
236                 goto error;
237         }
238         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
239         if (!ret) {
240                 filter_ptr = rte_zmalloc("ice_switch_filter",
241                         sizeof(struct ice_rule_query_data), 0);
242                 if (!filter_ptr) {
243                         rte_flow_error_set(error, EINVAL,
244                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
245                                    "No memory for ice_switch_filter");
246                         goto error;
247                 }
248                 flow->rule = filter_ptr;
249                 rte_memcpy(filter_ptr,
250                         &rule_added,
251                         sizeof(struct ice_rule_query_data));
252         } else {
253                 rte_flow_error_set(error, EINVAL,
254                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
255                         "switch filter create flow fail");
256                 goto error;
257         }
258
259         rte_free(list);
260         rte_free(meta);
261         return 0;
262
263 error:
264         rte_free(list);
265         rte_free(meta);
266
267         return -rte_errno;
268 }
269
270 static int
271 ice_switch_destroy(struct ice_adapter *ad,
272                 struct rte_flow *flow,
273                 struct rte_flow_error *error)
274 {
275         struct ice_hw *hw = &ad->hw;
276         int ret;
277         struct ice_rule_query_data *filter_ptr;
278
279         filter_ptr = (struct ice_rule_query_data *)
280                 flow->rule;
281
282         if (!filter_ptr) {
283                 rte_flow_error_set(error, EINVAL,
284                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
285                         "no such flow"
286                         " create by switch filter");
287                 return -rte_errno;
288         }
289
290         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
291         if (ret) {
292                 rte_flow_error_set(error, EINVAL,
293                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
294                         "fail to destroy switch filter rule");
295                 return -rte_errno;
296         }
297
298         rte_free(filter_ptr);
299         return ret;
300 }
301
302 static void
303 ice_switch_filter_rule_free(struct rte_flow *flow)
304 {
305         rte_free(flow->rule);
306 }
307
308 static uint64_t
309 ice_switch_inset_get(const struct rte_flow_item pattern[],
310                 struct rte_flow_error *error,
311                 struct ice_adv_lkup_elem *list,
312                 uint16_t *lkups_num,
313                 enum ice_sw_tunnel_type tun_type)
314 {
315         const struct rte_flow_item *item = pattern;
316         enum rte_flow_item_type item_type;
317         const struct rte_flow_item_eth *eth_spec, *eth_mask;
318         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
319         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
320         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
321         const struct rte_flow_item_udp *udp_spec, *udp_mask;
322         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
323         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
324         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
325         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
326         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
327         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
328                                 *pppoe_proto_mask;
329         uint8_t  ipv6_addr_mask[16] = {
330                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
331                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
332         uint64_t input_set = ICE_INSET_NONE;
333         uint16_t j, t = 0;
334         uint16_t tunnel_valid = 0;
335         uint16_t pppoe_valid = 0;
336
337
338         for (item = pattern; item->type !=
339                         RTE_FLOW_ITEM_TYPE_END; item++) {
340                 if (item->last) {
341                         rte_flow_error_set(error, EINVAL,
342                                         RTE_FLOW_ERROR_TYPE_ITEM,
343                                         item,
344                                         "Not support range");
345                         return 0;
346                 }
347                 item_type = item->type;
348
349                 switch (item_type) {
350                 case RTE_FLOW_ITEM_TYPE_ETH:
351                         eth_spec = item->spec;
352                         eth_mask = item->mask;
353                         if (eth_spec && eth_mask) {
354                                 if (tunnel_valid &&
355                                     rte_is_broadcast_ether_addr(&eth_mask->src))
356                                         input_set |= ICE_INSET_TUN_SMAC;
357                                 else if (
358                                 rte_is_broadcast_ether_addr(&eth_mask->src))
359                                         input_set |= ICE_INSET_SMAC;
360                                 if (tunnel_valid &&
361                                     rte_is_broadcast_ether_addr(&eth_mask->dst))
362                                         input_set |= ICE_INSET_TUN_DMAC;
363                                 else if (
364                                 rte_is_broadcast_ether_addr(&eth_mask->dst))
365                                         input_set |= ICE_INSET_DMAC;
366                                 if (eth_mask->type == RTE_BE16(0xffff))
367                                         input_set |= ICE_INSET_ETHERTYPE;
368                                 list[t].type = (tunnel_valid  == 0) ?
369                                         ICE_MAC_OFOS : ICE_MAC_IL;
370                                 struct ice_ether_hdr *h;
371                                 struct ice_ether_hdr *m;
372                                 uint16_t i = 0;
373                                 h = &list[t].h_u.eth_hdr;
374                                 m = &list[t].m_u.eth_hdr;
375                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
376                                         if (eth_mask->src.addr_bytes[j] ==
377                                                                 UINT8_MAX) {
378                                                 h->src_addr[j] =
379                                                 eth_spec->src.addr_bytes[j];
380                                                 m->src_addr[j] =
381                                                 eth_mask->src.addr_bytes[j];
382                                                 i = 1;
383                                         }
384                                         if (eth_mask->dst.addr_bytes[j] ==
385                                                                 UINT8_MAX) {
386                                                 h->dst_addr[j] =
387                                                 eth_spec->dst.addr_bytes[j];
388                                                 m->dst_addr[j] =
389                                                 eth_mask->dst.addr_bytes[j];
390                                                 i = 1;
391                                         }
392                                 }
393                                 if (i)
394                                         t++;
395                                 if (eth_mask->type == UINT16_MAX) {
396                                         list[t].type = ICE_ETYPE_OL;
397                                         list[t].h_u.ethertype.ethtype_id =
398                                                 eth_spec->type;
399                                         list[t].m_u.ethertype.ethtype_id =
400                                                 UINT16_MAX;
401                                         t++;
402                                 }
403                         } else if (!eth_spec && !eth_mask) {
404                                 list[t].type = (tun_type == ICE_NON_TUN) ?
405                                         ICE_MAC_OFOS : ICE_MAC_IL;
406                         }
407                         break;
408
409                 case RTE_FLOW_ITEM_TYPE_IPV4:
410                         ipv4_spec = item->spec;
411                         ipv4_mask = item->mask;
412                         if (ipv4_spec && ipv4_mask) {
413                                 /* Check IPv4 mask and update input set */
414                                 if (ipv4_mask->hdr.version_ihl ||
415                                         ipv4_mask->hdr.total_length ||
416                                         ipv4_mask->hdr.packet_id ||
417                                         ipv4_mask->hdr.hdr_checksum) {
418                                         rte_flow_error_set(error, EINVAL,
419                                                    RTE_FLOW_ERROR_TYPE_ITEM,
420                                                    item,
421                                                    "Invalid IPv4 mask.");
422                                         return 0;
423                                 }
424
425                                 if (tunnel_valid) {
426                                         if (ipv4_mask->hdr.type_of_service ==
427                                                         UINT8_MAX)
428                                                 input_set |=
429                                                         ICE_INSET_TUN_IPV4_TOS;
430                                         if (ipv4_mask->hdr.src_addr ==
431                                                         UINT32_MAX)
432                                                 input_set |=
433                                                         ICE_INSET_TUN_IPV4_SRC;
434                                         if (ipv4_mask->hdr.dst_addr ==
435                                                         UINT32_MAX)
436                                                 input_set |=
437                                                         ICE_INSET_TUN_IPV4_DST;
438                                         if (ipv4_mask->hdr.time_to_live ==
439                                                         UINT8_MAX)
440                                                 input_set |=
441                                                         ICE_INSET_TUN_IPV4_TTL;
442                                         if (ipv4_mask->hdr.next_proto_id ==
443                                                         UINT8_MAX)
444                                                 input_set |=
445                                                 ICE_INSET_TUN_IPV4_PROTO;
446                                 } else {
447                                         if (ipv4_mask->hdr.src_addr ==
448                                                         UINT32_MAX)
449                                                 input_set |= ICE_INSET_IPV4_SRC;
450                                         if (ipv4_mask->hdr.dst_addr ==
451                                                         UINT32_MAX)
452                                                 input_set |= ICE_INSET_IPV4_DST;
453                                         if (ipv4_mask->hdr.time_to_live ==
454                                                         UINT8_MAX)
455                                                 input_set |= ICE_INSET_IPV4_TTL;
456                                         if (ipv4_mask->hdr.next_proto_id ==
457                                                         UINT8_MAX)
458                                                 input_set |=
459                                                 ICE_INSET_IPV4_PROTO;
460                                         if (ipv4_mask->hdr.type_of_service ==
461                                                         UINT8_MAX)
462                                                 input_set |=
463                                                         ICE_INSET_IPV4_TOS;
464                                 }
465                                 list[t].type = (tunnel_valid  == 0) ?
466                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
467                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
468                                         list[t].h_u.ipv4_hdr.src_addr =
469                                                 ipv4_spec->hdr.src_addr;
470                                         list[t].m_u.ipv4_hdr.src_addr =
471                                                 UINT32_MAX;
472                                 }
473                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
474                                         list[t].h_u.ipv4_hdr.dst_addr =
475                                                 ipv4_spec->hdr.dst_addr;
476                                         list[t].m_u.ipv4_hdr.dst_addr =
477                                                 UINT32_MAX;
478                                 }
479                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
480                                         list[t].h_u.ipv4_hdr.time_to_live =
481                                                 ipv4_spec->hdr.time_to_live;
482                                         list[t].m_u.ipv4_hdr.time_to_live =
483                                                 UINT8_MAX;
484                                 }
485                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
486                                         list[t].h_u.ipv4_hdr.protocol =
487                                                 ipv4_spec->hdr.next_proto_id;
488                                         list[t].m_u.ipv4_hdr.protocol =
489                                                 UINT8_MAX;
490                                 }
491                                 if (ipv4_mask->hdr.type_of_service ==
492                                                 UINT8_MAX) {
493                                         list[t].h_u.ipv4_hdr.tos =
494                                                 ipv4_spec->hdr.type_of_service;
495                                         list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
496                                 }
497                                 t++;
498                         } else if (!ipv4_spec && !ipv4_mask) {
499                                 list[t].type = (tunnel_valid  == 0) ?
500                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
501                         }
502                         break;
503
504                 case RTE_FLOW_ITEM_TYPE_IPV6:
505                         ipv6_spec = item->spec;
506                         ipv6_mask = item->mask;
507                         if (ipv6_spec && ipv6_mask) {
508                                 if (ipv6_mask->hdr.payload_len) {
509                                         rte_flow_error_set(error, EINVAL,
510                                            RTE_FLOW_ERROR_TYPE_ITEM,
511                                            item,
512                                            "Invalid IPv6 mask");
513                                         return 0;
514                                 }
515
516                                 if (tunnel_valid) {
517                                         if (!memcmp(ipv6_mask->hdr.src_addr,
518                                                 ipv6_addr_mask,
519                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
520                                                 input_set |=
521                                                         ICE_INSET_TUN_IPV6_SRC;
522                                         if (!memcmp(ipv6_mask->hdr.dst_addr,
523                                                 ipv6_addr_mask,
524                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
525                                                 input_set |=
526                                                         ICE_INSET_TUN_IPV6_DST;
527                                         if (ipv6_mask->hdr.proto == UINT8_MAX)
528                                                 input_set |=
529                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
530                                         if (ipv6_mask->hdr.hop_limits ==
531                                                         UINT8_MAX)
532                                                 input_set |=
533                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
534                                         if ((ipv6_mask->hdr.vtc_flow &
535                                                 rte_cpu_to_be_32
536                                                 (RTE_IPV6_HDR_TC_MASK))
537                                                         == rte_cpu_to_be_32
538                                                         (RTE_IPV6_HDR_TC_MASK))
539                                                 input_set |=
540                                                         ICE_INSET_TUN_IPV6_TC;
541                                 } else {
542                                         if (!memcmp(ipv6_mask->hdr.src_addr,
543                                                 ipv6_addr_mask,
544                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
545                                                 input_set |= ICE_INSET_IPV6_SRC;
546                                         if (!memcmp(ipv6_mask->hdr.dst_addr,
547                                                 ipv6_addr_mask,
548                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
549                                                 input_set |= ICE_INSET_IPV6_DST;
550                                         if (ipv6_mask->hdr.proto == UINT8_MAX)
551                                                 input_set |=
552                                                 ICE_INSET_IPV6_NEXT_HDR;
553                                         if (ipv6_mask->hdr.hop_limits ==
554                                                         UINT8_MAX)
555                                                 input_set |=
556                                                 ICE_INSET_IPV6_HOP_LIMIT;
557                                         if ((ipv6_mask->hdr.vtc_flow &
558                                                 rte_cpu_to_be_32
559                                                 (RTE_IPV6_HDR_TC_MASK))
560                                                         == rte_cpu_to_be_32
561                                                         (RTE_IPV6_HDR_TC_MASK))
562                                                 input_set |= ICE_INSET_IPV6_TC;
563                                 }
564                                 list[t].type = (tunnel_valid  == 0) ?
565                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
566                                 struct ice_ipv6_hdr *f;
567                                 struct ice_ipv6_hdr *s;
568                                 f = &list[t].h_u.ipv6_hdr;
569                                 s = &list[t].m_u.ipv6_hdr;
570                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
571                                         if (ipv6_mask->hdr.src_addr[j] ==
572                                                 UINT8_MAX) {
573                                                 f->src_addr[j] =
574                                                 ipv6_spec->hdr.src_addr[j];
575                                                 s->src_addr[j] =
576                                                 ipv6_mask->hdr.src_addr[j];
577                                         }
578                                         if (ipv6_mask->hdr.dst_addr[j] ==
579                                                                 UINT8_MAX) {
580                                                 f->dst_addr[j] =
581                                                 ipv6_spec->hdr.dst_addr[j];
582                                                 s->dst_addr[j] =
583                                                 ipv6_mask->hdr.dst_addr[j];
584                                         }
585                                 }
586                                 if (ipv6_mask->hdr.proto == UINT8_MAX) {
587                                         f->next_hdr =
588                                                 ipv6_spec->hdr.proto;
589                                         s->next_hdr = UINT8_MAX;
590                                 }
591                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
592                                         f->hop_limit =
593                                                 ipv6_spec->hdr.hop_limits;
594                                         s->hop_limit = UINT8_MAX;
595                                 }
596                                 if ((ipv6_mask->hdr.vtc_flow &
597                                                 rte_cpu_to_be_32
598                                                 (RTE_IPV6_HDR_TC_MASK))
599                                                 == rte_cpu_to_be_32
600                                                 (RTE_IPV6_HDR_TC_MASK)) {
601                                         struct ice_le_ver_tc_flow vtf;
602                                         vtf.u.fld.version = 0;
603                                         vtf.u.fld.flow_label = 0;
604                                         vtf.u.fld.tc = (rte_be_to_cpu_32
605                                                 (ipv6_spec->hdr.vtc_flow) &
606                                                         RTE_IPV6_HDR_TC_MASK) >>
607                                                         RTE_IPV6_HDR_TC_SHIFT;
608                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
609                                         vtf.u.fld.tc = UINT8_MAX;
610                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
611                                 }
612                                 t++;
613                         } else if (!ipv6_spec && !ipv6_mask) {
614                                 list[t].type = (tun_type == ICE_NON_TUN) ?
615                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
616                         }
617                         break;
618
619                 case RTE_FLOW_ITEM_TYPE_UDP:
620                         udp_spec = item->spec;
621                         udp_mask = item->mask;
622                         if (udp_spec && udp_mask) {
623                                 /* Check UDP mask and update input set*/
624                                 if (udp_mask->hdr.dgram_len ||
625                                     udp_mask->hdr.dgram_cksum) {
626                                         rte_flow_error_set(error, EINVAL,
627                                                    RTE_FLOW_ERROR_TYPE_ITEM,
628                                                    item,
629                                                    "Invalid UDP mask");
630                                         return 0;
631                                 }
632
633                                 if (tunnel_valid) {
634                                         if (udp_mask->hdr.src_port ==
635                                                         UINT16_MAX)
636                                                 input_set |=
637                                                 ICE_INSET_TUN_UDP_SRC_PORT;
638                                         if (udp_mask->hdr.dst_port ==
639                                                         UINT16_MAX)
640                                                 input_set |=
641                                                 ICE_INSET_TUN_UDP_DST_PORT;
642                                 } else {
643                                         if (udp_mask->hdr.src_port ==
644                                                         UINT16_MAX)
645                                                 input_set |=
646                                                 ICE_INSET_UDP_SRC_PORT;
647                                         if (udp_mask->hdr.dst_port ==
648                                                         UINT16_MAX)
649                                                 input_set |=
650                                                 ICE_INSET_UDP_DST_PORT;
651                                 }
652                                 if (tun_type == ICE_SW_TUN_VXLAN &&
653                                                 tunnel_valid == 0)
654                                         list[t].type = ICE_UDP_OF;
655                                 else
656                                         list[t].type = ICE_UDP_ILOS;
657                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
658                                         list[t].h_u.l4_hdr.src_port =
659                                                 udp_spec->hdr.src_port;
660                                         list[t].m_u.l4_hdr.src_port =
661                                                 udp_mask->hdr.src_port;
662                                 }
663                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
664                                         list[t].h_u.l4_hdr.dst_port =
665                                                 udp_spec->hdr.dst_port;
666                                         list[t].m_u.l4_hdr.dst_port =
667                                                 udp_mask->hdr.dst_port;
668                                 }
669                                                 t++;
670                         } else if (!udp_spec && !udp_mask) {
671                                 list[t].type = ICE_UDP_ILOS;
672                         }
673                         break;
674
675                 case RTE_FLOW_ITEM_TYPE_TCP:
676                         tcp_spec = item->spec;
677                         tcp_mask = item->mask;
678                         if (tcp_spec && tcp_mask) {
679                                 /* Check TCP mask and update input set */
680                                 if (tcp_mask->hdr.sent_seq ||
681                                         tcp_mask->hdr.recv_ack ||
682                                         tcp_mask->hdr.data_off ||
683                                         tcp_mask->hdr.tcp_flags ||
684                                         tcp_mask->hdr.rx_win ||
685                                         tcp_mask->hdr.cksum ||
686                                         tcp_mask->hdr.tcp_urp) {
687                                         rte_flow_error_set(error, EINVAL,
688                                            RTE_FLOW_ERROR_TYPE_ITEM,
689                                            item,
690                                            "Invalid TCP mask");
691                                         return 0;
692                                 }
693
694                                 if (tunnel_valid) {
695                                         if (tcp_mask->hdr.src_port ==
696                                                         UINT16_MAX)
697                                                 input_set |=
698                                                 ICE_INSET_TUN_TCP_SRC_PORT;
699                                         if (tcp_mask->hdr.dst_port ==
700                                                         UINT16_MAX)
701                                                 input_set |=
702                                                 ICE_INSET_TUN_TCP_DST_PORT;
703                                 } else {
704                                         if (tcp_mask->hdr.src_port ==
705                                                         UINT16_MAX)
706                                                 input_set |=
707                                                 ICE_INSET_TCP_SRC_PORT;
708                                         if (tcp_mask->hdr.dst_port ==
709                                                         UINT16_MAX)
710                                                 input_set |=
711                                                 ICE_INSET_TCP_DST_PORT;
712                                 }
713                                 list[t].type = ICE_TCP_IL;
714                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
715                                         list[t].h_u.l4_hdr.src_port =
716                                                 tcp_spec->hdr.src_port;
717                                         list[t].m_u.l4_hdr.src_port =
718                                                 tcp_mask->hdr.src_port;
719                                 }
720                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
721                                         list[t].h_u.l4_hdr.dst_port =
722                                                 tcp_spec->hdr.dst_port;
723                                         list[t].m_u.l4_hdr.dst_port =
724                                                 tcp_mask->hdr.dst_port;
725                                 }
726                                 t++;
727                         } else if (!tcp_spec && !tcp_mask) {
728                                 list[t].type = ICE_TCP_IL;
729                         }
730                         break;
731
732                 case RTE_FLOW_ITEM_TYPE_SCTP:
733                         sctp_spec = item->spec;
734                         sctp_mask = item->mask;
735                         if (sctp_spec && sctp_mask) {
736                                 /* Check SCTP mask and update input set */
737                                 if (sctp_mask->hdr.cksum) {
738                                         rte_flow_error_set(error, EINVAL,
739                                            RTE_FLOW_ERROR_TYPE_ITEM,
740                                            item,
741                                            "Invalid SCTP mask");
742                                         return 0;
743                                 }
744
745                                 if (tunnel_valid) {
746                                         if (sctp_mask->hdr.src_port ==
747                                                         UINT16_MAX)
748                                                 input_set |=
749                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
750                                         if (sctp_mask->hdr.dst_port ==
751                                                         UINT16_MAX)
752                                                 input_set |=
753                                                 ICE_INSET_TUN_SCTP_DST_PORT;
754                                 } else {
755                                         if (sctp_mask->hdr.src_port ==
756                                                         UINT16_MAX)
757                                                 input_set |=
758                                                 ICE_INSET_SCTP_SRC_PORT;
759                                         if (sctp_mask->hdr.dst_port ==
760                                                         UINT16_MAX)
761                                                 input_set |=
762                                                 ICE_INSET_SCTP_DST_PORT;
763                                 }
764                                 list[t].type = ICE_SCTP_IL;
765                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
766                                         list[t].h_u.sctp_hdr.src_port =
767                                                 sctp_spec->hdr.src_port;
768                                         list[t].m_u.sctp_hdr.src_port =
769                                                 sctp_mask->hdr.src_port;
770                                 }
771                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
772                                         list[t].h_u.sctp_hdr.dst_port =
773                                                 sctp_spec->hdr.dst_port;
774                                         list[t].m_u.sctp_hdr.dst_port =
775                                                 sctp_mask->hdr.dst_port;
776                                 }
777                                 t++;
778                         } else if (!sctp_spec && !sctp_mask) {
779                                 list[t].type = ICE_SCTP_IL;
780                         }
781                         break;
782
783                 case RTE_FLOW_ITEM_TYPE_VXLAN:
784                         vxlan_spec = item->spec;
785                         vxlan_mask = item->mask;
786                         /* Check if VXLAN item is used to describe protocol.
787                          * If yes, both spec and mask should be NULL.
788                          * If no, both spec and mask shouldn't be NULL.
789                          */
790                         if ((!vxlan_spec && vxlan_mask) ||
791                             (vxlan_spec && !vxlan_mask)) {
792                                 rte_flow_error_set(error, EINVAL,
793                                            RTE_FLOW_ERROR_TYPE_ITEM,
794                                            item,
795                                            "Invalid VXLAN item");
796                                 return 0;
797                         }
798
799                         tunnel_valid = 1;
800                         if (vxlan_spec && vxlan_mask) {
801                                 list[t].type = ICE_VXLAN;
802                                 if (vxlan_mask->vni[0] == UINT8_MAX &&
803                                         vxlan_mask->vni[1] == UINT8_MAX &&
804                                         vxlan_mask->vni[2] == UINT8_MAX) {
805                                         list[t].h_u.tnl_hdr.vni =
806                                                 (vxlan_spec->vni[2] << 16) |
807                                                 (vxlan_spec->vni[1] << 8) |
808                                                 vxlan_spec->vni[0];
809                                         list[t].m_u.tnl_hdr.vni =
810                                                 UINT32_MAX;
811                                         input_set |=
812                                                 ICE_INSET_TUN_VXLAN_VNI;
813                                 }
814                                 t++;
815                         } else if (!vxlan_spec && !vxlan_mask) {
816                                 list[t].type = ICE_VXLAN;
817                         }
818                         break;
819
820                 case RTE_FLOW_ITEM_TYPE_NVGRE:
821                         nvgre_spec = item->spec;
822                         nvgre_mask = item->mask;
823                         /* Check if NVGRE item is used to describe protocol.
824                          * If yes, both spec and mask should be NULL.
825                          * If no, both spec and mask shouldn't be NULL.
826                          */
827                         if ((!nvgre_spec && nvgre_mask) ||
828                             (nvgre_spec && !nvgre_mask)) {
829                                 rte_flow_error_set(error, EINVAL,
830                                            RTE_FLOW_ERROR_TYPE_ITEM,
831                                            item,
832                                            "Invalid NVGRE item");
833                                 return 0;
834                         }
835                         tunnel_valid = 1;
836                         if (nvgre_spec && nvgre_mask) {
837                                 list[t].type = ICE_NVGRE;
838                                 if (nvgre_mask->tni[0] == UINT8_MAX &&
839                                         nvgre_mask->tni[1] == UINT8_MAX &&
840                                         nvgre_mask->tni[2] == UINT8_MAX) {
841                                         list[t].h_u.nvgre_hdr.tni_flow =
842                                                 (nvgre_spec->tni[2] << 16) |
843                                                 (nvgre_spec->tni[1] << 8) |
844                                                 nvgre_spec->tni[0];
845                                         list[t].m_u.nvgre_hdr.tni_flow =
846                                                 UINT32_MAX;
847                                         input_set |=
848                                                 ICE_INSET_TUN_NVGRE_TNI;
849                                 }
850                                 t++;
851                         } else if (!nvgre_spec && !nvgre_mask) {
852                                 list[t].type = ICE_NVGRE;
853                         }
854                         break;
855
856                 case RTE_FLOW_ITEM_TYPE_VLAN:
857                         vlan_spec = item->spec;
858                         vlan_mask = item->mask;
859                         /* Check if VLAN item is used to describe protocol.
860                          * If yes, both spec and mask should be NULL.
861                          * If no, both spec and mask shouldn't be NULL.
862                          */
863                         if ((!vlan_spec && vlan_mask) ||
864                             (vlan_spec && !vlan_mask)) {
865                                 rte_flow_error_set(error, EINVAL,
866                                            RTE_FLOW_ERROR_TYPE_ITEM,
867                                            item,
868                                            "Invalid VLAN item");
869                                 return 0;
870                         }
871                         if (vlan_spec && vlan_mask) {
872                                 list[t].type = ICE_VLAN_OFOS;
873                                 if (vlan_mask->tci == UINT16_MAX) {
874                                         list[t].h_u.vlan_hdr.vlan =
875                                                 vlan_spec->tci;
876                                         list[t].m_u.vlan_hdr.vlan =
877                                                 UINT16_MAX;
878                                         input_set |= ICE_INSET_VLAN_OUTER;
879                                 }
880                                 if (vlan_mask->inner_type == UINT16_MAX) {
881                                         list[t].h_u.vlan_hdr.type =
882                                                 vlan_spec->inner_type;
883                                         list[t].m_u.vlan_hdr.type =
884                                                 UINT16_MAX;
885                                         input_set |= ICE_INSET_VLAN_OUTER;
886                                 }
887                                 t++;
888                         } else if (!vlan_spec && !vlan_mask) {
889                                 list[t].type = ICE_VLAN_OFOS;
890                         }
891                         break;
892
893                 case RTE_FLOW_ITEM_TYPE_PPPOED:
894                 case RTE_FLOW_ITEM_TYPE_PPPOES:
895                         pppoe_spec = item->spec;
896                         pppoe_mask = item->mask;
897                         /* Check if PPPoE item is used to describe protocol.
898                          * If yes, both spec and mask should be NULL.
899                          * If no, both spec and mask shouldn't be NULL.
900                          */
901                         if ((!pppoe_spec && pppoe_mask) ||
902                                 (pppoe_spec && !pppoe_mask)) {
903                                 rte_flow_error_set(error, EINVAL,
904                                         RTE_FLOW_ERROR_TYPE_ITEM,
905                                         item,
906                                         "Invalid pppoe item");
907                                 return 0;
908                         }
909                         if (pppoe_spec && pppoe_mask) {
910                                 /* Check pppoe mask and update input set */
911                                 if (pppoe_mask->length ||
912                                         pppoe_mask->code ||
913                                         pppoe_mask->version_type) {
914                                         rte_flow_error_set(error, EINVAL,
915                                                 RTE_FLOW_ERROR_TYPE_ITEM,
916                                                 item,
917                                                 "Invalid pppoe mask");
918                                         return 0;
919                                 }
920                                 list[t].type = ICE_PPPOE;
921                                 if (pppoe_mask->session_id == UINT16_MAX) {
922                                         list[t].h_u.pppoe_hdr.session_id =
923                                                 pppoe_spec->session_id;
924                                         list[t].m_u.pppoe_hdr.session_id =
925                                                 UINT16_MAX;
926                                         input_set |= ICE_INSET_PPPOE_SESSION;
927                                 }
928                                 t++;
929                                 pppoe_valid = 1;
930                         } else if (!pppoe_spec && !pppoe_mask) {
931                                 list[t].type = ICE_PPPOE;
932                         }
933
934                         break;
935
936                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
937                         pppoe_proto_spec = item->spec;
938                         pppoe_proto_mask = item->mask;
939                         /* Check if PPPoE optional proto_id item
940                          * is used to describe protocol.
941                          * If yes, both spec and mask should be NULL.
942                          * If no, both spec and mask shouldn't be NULL.
943                          */
944                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
945                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
946                                 rte_flow_error_set(error, EINVAL,
947                                         RTE_FLOW_ERROR_TYPE_ITEM,
948                                         item,
949                                         "Invalid pppoe proto item");
950                                 return 0;
951                         }
952                         if (pppoe_proto_spec && pppoe_proto_mask) {
953                                 if (pppoe_valid)
954                                         t--;
955                                 list[t].type = ICE_PPPOE;
956                                 if (pppoe_proto_mask->proto_id == UINT16_MAX) {
957                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
958                                                 pppoe_proto_spec->proto_id;
959                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
960                                                 UINT16_MAX;
961                                         input_set |= ICE_INSET_PPPOE_PROTO;
962                                 }
963                                 t++;
964                         } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
965                                 list[t].type = ICE_PPPOE;
966                         }
967
968                         break;
969
970                 case RTE_FLOW_ITEM_TYPE_VOID:
971                         break;
972
973                 default:
974                         rte_flow_error_set(error, EINVAL,
975                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
976                                    "Invalid pattern item.");
977                         goto out;
978                 }
979         }
980
981         *lkups_num = t;
982
983         return input_set;
984 out:
985         return 0;
986 }
987
988 static int
989 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
990                             struct rte_flow_error *error,
991                             struct ice_adv_rule_info *rule_info)
992 {
993         const struct rte_flow_action_vf *act_vf;
994         const struct rte_flow_action *action;
995         enum rte_flow_action_type action_type;
996
997         for (action = actions; action->type !=
998                                 RTE_FLOW_ACTION_TYPE_END; action++) {
999                 action_type = action->type;
1000                 switch (action_type) {
1001                 case RTE_FLOW_ACTION_TYPE_VF:
1002                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1003                         act_vf = action->conf;
1004                         rule_info->sw_act.vsi_handle = act_vf->id;
1005                         break;
1006                 default:
1007                         rte_flow_error_set(error,
1008                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1009                                            actions,
1010                                            "Invalid action type or queue number");
1011                         return -rte_errno;
1012                 }
1013         }
1014
1015         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1016         rule_info->rx = 1;
1017         rule_info->priority = 5;
1018
1019         return 0;
1020 }
1021
1022 static int
1023 ice_switch_parse_action(struct ice_pf *pf,
1024                 const struct rte_flow_action *actions,
1025                 struct rte_flow_error *error,
1026                 struct ice_adv_rule_info *rule_info)
1027 {
1028         struct ice_vsi *vsi = pf->main_vsi;
1029         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1030         const struct rte_flow_action_queue *act_q;
1031         const struct rte_flow_action_rss *act_qgrop;
1032         uint16_t base_queue, i;
1033         const struct rte_flow_action *action;
1034         enum rte_flow_action_type action_type;
1035         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1036                  2, 4, 8, 16, 32, 64, 128};
1037
1038         base_queue = pf->base_queue + vsi->base_queue;
1039         for (action = actions; action->type !=
1040                         RTE_FLOW_ACTION_TYPE_END; action++) {
1041                 action_type = action->type;
1042                 switch (action_type) {
1043                 case RTE_FLOW_ACTION_TYPE_RSS:
1044                         act_qgrop = action->conf;
1045                         rule_info->sw_act.fltr_act =
1046                                 ICE_FWD_TO_QGRP;
1047                         rule_info->sw_act.fwd_id.q_id =
1048                                 base_queue + act_qgrop->queue[0];
1049                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1050                                 if (act_qgrop->queue_num ==
1051                                         valid_qgrop_number[i])
1052                                         break;
1053                         }
1054                         if (i == MAX_QGRP_NUM_TYPE)
1055                                 goto error;
1056                         if ((act_qgrop->queue[0] +
1057                                 act_qgrop->queue_num) >
1058                                 dev->data->nb_rx_queues)
1059                                 goto error;
1060                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1061                                 if (act_qgrop->queue[i + 1] !=
1062                                         act_qgrop->queue[i] + 1)
1063                                         goto error;
1064                         rule_info->sw_act.qgrp_size =
1065                                 act_qgrop->queue_num;
1066                         break;
1067                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1068                         act_q = action->conf;
1069                         if (act_q->index >= dev->data->nb_rx_queues)
1070                                 goto error;
1071                         rule_info->sw_act.fltr_act =
1072                                 ICE_FWD_TO_Q;
1073                         rule_info->sw_act.fwd_id.q_id =
1074                                 base_queue + act_q->index;
1075                         break;
1076
1077                 case RTE_FLOW_ACTION_TYPE_DROP:
1078                         rule_info->sw_act.fltr_act =
1079                                 ICE_DROP_PACKET;
1080                         break;
1081
1082                 case RTE_FLOW_ACTION_TYPE_VOID:
1083                         break;
1084
1085                 default:
1086                         goto error;
1087                 }
1088         }
1089
1090         rule_info->sw_act.vsi_handle = vsi->idx;
1091         rule_info->rx = 1;
1092         rule_info->sw_act.src = vsi->idx;
1093         rule_info->priority = 5;
1094
1095         return 0;
1096
1097 error:
1098         rte_flow_error_set(error,
1099                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1100                 actions,
1101                 "Invalid action type or queue number");
1102         return -rte_errno;
1103 }
1104
1105 static int
1106 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1107                 struct ice_pattern_match_item *array,
1108                 uint32_t array_len,
1109                 const struct rte_flow_item pattern[],
1110                 const struct rte_flow_action actions[],
1111                 void **meta,
1112                 struct rte_flow_error *error)
1113 {
1114         struct ice_pf *pf = &ad->pf;
1115         uint64_t inputset = 0;
1116         int ret = 0;
1117         struct sw_meta *sw_meta_ptr = NULL;
1118         struct ice_adv_rule_info rule_info;
1119         struct ice_adv_lkup_elem *list = NULL;
1120         uint16_t lkups_num = 0;
1121         const struct rte_flow_item *item = pattern;
1122         uint16_t item_num = 0;
1123         enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1124         struct ice_pattern_match_item *pattern_match_item = NULL;
1125
1126         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1127                 item_num++;
1128                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1129                         tun_type = ICE_SW_TUN_VXLAN;
1130                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1131                         tun_type = ICE_SW_TUN_NVGRE;
1132                 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1133                                 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1134                         tun_type = ICE_SW_TUN_PPPOE;
1135                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1136                         const struct rte_flow_item_eth *eth_mask;
1137                         if (item->mask)
1138                                 eth_mask = item->mask;
1139                         else
1140                                 continue;
1141                         if (eth_mask->type == UINT16_MAX)
1142                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1143                 }
1144                 /* reserve one more memory slot for ETH which may
1145                  * consume 2 lookup items.
1146                  */
1147                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1148                         item_num++;
1149         }
1150
1151         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1152         if (!list) {
1153                 rte_flow_error_set(error, EINVAL,
1154                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1155                                    "No memory for PMD internal items");
1156                 return -rte_errno;
1157         }
1158
1159         rule_info.tun_type = tun_type;
1160
1161         sw_meta_ptr =
1162                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1163         if (!sw_meta_ptr) {
1164                 rte_flow_error_set(error, EINVAL,
1165                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1166                                    "No memory for sw_pattern_meta_ptr");
1167                 goto error;
1168         }
1169
1170         pattern_match_item =
1171                 ice_search_pattern_match_item(pattern, array, array_len, error);
1172         if (!pattern_match_item) {
1173                 rte_flow_error_set(error, EINVAL,
1174                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1175                                    "Invalid input pattern");
1176                 goto error;
1177         }
1178
1179         inputset = ice_switch_inset_get
1180                 (pattern, error, list, &lkups_num, tun_type);
1181         if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
1182                 rte_flow_error_set(error, EINVAL,
1183                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1184                                    pattern,
1185                                    "Invalid input set");
1186                 goto error;
1187         }
1188
1189         if (ad->hw.dcf_enabled)
1190                 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1191         else
1192                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1193
1194         if (ret) {
1195                 rte_flow_error_set(error, EINVAL,
1196                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1197                                    "Invalid input action");
1198                 goto error;
1199         }
1200
1201         if (meta) {
1202                 *meta = sw_meta_ptr;
1203                 ((struct sw_meta *)*meta)->list = list;
1204                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1205                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1206         } else {
1207                 rte_free(list);
1208                 rte_free(sw_meta_ptr);
1209         }
1210
1211         rte_free(pattern_match_item);
1212
1213         return 0;
1214
1215 error:
1216         rte_free(list);
1217         rte_free(sw_meta_ptr);
1218         rte_free(pattern_match_item);
1219
1220         return -rte_errno;
1221 }
1222
1223 static int
1224 ice_switch_query(struct ice_adapter *ad __rte_unused,
1225                 struct rte_flow *flow __rte_unused,
1226                 struct rte_flow_query_count *count __rte_unused,
1227                 struct rte_flow_error *error)
1228 {
1229         rte_flow_error_set(error, EINVAL,
1230                 RTE_FLOW_ERROR_TYPE_HANDLE,
1231                 NULL,
1232                 "count action not supported by switch filter");
1233
1234         return -rte_errno;
1235 }
1236
1237 static int
1238 ice_switch_init(struct ice_adapter *ad)
1239 {
1240         int ret = 0;
1241         struct ice_flow_parser *dist_parser;
1242         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1243
1244         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1245                 dist_parser = &ice_switch_dist_parser_comms;
1246         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1247                 dist_parser = &ice_switch_dist_parser_os;
1248         else
1249                 return -EINVAL;
1250
1251         if (ad->devargs.pipe_mode_support)
1252                 ret = ice_register_parser(perm_parser, ad);
1253         else
1254                 ret = ice_register_parser(dist_parser, ad);
1255         return ret;
1256 }
1257
1258 static void
1259 ice_switch_uninit(struct ice_adapter *ad)
1260 {
1261         struct ice_flow_parser *dist_parser;
1262         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1263
1264         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1265                 dist_parser = &ice_switch_dist_parser_comms;
1266         else
1267                 dist_parser = &ice_switch_dist_parser_os;
1268
1269         if (ad->devargs.pipe_mode_support)
1270                 ice_unregister_parser(perm_parser, ad);
1271         else
1272                 ice_unregister_parser(dist_parser, ad);
1273 }
1274
1275 static struct
1276 ice_flow_engine ice_switch_engine = {
1277         .init = ice_switch_init,
1278         .uninit = ice_switch_uninit,
1279         .create = ice_switch_create,
1280         .destroy = ice_switch_destroy,
1281         .query_count = ice_switch_query,
1282         .free = ice_switch_filter_rule_free,
1283         .type = ICE_FLOW_ENGINE_SWITCH,
1284 };
1285
1286 static struct
1287 ice_flow_parser ice_switch_dist_parser_os = {
1288         .engine = &ice_switch_engine,
1289         .array = ice_switch_pattern_dist_os,
1290         .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1291         .parse_pattern_action = ice_switch_parse_pattern_action,
1292         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1293 };
1294
1295 static struct
1296 ice_flow_parser ice_switch_dist_parser_comms = {
1297         .engine = &ice_switch_engine,
1298         .array = ice_switch_pattern_dist_comms,
1299         .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1300         .parse_pattern_action = ice_switch_parse_pattern_action,
1301         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1302 };
1303
1304 static struct
1305 ice_flow_parser ice_switch_perm_parser = {
1306         .engine = &ice_switch_engine,
1307         .array = ice_switch_pattern_perm,
1308         .array_len = RTE_DIM(ice_switch_pattern_perm),
1309         .parse_pattern_action = ice_switch_parse_pattern_action,
1310         .stage = ICE_FLOW_STAGE_PERMISSION,
1311 };
1312
1313 RTE_INIT(ice_sw_engine_init)
1314 {
1315         struct ice_flow_engine *engine = &ice_switch_engine;
1316         ice_register_flow_engine(engine);
1317 }