net/ice: support DCF device reset
[dpdk.git] / drivers / net / ice / ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH (\
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_FDIR_INSET_ETH | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
        ICE_INSET_IPV6_PKID)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_IPV4_TCP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
        ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV4_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
        {pattern_ethertype,                             ICE_FDIR_INSET_ETH,             ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4,                              ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,                          ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,                          ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,                         ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6,                              ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_frag_ext,                     ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,                          ICE_FDIR_INSET_ETH_IPV6_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,                          ICE_FDIR_INSET_ETH_IPV6_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,                         ICE_FDIR_INSET_ETH_IPV6_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,                          ICE_FDIR_INSET_IPV4_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,                      ICE_FDIR_INSET_IPV4_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,                          ICE_FDIR_INSET_IPV6_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,                      ICE_FDIR_INSET_IPV6_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,               ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,          ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,      ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE},
        /* duplicated GTPU input set in 3rd column to align with shared code behavior. Ideally, only put GTPU field in 2nd column. */
        {pattern_eth_ipv4_gtpu,                         ICE_FDIR_INSET_IPV4_GTPU,       ICE_FDIR_INSET_IPV4_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,                      ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu,                         ICE_FDIR_INSET_IPV6_GTPU,       ICE_FDIR_INSET_IPV6_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh,                      ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

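/*
 * ice_fdir_prof_alloc - allocate the FDIR HW profile table
 * @hw: pointer to the hardware structure
 *
 * Allocates one ice_fd_hw_prof slot per flow type. On failure, every
 * slot allocated so far is released before returning -ENOMEM.
 */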
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

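/*
 * ice_fdir_counter_pool_add - add a block of HW counters to the container
 * @pf: board private structure
 * @container: counter pool container to extend
 * @index_start: first HW counter index covered by the new pool
 * @len: number of counters in the new pool
 */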
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink the pool from the container list before freeing it */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

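/*
 * ice_fdir_counter_shared_search - find an in-use shared counter by ID
 * @container: counter pool container to scan
 * @id: rte_flow COUNT action ID to match
 *
 * Returns the matching shared counter so it can be reference-counted,
 * or NULL if no active shared counter carries this ID.
 */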
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

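/*
 * ice_fdir_counter_alloc - allocate a FDIR statistics counter
 * @pf: board private structure
 * @shared: nonzero if the counter may be shared between rules
 * @id: rte_flow COUNT action ID
 *
 * A shared request first tries to reuse an existing counter with the
 * same ID; otherwise the first free counter is taken from the pools,
 * its HW registers are cleared, and exhausted pools are rotated to the
 * tail of the pool list.
 */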
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

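/*
 * ice_fdir_counter_free - drop one reference to a FDIR counter
 * @pf: board private structure (unused)
 * @counter: counter to release, may be NULL
 *
 * The counter is returned to its pool's free list once the last
 * reference is gone.
 */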
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

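/*
 * ice_fdir_init_filter_list - create the SW lookup state for FDIR rules
 * @pf: board private structure
 *
 * Sets up a CRC-hashed table keyed by ice_fdir_fltr_pattern plus a
 * flat map from hash index to filter entry.
 */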
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has already been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove the profile for a given filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        rte_eth_dma_zone_free(eth_dev, "fdir_tx_ring", ICE_FDIR_QUEUE_ID);
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        rte_eth_dma_zone_free(eth_dev, "fdir_rx_ring", ICE_FDIR_QUEUE_ID);
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

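/*
 * ice_fdir_cur_prof_conflict - check a new input set against the profile
 * already installed for this flow type
 * @pf: board private structure
 * @ptype: flow type being configured
 * @seg: segment info describing the new input set
 * @is_tunnel: true for the tunnel (inner) profile
 *
 * Returns 0 if no profile exists (or an unused conflicting one was
 * removed), -EEXIST if an identical profile is already in place, and
 * -EINVAL if a conflicting profile is pinned by existing rules.
 */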
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* if no input set conflict, return -EEXIST */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and rule exists, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

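/*
 * ice_fdir_cross_prof_conflict - resolve conflicts with profiles of
 * related flow types
 * @pf: board private structure
 * @ptype: flow type being configured
 * @is_tunnel: true for the tunnel (inner) profile
 *
 * L4 profiles conflict with the matching OTHER profile and vice versa;
 * any conflicting profile that still carries rules makes the new
 * profile fail with -EINVAL.
 */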
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

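/*
 * ice_fdir_hw_tbl_conf - program a flow profile into the FD block
 * @pf: board private structure
 * @vsi: main VSI that receives matched packets
 * @ctrl_vsi: FDIR control VSI used for programming packets
 * @seg: segment info array (outer, plus inner for tunnels)
 * @ptype: flow type of the profile
 * @is_tunnel: true for tunnel profiles
 *
 * After the conflict checks pass, the profile and one flow entry per
 * VSI are added, and the bookkeeping in hw->fdir_prof is updated.
 */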
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check for conflicts with profiles of other flow types */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

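/*
 * ice_fdir_input_set_parse - translate input set bits to flow fields
 * @inset: input set bitmap collected from the flow pattern
 * @field: output array, filled with the matching flow field indexes
 */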
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
                {ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
                {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
                {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

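/*
 * ice_fdir_input_set_hdrs - set the protocol header flags for a flow
 * type on a flow segment
 * @flow: FDIR flow type
 * @seg: flow segment to update
 */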
static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV4:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV6:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NON_IP_L2:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }
}

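/*
 * ice_fdir_input_set_conf - build flow segments for a rule's input set
 * and install the resulting profile
 * @pf: board private structure
 * @flow: FDIR flow type
 * @inner_input_set: input set of the inner (tunnel payload) headers
 * @outer_input_set: input set of the outer headers
 * @ttype: tunnel type of the rule
 */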
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t inner_input_set, uint64_t outer_input_set,
                        enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        uint64_t input_set;
        bool is_tunnel;
        int k, i, ret = 0;

        if (!(inner_input_set | outer_input_set))
                return -EINVAL;

        seg_tun = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
        if (!seg_tun) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        /* use seg_tun[1] to record tunnel inner part */
        for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
                seg = &seg_tun[k];
                input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
                if (input_set == 0)
                        continue;

                for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                        field[i] = ICE_FLOW_FIELD_IDX_MAX;

                ice_fdir_input_set_parse(input_set, field);

                ice_fdir_input_set_hdrs(flow, seg);

                for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                        ice_flow_set_fld(seg, field[i],
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL, false);
                }
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);

        ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                   seg_tun, flow, is_tunnel);
        if (ret < 0) {
                rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        }

        return ret;
}

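/*
 * ice_fdir_cnt_update - track the number of active FDIR filters
 * @pf: board private structure
 * @ptype: flow type of the filter
 * @is_tunnel: true for tunnel filters
 * @add: true when a filter was added, false when removed
 */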
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        parser = &ice_fdir_parser;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_flow_parser *parser;
        struct ice_pf *pf = &ad->pf;

        if (ad->hw.dcf_enabled)
                return;

        parser = &ice_fdir_parser;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

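/*
 * ice_fdir_add_del_filter - program or remove one FDIR rule in HW
 * @pf: board private structure
 * @filter: rule to program
 * @add: true to add the rule, false to delete it
 *
 * Builds the programming descriptor and a dummy packet matching the
 * rule, then hands both to the FDIR programming queue.
 */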
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

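/*
 * ice_fdir_create_filter - rte_flow create handler for FDIR rules
 * @ad: ice adapter
 * @flow: rte_flow handle to attach the rule to
 * @meta: parsed ice_fdir_filter_conf from the pattern/action parser
 * @error: rte_flow error reporting
 *
 * Rejects duplicates via the SW hash table, configures the input set
 * profile, optionally allocates a counter, programs the rule in HW and
 * finally records it in the SW list.
 */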
1295 static int
1296 ice_fdir_create_filter(struct ice_adapter *ad,
1297                        struct rte_flow *flow,
1298                        void *meta,
1299                        struct rte_flow_error *error)
1300 {
1301         struct ice_pf *pf = &ad->pf;
1302         struct ice_fdir_filter_conf *filter = meta;
1303         struct ice_fdir_info *fdir_info = &pf->fdir;
1304         struct ice_fdir_filter_conf *entry, *node;
1305         struct ice_fdir_fltr_pattern key;
1306         bool is_tun;
1307         int ret;
1308
1309         ice_fdir_extract_fltr_key(&key, filter);
1310         node = ice_fdir_entry_lookup(fdir_info, &key);
1311         if (node) {
1312                 rte_flow_error_set(error, EEXIST,
1313                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1314                                    "Rule already exists!");
1315                 return -rte_errno;
1316         }
1317
1318         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1319         if (!entry) {
1320                 rte_flow_error_set(error, ENOMEM,
1321                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1322                                    "Failed to allocate memory");
1323                 return -rte_errno;
1324         }
1325
1326         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1327
1328         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1329                                       filter->input_set_i, filter->input_set_o,
1330                                       filter->tunnel_type);
1331         if (ret) {
1332                 rte_flow_error_set(error, -ret,
1333                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1334                                    "Profile configure failed.");
1335                 goto free_entry;
1336         }
1337
1338         /* alloc counter for FDIR */
1339         if (filter->input.cnt_ena) {
1340                 struct rte_flow_action_count *act_count = &filter->act_count;
1341
1342                 filter->counter = ice_fdir_counter_alloc(pf,
1343                                                          act_count->shared,
1344                                                          act_count->id);
1345                 if (!filter->counter) {
1346                         rte_flow_error_set(error, EINVAL,
1347                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1348                                         "Failed to alloc FDIR counter.");
1349                         goto free_entry;
1350                 }
1351                 filter->input.cnt_index = filter->counter->hw_index;
1352         }
1353
1354         ret = ice_fdir_add_del_filter(pf, filter, true);
1355         if (ret) {
1356                 rte_flow_error_set(error, -ret,
1357                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1358                                    "Add filter rule failed.");
1359                 goto free_counter;
1360         }
1361
1362         if (filter->mark_flag == 1)
1363                 ice_fdir_rx_parsing_enable(ad, 1);
1364
1365         rte_memcpy(entry, filter, sizeof(*entry));
1366         ret = ice_fdir_entry_insert(pf, entry, &key);
1367         if (ret) {
1368                 rte_flow_error_set(error, -ret,
1369                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1370                                    "Insert entry to table failed.");
		goto free_counter;
1372         }
1373
1374         flow->rule = entry;
1375         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1376
1377         return 0;
1378
1379 free_counter:
1380         if (filter->counter) {
1381                 ice_fdir_counter_free(pf, filter->counter);
1382                 filter->counter = NULL;
1383         }
1384
1385 free_entry:
1386         rte_free(entry);
1387         return -rte_errno;
1388 }
1389
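/* Destroy an FDIR rule: release its counter, look the rule up by its
 * pattern key, remove the filter from hardware and drop the software
 * table entry before freeing the rule memory.
 */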
1390 static int
1391 ice_fdir_destroy_filter(struct ice_adapter *ad,
1392                         struct rte_flow *flow,
1393                         struct rte_flow_error *error)
1394 {
1395         struct ice_pf *pf = &ad->pf;
1396         struct ice_fdir_info *fdir_info = &pf->fdir;
1397         struct ice_fdir_filter_conf *filter, *entry;
1398         struct ice_fdir_fltr_pattern key;
1399         bool is_tun;
1400         int ret;
1401
1402         filter = (struct ice_fdir_filter_conf *)flow->rule;
1403
1404         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1405
1406         if (filter->counter) {
1407                 ice_fdir_counter_free(pf, filter->counter);
1408                 filter->counter = NULL;
1409         }
1410
1411         ice_fdir_extract_fltr_key(&key, filter);
1412         entry = ice_fdir_entry_lookup(fdir_info, &key);
1413         if (!entry) {
1414                 rte_flow_error_set(error, ENOENT,
1415                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1416                                    "Can't find entry.");
1417                 return -rte_errno;
1418         }
1419
1420         ret = ice_fdir_add_del_filter(pf, filter, false);
1421         if (ret) {
1422                 rte_flow_error_set(error, -ret,
1423                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1424                                    "Del filter rule failed.");
1425                 return -rte_errno;
1426         }
1427
1428         ret = ice_fdir_entry_del(pf, &key);
1429         if (ret) {
1430                 rte_flow_error_set(error, -ret,
1431                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1432                                    "Remove entry from table failed.");
1433                 return -rte_errno;
1434         }
1435
1436         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1437
1438         if (filter->mark_flag == 1)
1439                 ice_fdir_rx_parsing_enable(ad, 0);
1440
1441         flow->rule = NULL;
1442
1443         rte_free(filter);
1444
1445         return 0;
1446 }
1447
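/* Report a rule's packet hit count through the rte_flow query API; the
 * FDIR counters used here track packets only, so no byte count is set.
 */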
1448 static int
1449 ice_fdir_query_count(struct ice_adapter *ad,
1450                       struct rte_flow *flow,
1451                       struct rte_flow_query_count *flow_stats,
1452                       struct rte_flow_error *error)
1453 {
1454         struct ice_pf *pf = &ad->pf;
1455         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1456         struct ice_fdir_filter_conf *filter = flow->rule;
1457         struct ice_fdir_counter *counter = filter->counter;
1458         uint64_t hits_lo, hits_hi;
1459
1460         if (!counter) {
1461                 rte_flow_error_set(error, EINVAL,
1462                                   RTE_FLOW_ERROR_TYPE_ACTION,
1463                                   NULL,
1464                                   "FDIR counters not available");
1465                 return -rte_errno;
1466         }
1467
	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits then returns the value in
	 * the shadow register.
	 */
1473         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1474         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1475
1476         flow_stats->hits_set = 1;
1477         flow_stats->hits = hits_lo | (hits_hi << 32);
1478         flow_stats->bytes_set = 0;
1479         flow_stats->bytes = 0;
1480
1481         if (flow_stats->reset) {
		/* reset the statistics counter value */
1483                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1484                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1485         }
1486
1487         return 0;
1488 }
1489
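/* FDIR engine callbacks registered with the generic ice flow framework. */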
1490 static struct ice_flow_engine ice_fdir_engine = {
1491         .init = ice_fdir_init,
1492         .uninit = ice_fdir_uninit,
1493         .create = ice_fdir_create_filter,
1494         .destroy = ice_fdir_destroy_filter,
1495         .query_count = ice_fdir_query_count,
1496         .type = ICE_FLOW_ENGINE_FDIR,
1497 };
1498
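/* Validate an RSS action used as an FDIR queue region: the queues must be
 * contiguous and the region size must be a power of two no larger than
 * ICE_FDIR_MAX_QREGION_SIZE. For example, "queues 4 5 6 7" would yield
 * q_index = 4 and q_region = log2(4) = 2.
 */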
1499 static int
1500 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1501                               struct rte_flow_error *error,
1502                               const struct rte_flow_action *act,
1503                               struct ice_fdir_filter_conf *filter)
1504 {
1505         const struct rte_flow_action_rss *rss = act->conf;
1506         uint32_t i;
1507
1508         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1509                 rte_flow_error_set(error, EINVAL,
1510                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1511                                    "Invalid action.");
1512                 return -rte_errno;
1513         }
1514
1515         if (rss->queue_num <= 1) {
1516                 rte_flow_error_set(error, EINVAL,
1517                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1518                                    "Queue region size can't be 0 or 1.");
1519                 return -rte_errno;
1520         }
1521
	/* check that the queue indexes of the queue region are contiguous */
1523         for (i = 0; i < rss->queue_num - 1; i++) {
1524                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1525                         rte_flow_error_set(error, EINVAL,
1526                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1527                                            "Discontinuous queue region");
1528                         return -rte_errno;
1529                 }
1530         }
1531
1532         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1535                                    "Invalid queue region indexes.");
1536                 return -rte_errno;
1537         }
1538
1539         if (!(rte_is_power_of_2(rss->queue_num) &&
1540              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1541                 rte_flow_error_set(error, EINVAL,
1542                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "2, 4, 8, 16, 32, 64 or 128, as long as the total "
				   "number of queues does not exceed the VSI allocation.");
1546                 return -rte_errno;
1547         }
1548
1549         filter->input.q_index = rss->queue[0];
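	/* queue_num is a power of two here, so this computes log2(queue_num) */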
1550         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1551         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1552
1553         return 0;
1554 }
1555
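/* Walk the action list and fill in the filter destination: exactly one of
 * QUEUE, DROP, PASSTHRU or RSS (queue region) is accepted, plus at most
 * one MARK and one COUNT action. A mark/count-only rule falls back to
 * PASSTHRU.
 */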
1556 static int
1557 ice_fdir_parse_action(struct ice_adapter *ad,
1558                       const struct rte_flow_action actions[],
1559                       struct rte_flow_error *error,
1560                       struct ice_fdir_filter_conf *filter)
1561 {
1562         struct ice_pf *pf = &ad->pf;
1563         const struct rte_flow_action_queue *act_q;
1564         const struct rte_flow_action_mark *mark_spec = NULL;
1565         const struct rte_flow_action_count *act_count;
1566         uint32_t dest_num = 0;
1567         uint32_t mark_num = 0;
1568         uint32_t counter_num = 0;
1569         int ret;
1570
1571         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1572                 switch (actions->type) {
1573                 case RTE_FLOW_ACTION_TYPE_VOID:
1574                         break;
1575                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1576                         dest_num++;
1577
1578                         act_q = actions->conf;
1579                         filter->input.q_index = act_q->index;
1580                         if (filter->input.q_index >=
1581                                         pf->dev_data->nb_rx_queues) {
1582                                 rte_flow_error_set(error, EINVAL,
1583                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1584                                                    actions,
1585                                                    "Invalid queue for FDIR.");
1586                                 return -rte_errno;
1587                         }
1588                         filter->input.dest_ctl =
1589                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1590                         break;
1591                 case RTE_FLOW_ACTION_TYPE_DROP:
1592                         dest_num++;
1593
1594                         filter->input.dest_ctl =
1595                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1596                         break;
1597                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1598                         dest_num++;
1599
1600                         filter->input.dest_ctl =
1601                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1602                         break;
1603                 case RTE_FLOW_ACTION_TYPE_RSS:
1604                         dest_num++;
1605
1606                         ret = ice_fdir_parse_action_qregion(pf,
1607                                                 error, actions, filter);
1608                         if (ret)
1609                                 return ret;
1610                         break;
1611                 case RTE_FLOW_ACTION_TYPE_MARK:
1612                         mark_num++;
1613                         filter->mark_flag = 1;
1614                         mark_spec = actions->conf;
1615                         filter->input.fltr_id = mark_spec->id;
1616                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1617                         break;
1618                 case RTE_FLOW_ACTION_TYPE_COUNT:
1619                         counter_num++;
1620
1621                         act_count = actions->conf;
1622                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1623                         rte_memcpy(&filter->act_count, act_count,
1624                                                 sizeof(filter->act_count));
1625
1626                         break;
1627                 default:
1628                         rte_flow_error_set(error, EINVAL,
1629                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1630                                    "Invalid action.");
1631                         return -rte_errno;
1632                 }
1633         }
1634
1635         if (dest_num >= 2) {
1636                 rte_flow_error_set(error, EINVAL,
1637                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1638                            "Unsupported action combination");
1639                 return -rte_errno;
1640         }
1641
1642         if (mark_num >= 2) {
1643                 rte_flow_error_set(error, EINVAL,
1644                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1645                            "Too many mark actions");
1646                 return -rte_errno;
1647         }
1648
1649         if (counter_num >= 2) {
1650                 rte_flow_error_set(error, EINVAL,
1651                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1652                            "Too many count actions");
1653                 return -rte_errno;
1654         }
1655
1656         if (dest_num + mark_num + counter_num == 0) {
1657                 rte_flow_error_set(error, EINVAL,
1658                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1659                            "Empty action");
1660                 return -rte_errno;
1661         }
1662
	/* Set the default action to PASSTHRU mode in the "mark/count only" case. */
1664         if (dest_num == 0)
1665                 filter->input.dest_ctl =
1666                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1667
1668         return 0;
1669 }
1670
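/* Translate a flow pattern into an FDIR filter. A first pass detects
 * tunnel items (VXLAN/GTPU) so that the second pass can route each
 * header's fields into the outer (input_set_o) or inner (input_set_i)
 * input set and into the matching outer/inner filter structures.
 */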
1671 static int
1672 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1673                        const struct rte_flow_item pattern[],
1674                        struct rte_flow_error *error,
1675                        struct ice_fdir_filter_conf *filter)
1676 {
1677         const struct rte_flow_item *item = pattern;
1678         enum rte_flow_item_type item_type;
1679         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1680         enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
1681         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1682         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1683         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
1684         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1685         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
1686                                         *ipv6_frag_mask;
1687         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1688         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1689         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1690         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1691         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1692         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1693         const struct rte_flow_item_esp *esp_spec, *esp_mask;
1694         uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
1695         uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
1696         uint64_t *input_set;
1697         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1698         uint8_t  ipv6_addr_mask[16] = {
1699                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1700                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1701         };
1702         uint32_t vtc_flow_cpu;
1703         uint16_t ether_type;
1704         enum rte_flow_item_type next_type;
1705         bool is_outer = true;
1706         struct ice_fdir_extra *p_ext_data;
1707         struct ice_fdir_v4 *p_v4 = NULL;
1708         struct ice_fdir_v6 *p_v6 = NULL;
1709
1710         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1711                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1712                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
		/* To align with shared code behavior, save GTPU outer
		 * fields in the inner struct.
		 */
1716                 if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
1717                     item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
1718                         is_outer = false;
1719                 }
1720         }
1721
	/* This loop parses the flow pattern and distinguishes non-tunnel
	 * from tunnel flows. input_set_i is used for the inner part.
	 */
1725         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1726                 item_type = item->type;
1727
1728                 if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1729                                     item_type ==
1730                                     RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
			return -rte_errno;
		}
1735
1736                 input_set = (tunnel_type && !is_outer) ?
1737                             &input_set_i : &input_set_o;
1738
1739                 switch (item_type) {
1740                 case RTE_FLOW_ITEM_TYPE_ETH:
1741                         flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1742                         eth_spec = item->spec;
1743                         eth_mask = item->mask;
1744
1745                         if (!(eth_spec && eth_mask))
1746                                 break;
1747
1748                         if (!rte_is_zero_ether_addr(&eth_mask->dst))
1749                                 *input_set |= ICE_INSET_DMAC;
1750                         if (!rte_is_zero_ether_addr(&eth_mask->src))
1751                                 *input_set |= ICE_INSET_SMAC;
1752
1753                         next_type = (item + 1)->type;
			/* ether_type is only matched for ICE_FLTR_PTYPE_NON_IP_L2,
			 * i.e. when ETH is the last pattern item.
			 */
1755                         if (eth_mask->type == RTE_BE16(0xffff) &&
1756                             next_type == RTE_FLOW_ITEM_TYPE_END) {
1757                                 *input_set |= ICE_INSET_ETHERTYPE;
1758                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
1759
1760                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1761                                     ether_type == RTE_ETHER_TYPE_IPV6) {
1762                                         rte_flow_error_set(error, EINVAL,
1763                                                            RTE_FLOW_ERROR_TYPE_ITEM,
1764                                                            item,
1765                                                            "Unsupported ether_type.");
1766                                         return -rte_errno;
1767                                 }
1768                         }
1769
1770                         p_ext_data = (tunnel_type && is_outer) ?
1771                                      &filter->input.ext_data_outer :
1772                                      &filter->input.ext_data;
1773                         rte_memcpy(&p_ext_data->src_mac,
1774                                    &eth_spec->src, RTE_ETHER_ADDR_LEN);
1775                         rte_memcpy(&p_ext_data->dst_mac,
1776                                    &eth_spec->dst, RTE_ETHER_ADDR_LEN);
1777                         rte_memcpy(&p_ext_data->ether_type,
1778                                    &eth_spec->type, sizeof(eth_spec->type));
1779                         break;
1780                 case RTE_FLOW_ITEM_TYPE_IPV4:
1781                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1782                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1783                         ipv4_spec = item->spec;
1784                         ipv4_last = item->last;
1785                         ipv4_mask = item->mask;
1786                         p_v4 = (tunnel_type && is_outer) ?
1787                                &filter->input.ip_outer.v4 :
1788                                &filter->input.ip.v4;
1789
1790                         if (!(ipv4_spec && ipv4_mask))
1791                                 break;
1792
1793                         /* Check IPv4 mask and update input set */
1794                         if (ipv4_mask->hdr.version_ihl ||
1795                             ipv4_mask->hdr.total_length ||
1796                             ipv4_mask->hdr.hdr_checksum) {
1797                                 rte_flow_error_set(error, EINVAL,
1798                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1799                                                    item,
1800                                                    "Invalid IPv4 mask.");
1801                                 return -rte_errno;
1802                         }
1803
1804                         if (ipv4_last &&
1805                             (ipv4_last->hdr.version_ihl ||
1806                              ipv4_last->hdr.type_of_service ||
1807                              ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
1809                              ipv4_last->hdr.next_proto_id ||
1810                              ipv4_last->hdr.hdr_checksum ||
1811                              ipv4_last->hdr.src_addr ||
1812                              ipv4_last->hdr.dst_addr)) {
1813                                 rte_flow_error_set(error, EINVAL,
1814                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1815                                                    item, "Invalid IPv4 last.");
1816                                 return -rte_errno;
1817                         }
1818
1819                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1820                                 *input_set |= ICE_INSET_IPV4_DST;
1821                         if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1822                                 *input_set |= ICE_INSET_IPV4_SRC;
1823                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1824                                 *input_set |= ICE_INSET_IPV4_TTL;
1825                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1826                                 *input_set |= ICE_INSET_IPV4_PROTO;
1827                         if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1828                                 *input_set |= ICE_INSET_IPV4_TOS;
1829
1830                         p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
1831                         p_v4->src_ip = ipv4_spec->hdr.src_addr;
1832                         p_v4->ttl = ipv4_spec->hdr.time_to_live;
1833                         p_v4->proto = ipv4_spec->hdr.next_proto_id;
1834                         p_v4->tos = ipv4_spec->hdr.type_of_service;
1835
			/* fragmented IPv4:
			 * spec is 0x2000, mask is 0x2000
			 */
1839                         if (ipv4_spec->hdr.fragment_offset ==
1840                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
1841                             ipv4_mask->hdr.fragment_offset ==
1842                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
				/* all IPv4 fragment packets have the same
				 * ethertype; if the spec and mask are valid,
				 * add the ethertype to the input set.
				 */
1847                                 flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
1848                                 *input_set |= ICE_INSET_ETHERTYPE;
1849                                 input_set_o |= ICE_INSET_ETHERTYPE;
1850                         } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
1851                                 rte_flow_error_set(error, EINVAL,
1852                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1853                                                    item, "Invalid IPv4 mask.");
1854                                 return -rte_errno;
1855                         }
1856
1857                         break;
1858                 case RTE_FLOW_ITEM_TYPE_IPV6:
1859                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1860                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1861                         ipv6_spec = item->spec;
1862                         ipv6_mask = item->mask;
1863                         p_v6 = (tunnel_type && is_outer) ?
1864                                &filter->input.ip_outer.v6 :
1865                                &filter->input.ip.v6;
1866
1867                         if (!(ipv6_spec && ipv6_mask))
1868                                 break;
1869
1870                         /* Check IPv6 mask and update input set */
1871                         if (ipv6_mask->hdr.payload_len) {
1872                                 rte_flow_error_set(error, EINVAL,
1873                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1874                                                    item,
1875                                                    "Invalid IPv6 mask");
1876                                 return -rte_errno;
1877                         }
1878
1879                         if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
1880                                     RTE_DIM(ipv6_mask->hdr.src_addr)))
1881                                 *input_set |= ICE_INSET_IPV6_SRC;
1882                         if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
1883                                     RTE_DIM(ipv6_mask->hdr.dst_addr)))
1884                                 *input_set |= ICE_INSET_IPV6_DST;
1885
1886                         if ((ipv6_mask->hdr.vtc_flow &
1887                              rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1888                             == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1889                                 *input_set |= ICE_INSET_IPV6_TC;
1890                         if (ipv6_mask->hdr.proto == UINT8_MAX)
1891                                 *input_set |= ICE_INSET_IPV6_NEXT_HDR;
1892                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1893                                 *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1894
1895                         rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
1896                         rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
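			/* vtc_flow holds version (4 bits), traffic class
			 * (8 bits) and flow label (20 bits), so the TC
			 * starts at bit ICE_FDIR_IPV6_TC_OFFSET (20).
			 */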
1897                         vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1898                         p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
1899                         p_v6->proto = ipv6_spec->hdr.proto;
1900                         p_v6->hlim = ipv6_spec->hdr.hop_limits;
1901                         break;
1902                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
1903                         l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
1904                         flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
1905                         ipv6_frag_spec = item->spec;
1906                         ipv6_frag_mask = item->mask;
1907
1908                         if (!(ipv6_frag_spec && ipv6_frag_mask))
1909                                 break;
1910
			/* fragmented IPv6:
			 * spec is 0x1, mask is 0x1
			 */
1914                         if (ipv6_frag_spec->hdr.frag_data ==
1915                             rte_cpu_to_be_16(1) &&
1916                             ipv6_frag_mask->hdr.frag_data ==
1917                             rte_cpu_to_be_16(1)) {
				/* all IPv6 fragment packets have the same
				 * ethertype; if the spec and mask are valid,
				 * add the ethertype to the input set.
				 */
1922                                 *input_set |= ICE_INSET_ETHERTYPE;
1923                                 input_set_o |= ICE_INSET_ETHERTYPE;
1924                         } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
1925                                 rte_flow_error_set(error, EINVAL,
1926                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1927                                                    item, "Invalid IPv6 mask.");
1928                                 return -rte_errno;
1929                         }
1930
1931                         break;
1932
1933                 case RTE_FLOW_ITEM_TYPE_TCP:
1934                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1935                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1936                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1937                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1938
1939                         tcp_spec = item->spec;
1940                         tcp_mask = item->mask;
1941
1942                         if (!(tcp_spec && tcp_mask))
1943                                 break;
1944
1945                         /* Check TCP mask and update input set */
1946                         if (tcp_mask->hdr.sent_seq ||
1947                             tcp_mask->hdr.recv_ack ||
1948                             tcp_mask->hdr.data_off ||
1949                             tcp_mask->hdr.tcp_flags ||
1950                             tcp_mask->hdr.rx_win ||
1951                             tcp_mask->hdr.cksum ||
1952                             tcp_mask->hdr.tcp_urp) {
1953                                 rte_flow_error_set(error, EINVAL,
1954                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1955                                                    item,
1956                                                    "Invalid TCP mask");
1957                                 return -rte_errno;
1958                         }
1959
1960                         if (tcp_mask->hdr.src_port == UINT16_MAX)
1961                                 *input_set |= ICE_INSET_TCP_SRC_PORT;
1962                         if (tcp_mask->hdr.dst_port == UINT16_MAX)
1963                                 *input_set |= ICE_INSET_TCP_DST_PORT;
1964
1965                         /* Get filter info */
1966                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1967                                 assert(p_v4);
1968                                 p_v4->dst_port = tcp_spec->hdr.dst_port;
1969                                 p_v4->src_port = tcp_spec->hdr.src_port;
1970                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1971                                 assert(p_v6);
1972                                 p_v6->dst_port = tcp_spec->hdr.dst_port;
1973                                 p_v6->src_port = tcp_spec->hdr.src_port;
1974                         }
1975                         break;
1976                 case RTE_FLOW_ITEM_TYPE_UDP:
1977                         l4 = RTE_FLOW_ITEM_TYPE_UDP;
1978                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1979                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1980                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1981                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1982
1983                         udp_spec = item->spec;
1984                         udp_mask = item->mask;
1985
1986                         if (!(udp_spec && udp_mask))
1987                                 break;
1988
			/* Check UDP mask and update input set */
1990                         if (udp_mask->hdr.dgram_len ||
1991                             udp_mask->hdr.dgram_cksum) {
1992                                 rte_flow_error_set(error, EINVAL,
1993                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1994                                                    item,
1995                                                    "Invalid UDP mask");
1996                                 return -rte_errno;
1997                         }
1998
1999                         if (udp_mask->hdr.src_port == UINT16_MAX)
2000                                 *input_set |= ICE_INSET_UDP_SRC_PORT;
2001                         if (udp_mask->hdr.dst_port == UINT16_MAX)
2002                                 *input_set |= ICE_INSET_UDP_DST_PORT;
2003
2004                         /* Get filter info */
2005                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2006                                 assert(p_v4);
2007                                 p_v4->dst_port = udp_spec->hdr.dst_port;
2008                                 p_v4->src_port = udp_spec->hdr.src_port;
2009                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2010                                 assert(p_v6);
2011                                 p_v6->src_port = udp_spec->hdr.src_port;
2012                                 p_v6->dst_port = udp_spec->hdr.dst_port;
2013                         }
2014                         break;
2015                 case RTE_FLOW_ITEM_TYPE_SCTP:
2016                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2017                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
2018                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2019                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
2020
2021                         sctp_spec = item->spec;
2022                         sctp_mask = item->mask;
2023
2024                         if (!(sctp_spec && sctp_mask))
2025                                 break;
2026
2027                         /* Check SCTP mask and update input set */
2028                         if (sctp_mask->hdr.cksum) {
2029                                 rte_flow_error_set(error, EINVAL,
2030                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2031                                                    item,
						   "Invalid SCTP mask");
2033                                 return -rte_errno;
2034                         }
2035
2036                         if (sctp_mask->hdr.src_port == UINT16_MAX)
2037                                 *input_set |= ICE_INSET_SCTP_SRC_PORT;
2038                         if (sctp_mask->hdr.dst_port == UINT16_MAX)
2039                                 *input_set |= ICE_INSET_SCTP_DST_PORT;
2040
2041                         /* Get filter info */
2042                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2043                                 assert(p_v4);
2044                                 p_v4->dst_port = sctp_spec->hdr.dst_port;
2045                                 p_v4->src_port = sctp_spec->hdr.src_port;
2046                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2047                                 assert(p_v6);
2048                                 p_v6->dst_port = sctp_spec->hdr.dst_port;
2049                                 p_v6->src_port = sctp_spec->hdr.src_port;
2050                         }
2051                         break;
2052                 case RTE_FLOW_ITEM_TYPE_VOID:
2053                         break;
2054                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2055                         l3 = RTE_FLOW_ITEM_TYPE_END;
2056                         vxlan_spec = item->spec;
2057                         vxlan_mask = item->mask;
2058                         is_outer = false;
2059
2060                         if (!(vxlan_spec && vxlan_mask))
2061                                 break;
2062
2063                         if (vxlan_mask->hdr.vx_flags) {
2064                                 rte_flow_error_set(error, EINVAL,
2065                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2066                                                    item,
						   "Invalid VXLAN mask");
2068                                 return -rte_errno;
2069                         }
2070
2071                         if (vxlan_mask->hdr.vx_vni)
2072                                 *input_set |= ICE_INSET_VXLAN_VNI;
2073
2074                         filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
2075
2076                         break;
2077                 case RTE_FLOW_ITEM_TYPE_GTPU:
2078                         l3 = RTE_FLOW_ITEM_TYPE_END;
2079                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
2080                         gtp_spec = item->spec;
2081                         gtp_mask = item->mask;
2082
2083                         if (!(gtp_spec && gtp_mask))
2084                                 break;
2085
2086                         if (gtp_mask->v_pt_rsv_flags ||
2087                             gtp_mask->msg_type ||
2088                             gtp_mask->msg_len) {
2089                                 rte_flow_error_set(error, EINVAL,
2090                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2091                                                    item,
2092                                                    "Invalid GTP mask");
2093                                 return -rte_errno;
2094                         }
2095
2096                         if (gtp_mask->teid == UINT32_MAX)
2097                                 input_set_o |= ICE_INSET_GTPU_TEID;
2098
2099                         filter->input.gtpu_data.teid = gtp_spec->teid;
2100                         break;
2101                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
2102                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
2103                         gtp_psc_spec = item->spec;
2104                         gtp_psc_mask = item->mask;
2105
2106                         if (!(gtp_psc_spec && gtp_psc_mask))
2107                                 break;
2108
2109                         if (gtp_psc_mask->qfi == UINT8_MAX)
2110                                 input_set_o |= ICE_INSET_GTPU_QFI;
2111
			filter->input.gtpu_data.qfi = gtp_psc_spec->qfi;
2114                         break;
2115                 case RTE_FLOW_ITEM_TYPE_ESP:
2116                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2117                             l4 == RTE_FLOW_ITEM_TYPE_UDP)
2118                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
2119                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2120                                  l4 == RTE_FLOW_ITEM_TYPE_UDP)
2121                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
2122                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2123                                  l4 == RTE_FLOW_ITEM_TYPE_END)
2124                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
2125                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2126                                  l4 == RTE_FLOW_ITEM_TYPE_END)
2127                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
2128
2129                         esp_spec = item->spec;
2130                         esp_mask = item->mask;
2131
2132                         if (!(esp_spec && esp_mask))
2133                                 break;
2134
2135                         if (esp_mask->hdr.spi == UINT32_MAX) {
2136                                 if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
2137                                         *input_set |= ICE_INSET_NAT_T_ESP_SPI;
2138                                 else
2139                                         *input_set |= ICE_INSET_ESP_SPI;
2140                         }
2141
2142                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2143                                 filter->input.ip.v4.sec_parm_idx =
2144                                         esp_spec->hdr.spi;
2145                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2146                                 filter->input.ip.v6.sec_parm_idx =
2147                                         esp_spec->hdr.spi;
2148                         break;
2149                 default:
2150                         rte_flow_error_set(error, EINVAL,
2151                                            RTE_FLOW_ERROR_TYPE_ITEM,
2152                                            item,
2153                                            "Invalid pattern item.");
2154                         return -rte_errno;
2155                 }
2156         }
2157
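	/* When a GTPU or VXLAN tunnel item was present, refine the flow
	 * type derived from the outer headers into the tunnel-specific
	 * flow type programmed into hardware.
	 */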
2158         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2159                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2160                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
2161         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2162                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2163                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
2164         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2165                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2166                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
2167         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2168                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2169                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
2170         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2171                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2172                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
2173         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2174                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
2175                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
2176         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2177                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
2178                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
2179         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2180                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
2181                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
2182
2183         filter->tunnel_type = tunnel_type;
2184         filter->input.flow_type = flow_type;
2185         filter->input_set_o = input_set_o;
2186         filter->input_set_i = input_set_i;
2187
2188         return 0;
2189 }
2190
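/* Top-level FDIR parse callback: match the pattern against the supported
 * list, fill pf->fdir.conf from the pattern and actions, and verify the
 * collected input set against the pattern's allowed masks before handing
 * the filter back through @meta.
 */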
2191 static int
2192 ice_fdir_parse(struct ice_adapter *ad,
2193                struct ice_pattern_match_item *array,
2194                uint32_t array_len,
2195                const struct rte_flow_item pattern[],
2196                const struct rte_flow_action actions[],
2197                uint32_t priority __rte_unused,
2198                void **meta,
2199                struct rte_flow_error *error)
2200 {
2201         struct ice_pf *pf = &ad->pf;
2202         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2203         struct ice_pattern_match_item *item = NULL;
2204         uint64_t input_set;
2205         int ret;
2206
2207         memset(filter, 0, sizeof(*filter));
2208         item = ice_search_pattern_match_item(ad, pattern, array, array_len,
2209                                              error);
2210         if (!item)
2211                 return -rte_errno;
2212
2213         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2214         if (ret)
2215                 goto error;
2216         input_set = filter->input_set_o | filter->input_set_i;
	if (!input_set ||
	    (filter->input_set_o &
	     ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE)) ||
	    (filter->input_set_i & ~item->input_set_mask_i)) {
2220                 rte_flow_error_set(error, EINVAL,
2221                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2222                                    pattern,
2223                                    "Invalid input set");
2224                 ret = -rte_errno;
2225                 goto error;
2226         }
2227
2228         ret = ice_fdir_parse_action(ad, actions, error, filter);
2229         if (ret)
2230                 goto error;
2231
2232         if (meta)
2233                 *meta = filter;
2234 error:
2235         rte_free(item);
2236         return ret;
2237 }
2238
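/* Parser entry tying the FDIR pattern list to this engine at the
 * distributor stage of the generic flow framework.
 */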
2239 static struct ice_flow_parser ice_fdir_parser = {
2240         .engine = &ice_fdir_engine,
2241         .array = ice_fdir_pattern_list,
2242         .array_len = RTE_DIM(ice_fdir_pattern_list),
2243         .parse_pattern_action = ice_fdir_parse,
2244         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2245 };
2246
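/* The constructor below registers the engine with the generic flow layer
 * at startup. As an illustrative example only (testpmd syntax, not taken
 * from this file), a rule this engine could program would be:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        tcp src is 32 / end actions queue index 2 / end
 */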
2247 RTE_INIT(ice_fdir_engine_register)
2248 {
2249         ice_register_flow_engine(&ice_fdir_engine);
2250 }