net/ice: fix data path in secondary process
drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH (\
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
	ICE_INSET_IPV6_PKID)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_IPV4_TCP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
	ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
	{pattern_ethertype,				ICE_FDIR_INSET_ETH,		ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,				ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_frag_ext,			ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,				ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	/* the GTPU input set is duplicated in the 3rd column to align with
	 * shared code behavior; ideally the GTPU fields would only appear
	 * in the 2nd column
	 */
	{pattern_eth_ipv4_gtpu,				ICE_FDIR_INSET_IPV4_GTPU,	ICE_FDIR_INSET_IPV4_GTPU,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh,			ICE_FDIR_INSET_IPV4_GTPU_EH,	ICE_FDIR_INSET_IPV4_GTPU_EH,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,				ICE_FDIR_INSET_IPV6_GTPU,	ICE_FDIR_INSET_IPV6_GTPU,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh,			ICE_FDIR_INSET_IPV6_GTPU_EH,	ICE_FDIR_INSET_IPV6_GTPU_EH,	ICE_INSET_NONE},
};
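
/*
 * Each row above pairs an rte_flow pattern with the outer and inner
 * input-set bits this parser accepts for it. As an illustrative sketch
 * (testpmd syntax, not driver code), a rule such as:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 / udp / end
 *        actions queue index 3 / end
 *
 * is matched against pattern_eth_ipv4_udp, and its input set (here the
 * IPv4 source address) must be covered by ICE_FDIR_INSET_ETH_IPV4_UDP.
 */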

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

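/*
 * Reserving a memzone under a name that already exists fails with
 * rte_errno set to EEXIST, so ice_memzone_reserve() looks the name up
 * first and reuses the existing zone (e.g. when FDIR is set up again
 * over the same port) instead of failing to reserve the programming
 * packet buffer.
 */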
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	/* the pool was already linked into the container's pool list
	 * above; unlink it before freeing to avoid a dangling pointer
	 */
	TAILQ_REMOVE(&container->pool_list, pool, next);
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
					*container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->ref_cnt &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		/* move exhausted pools to the tail so that pools with
		 * free counters are scanned first next time
		 */
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
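
/*
 * Illustrative counter lifecycle (a sketch, not driver flow): a shared
 * counter is reused by reference count, so two rules using the same
 * COUNT action id share one hardware counter index:
 *
 *   c1 = ice_fdir_counter_alloc(pf, 1, 42);	// ref_cnt = 1
 *   c2 = ice_fdir_counter_alloc(pf, 1, 42);	// same counter, ref_cnt = 2
 *   ice_fdir_counter_free(pf, c2);		// ref_cnt = 1, still in use
 *   ice_fdir_counter_free(pf, c1);		// returned to the pool
 */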

static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make a new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			/* remove by profile id, not ptype: for tunnel
			 * profiles the two differ
			 */
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* if the input set is identical, the profile already exists:
	 * return -EEXIST so the caller can reuse it
	 */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rules exist: the conflict cannot be resolved */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}

static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}
	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}
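
/*
 * Worked example of the cross-profile rule above: an IPV4_OTHER profile
 * also matches UDP, TCP and SCTP over IPv4, so before an IPV4_UDP
 * profile can be created, an existing empty IPV4_OTHER profile must be
 * removed; if that profile still carries rules, creation is rejected.
 */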

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	/* check whether the input set conflicts with the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check whether this profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
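
/*
 * Two flow entries are installed per profile above: one for the main
 * VSI, which receives matched traffic, and one for the FDIR control
 * VSI; the latter appears to exist so that packets sent on the
 * programming queue are handled by the same profile.
 */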

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint32_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
		{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}
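
/*
 * For example, inset = ICE_INSET_IPV4_SRC | ICE_INSET_UDP_DST_PORT
 * yields field[] = { ICE_FLOW_FIELD_IDX_IPV4_SA,
 * ICE_FLOW_FIELD_IDX_UDP_DST_PORT, ... }; the caller pre-fills the
 * array with ICE_FLOW_FIELD_IDX_MAX, which serves as the terminator
 * when the fields are applied to a flow segment.
 */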

static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV4:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV6:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_VXLAN |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t inner_input_set, uint64_t outer_input_set,
			enum ice_fdir_tunnel_type ttype)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	uint64_t input_set;
	bool is_tunnel;
	int k, i, ret = 0;

	if (!(inner_input_set | outer_input_set))
		return -EINVAL;

	seg_tun = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
	if (!seg_tun) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	/* use seg_tun[1] to record the tunnel inner part */
	for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
		seg = &seg_tun[k];
		input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
		if (input_set == 0)
			continue;

		for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
			field[i] = ICE_FLOW_FIELD_IDX_MAX;

		ice_fdir_input_set_parse(input_set, field);

		ice_fdir_input_set_hdrs(flow, seg);

		for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
			ice_flow_set_fld(seg, field[i],
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);
		}
	}

	is_tunnel = ice_fdir_is_tunnel_profile(ttype);

	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
				   seg_tun, flow, is_tunnel);
	if (ret < 0) {
		rte_free(seg_tun);
		/* an identical profile already exists: reuse it */
		return (ret == -EEXIST) ? 0 : ret;
	}

	return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	if (ad->hw.dcf_enabled)
		return 0;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	parser = &ice_fdir_parser;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *parser;
	struct ice_pf *pf = &ad->pf;

	if (ad->hw.dcf_enabled)
		return;

	parser = &ice_fdir_parser;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
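
/*
 * FDIR rules are programmed through the data path: the filter
 * descriptor and a generated dummy packet carrying the match fields
 * are sent on the dedicated FDIR TX queue, and ice_fdir_programming()
 * waits for the hardware to report completion in the descriptor.
 */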

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check whether a matching flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
			const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}
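
/*
 * Sketch of the software-list flow used by create/destroy below:
 *
 *   ice_fdir_extract_fltr_key(&key, filter);
 *   if (ice_fdir_entry_lookup(fdir_info, &key))
 *           // duplicate rule, reject with EEXIST
 *   ...program the hardware filter...
 *   ice_fdir_entry_insert(pf, entry, &key);
 */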

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set_i, filter->input_set_o,
				      filter->tunnel_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 1);

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_entry;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 0);

	flow->rule = NULL;

	rte_free(filter);

	return 0;
}
1380
1381 static int
1382 ice_fdir_query_count(struct ice_adapter *ad,
1383                       struct rte_flow *flow,
1384                       struct rte_flow_query_count *flow_stats,
1385                       struct rte_flow_error *error)
1386 {
1387         struct ice_pf *pf = &ad->pf;
1388         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1389         struct ice_fdir_filter_conf *filter = flow->rule;
1390         struct ice_fdir_counter *counter = filter->counter;
1391         uint64_t hits_lo, hits_hi;
1392
1393         if (!counter) {
1394                 rte_flow_error_set(error, EINVAL,
1395                                   RTE_FLOW_ERROR_TYPE_ACTION,
1396                                   NULL,
1397                                   "FDIR counters not available");
1398                 return -rte_errno;
1399         }
1400
        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits returns the value in the
         * shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}
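
/*
 * Illustrative only: a minimal sketch (hypothetical variable names) of how
 * an application reaches the query above through the generic rte_flow API,
 * for a rule that was created with a COUNT action:
 *
 *      struct rte_flow_query_count stats = { .reset = 1 };
 *      struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_query(port_id, flow, &count, &stats, &err) == 0)
 *              printf("hits: %" PRIu64 "\n", stats.hits);
 *
 * Setting .reset = 1 makes the code above clear the hardware counter after
 * the read.
 */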

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};
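
/*
 * The engine above is invoked through the generic flow framework in
 * ice_generic_flow.c; it is registered at load time by the RTE_INIT
 * constructor at the bottom of this file.
 */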

static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes of the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size must be one of "
                                   "2, 4, 8, 16, 32, 64 or 128, as long as "
                                   "the total number of queues does not "
                                   "exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}
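
/*
 * Worked example (illustrative): an RSS action listing queues 8..15 has
 * queue_num = 8, a power of two, so q_index = 8 (the first queue of the
 * region) and q_region = rte_fls_u32(8) - 1 = 3, i.e. log2 of the region
 * size.
 */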

static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;
                        filter->mark_flag = 1;
                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                   sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        if (dest_num + mark_num + counter_num == 0) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Empty action");
                return -rte_errno;
        }

        /* set the default action to PASSTHRU in the "mark/count only" case */
        if (dest_num == 0)
                filter->input.dest_ctl =
                        ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;

        return 0;
}
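
/*
 * Illustrative only (testpmd-style sketch): a command that exercises this
 * parser with one destination, one mark and one count action, which the
 * checks above allow:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end
 *        actions queue index 4 / mark id 3 / count / end
 */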

static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
                                        *ipv6_frag_last, *ipv6_frag_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
        uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
        uint64_t *input_set;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;
        uint16_t ether_type;
        enum rte_flow_item_type next_type;
        bool is_outer = true;
        struct ice_fdir_extra *p_ext_data;
        struct ice_fdir_v4 *p_v4 = NULL;
        struct ice_fdir_v6 *p_v6 = NULL;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
                /* To align with shared code behavior, save GTPU outer
                 * fields in the inner struct.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
                    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
                        is_outer = false;
                }
        }
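
        /*
         * A separate pre-scan is needed because the tunnel type must be
         * known before the main parse below: it decides whether a given
         * header's fields are routed into the outer or the inner input set.
         */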

        /* This loop parses the flow pattern and distinguishes non-tunnel
         * flows from tunnel flows. input_set_i is used for the inner part.
         */
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                item_type = item->type;

                if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                                    item_type ==
                                    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Range not supported");
                        return -rte_errno;
                }

                input_set = (tunnel_type && !is_outer) ?
                            &input_set_i : &input_set_o;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (!(eth_spec && eth_mask))
                                break;

                        if (!rte_is_zero_ether_addr(&eth_mask->dst))
                                *input_set |= ICE_INSET_DMAC;
                        if (!rte_is_zero_ether_addr(&eth_mask->src))
                                *input_set |= ICE_INSET_SMAC;

                        next_type = (item + 1)->type;
                        /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
                        if (eth_mask->type == RTE_BE16(0xffff) &&
                            next_type == RTE_FLOW_ITEM_TYPE_END) {
                                *input_set |= ICE_INSET_ETHERTYPE;
                                ether_type = rte_be_to_cpu_16(eth_spec->type);

                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                    ether_type == RTE_ETHER_TYPE_IPV6) {
                                        rte_flow_error_set(error, EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "Unsupported ether_type.");
                                        return -rte_errno;
                                }
                        }

                        p_ext_data = (tunnel_type && is_outer) ?
                                     &filter->input.ext_data_outer :
                                     &filter->input.ext_data;
                        rte_memcpy(&p_ext_data->src_mac,
                                   &eth_spec->src, RTE_ETHER_ADDR_LEN);
                        rte_memcpy(&p_ext_data->dst_mac,
                                   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                        rte_memcpy(&p_ext_data->ether_type,
                                   &eth_spec->type, sizeof(eth_spec->type));
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_last = item->last;
                        ipv4_mask = item->mask;
                        p_v4 = (tunnel_type && is_outer) ?
                               &filter->input.ip_outer.v4 :
                               &filter->input.ip.v4;

                        if (!(ipv4_spec && ipv4_mask))
                                break;

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_last &&
                            (ipv4_last->hdr.version_ihl ||
                             ipv4_last->hdr.type_of_service ||
                             ipv4_last->hdr.time_to_live ||
                             ipv4_last->hdr.total_length ||
                             ipv4_last->hdr.next_proto_id ||
                             ipv4_last->hdr.hdr_checksum ||
                             ipv4_last->hdr.src_addr ||
                             ipv4_last->hdr.dst_addr)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 last.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                *input_set |= ICE_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                *input_set |= ICE_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_PROTO;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV4_TOS;

                        p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
                        p_v4->src_ip = ipv4_spec->hdr.src_addr;
                        p_v4->ttl = ipv4_spec->hdr.time_to_live;
                        p_v4->proto = ipv4_spec->hdr.next_proto_id;
                        p_v4->tos = ipv4_spec->hdr.type_of_service;

                        /* Only an "any packet id" spec is supported for
                         * fragmented IPv4:
                         * any packet_id:
                         * spec is 0, last is 0xffff, mask is 0xffff
                         * fragmented IPv4:
                         * spec is 0x2000, mask is 0xffff
                         */
                        if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
                            ipv4_last->hdr.packet_id == UINT16_MAX &&
                            ipv4_mask->hdr.packet_id == UINT16_MAX &&
                            ipv4_spec->hdr.fragment_offset ==
                            rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
                            ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
                                /* All IPv4 fragment packets have the same
                                 * ethertype; if the spec covers all valid
                                 * packet ids, add the ethertype to the
                                 * input set.
                                 */
                                *input_set |= ICE_INSET_ETHERTYPE;
                                input_set_o |= ICE_INSET_ETHERTYPE;
                        } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        break;
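
                        /*
                         * Illustrative only (testpmd-style sketch): the
                         * fragment case above corresponds to an item such as
                         *   ipv4 packet_id spec 0 packet_id last 0xffff
                         *        packet_id mask 0xffff
                         *        fragment_offset spec 0x2000
                         *        fragment_offset mask 0xffff
                         * i.e. "any packet id, MF flag set".
                         */
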
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        p_v6 = (tunnel_type && is_outer) ?
                               &filter->input.ip_outer.v6 :
                               &filter->input.ip.v6;

                        if (!(ipv6_spec && ipv6_mask))
                                break;

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.src_addr)))
                                *input_set |= ICE_INSET_IPV6_SRC;
                        if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
                                    RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                *input_set |= ICE_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                *input_set |= ICE_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                *input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                        rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
                        rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
                        vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                        p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
                        p_v6->proto = ipv6_spec->hdr.proto;
                        p_v6->hlim = ipv6_spec->hdr.hop_limits;
                        break;
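
                        /*
                         * Worked example (illustrative): vtc_flow packs
                         * version(4) | traffic class(8) | flow label(20),
                         * so with ICE_FDIR_IPV6_TC_OFFSET = 20 a CPU-order
                         * value of 0x6ab12345 yields
                         * tc = (uint8_t)(0x6ab12345 >> 20) = 0xab.
                         */
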
                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
                        flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
                        ipv6_frag_spec = item->spec;
                        ipv6_frag_last = item->last;
                        ipv6_frag_mask = item->mask;

                        if (!(ipv6_frag_spec && ipv6_frag_mask))
                                break;

                        /* Only an "any packet id" spec is supported for
                         * fragmented IPv6:
                         * any packet_id:
                         * spec is 0, last is 0xffffffff, mask is 0xffffffff
                         * fragmented IPv6:
                         * spec is 0x1, mask is 0xffff
                         */
                        if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
                            ipv6_frag_last->hdr.id == UINT32_MAX &&
                            ipv6_frag_mask->hdr.id == UINT32_MAX &&
                            ipv6_frag_spec->hdr.frag_data ==
                            rte_cpu_to_be_16(1) &&
                            ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
                                /* All IPv6 fragment packets have the same
                                 * ethertype; if the spec covers all valid
                                 * packet ids, add the ethertype to the
                                 * input set.
                                 */
                                *input_set |= ICE_INSET_ETHERTYPE;
                                input_set_o |= ICE_INSET_ETHERTYPE;
                        } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item, "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!(tcp_spec && tcp_mask))
                                break;

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_TCP_SRC_PORT;
                        if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_TCP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = tcp_spec->hdr.dst_port;
                                p_v4->src_port = tcp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->dst_port = tcp_spec->hdr.dst_port;
                                p_v6->src_port = tcp_spec->hdr.src_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!(udp_spec && udp_mask))
                                break;

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_UDP_SRC_PORT;
                        if (udp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_UDP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = udp_spec->hdr.dst_port;
                                p_v4->src_port = udp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->src_port = udp_spec->hdr.src_port;
                                p_v6->dst_port = udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (!(sctp_spec && sctp_mask))
                                break;

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port == UINT16_MAX)
                                *input_set |= ICE_INSET_SCTP_SRC_PORT;
                        if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                *input_set |= ICE_INSET_SCTP_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                assert(p_v4);
                                p_v4->dst_port = sctp_spec->hdr.dst_port;
                                p_v4->src_port = sctp_spec->hdr.src_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                assert(p_v6);
                                p_v6->dst_port = sctp_spec->hdr.dst_port;
                                p_v6->src_port = sctp_spec->hdr.src_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        is_outer = false;

                        if (!(vxlan_spec && vxlan_mask))
                                break;

                        if (vxlan_mask->hdr.vx_flags) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid vxlan field");
                                return -rte_errno;
                        }

                        if (vxlan_mask->hdr.vx_vni)
                                *input_set |= ICE_INSET_VXLAN_VNI;

                        filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;

                        break;
                case RTE_FLOW_ITEM_TYPE_GTPU:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        if (!(gtp_spec && gtp_mask))
                                break;

                        if (gtp_mask->v_pt_rsv_flags ||
                            gtp_mask->msg_type ||
                            gtp_mask->msg_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GTP mask");
                                return -rte_errno;
                        }

                        if (gtp_mask->teid == UINT32_MAX)
                                input_set_o |= ICE_INSET_GTPU_TEID;

                        filter->input.gtpu_data.teid = gtp_spec->teid;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        if (!(gtp_psc_spec && gtp_psc_mask))
                                break;

                        if (gtp_psc_mask->qfi == UINT8_MAX)
                                input_set_o |= ICE_INSET_GTPU_QFI;

                        filter->input.gtpu_data.qfi =
                                gtp_psc_spec->qfi;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
                flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
        else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN;

        filter->tunnel_type = tunnel_type;
        filter->input.flow_type = flow_type;
        filter->input_set_o = input_set_o;
        filter->input_set_i = input_set_i;

        return 0;
}

static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               uint32_t priority __rte_unused,
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(ad, pattern, array, array_len,
                                             error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                goto error;
        input_set = filter->input_set_o | filter->input_set_i;
        if (!input_set ||
            (filter->input_set_o &
             ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE)) ||
            (filter->input_set_i & ~item->input_set_mask_i)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                ret = -rte_errno;
                goto error;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;
error:
        rte_free(item);
        return ret;
}
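
/*
 * Illustrative only: for a plain "eth / ipv4" pattern the matched list entry
 * carries ICE_FDIR_INSET_ETH_IPV4 as input_set_mask_o, so a filter whose
 * input_set_o also contains e.g. ICE_INSET_TCP_SRC_PORT is rejected by the
 * "Invalid input set" check above, while IPv4 src/dst/tos/ttl/proto pass.
 */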

static struct ice_flow_parser ice_fdir_parser = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_list,
        .array_len = RTE_DIM(ice_fdir_pattern_list),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}