net/ice: fix flow FDIR/switch memory leak
dpdk.git: drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_GTPU_EH_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv4,
                                       ICE_FDIR_INSET_GTPU_EH_IPV4,          ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

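/* Look up the named memzone and return it if it already exists;
 * otherwise reserve a new IOVA-contiguous zone for the FDIR
 * programming packet.
 */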
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

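/* Allocate the per-filter-type profile table: an array indexed by
 * ice_fltr_ptype plus one ice_fd_hw_prof per type. On partial failure,
 * free everything allocated so far so no memory is leaked.
 */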
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

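/* Add one counter pool to the container: the pool holds 'len' counters
 * whose HW indexes start at 'index_start', each linked into the pool's
 * free list.
 */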
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* the pool was already linked into pool_list above; unlink it
         * before freeing so the list does not keep a dangling pointer
         */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

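/* Initialize the FDIR counter container with a single pool covering one
 * block of HW counters, starting at this PF's counter base.
 */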
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

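/* Release all counter pools owned by this PF and reset the container. */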
static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

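/* Find a shared counter with a matching ID that is still referenced,
 * so flows using the same counter ID can reuse one HW counter.
 */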
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

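/* Allocate a counter for a new flow. Shared counters are looked up first
 * and reference-counted; otherwise the first free counter is taken from
 * the pool list and its HW statistics registers are cleared.
 */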
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

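/* Drop one reference to a counter; when the last reference goes away,
 * return the counter to its pool's free list.
 */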
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

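/* Create the per-port FDIR SW state: a hash table keyed by the filter
 * pattern plus a flat map from hash index to filter entry.
 */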
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

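/* Free the FDIR filter hash map and hash table, then clear the pointers
 * so repeated teardown is safe.
 */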
static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        /* the flow was added under prof_id (which encodes
                         * the tunnel offset), so remove it by the same id
                         */
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

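/* Check whether a new input set conflicts with the profile already
 * installed for this flow type. Identical input sets return -EEXIST
 * (nothing to do); a conflicting profile is removed if it has no rules,
 * otherwise -EINVAL is returned.
 */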
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* if the input set is identical, the profile already exists;
         * return -EEXIST
         */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

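/* Try to make room for a new profile of the given flow type: an existing
 * profile can be removed only while no rules reference it. Returns true
 * if no profile exists or the empty profile was removed.
 */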
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and rule exists, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

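/* Resolve conflicts across flow types that overlap in HW: an OTHER
 * profile shadows the more specific UDP/TCP/SCTP (or GTPU sub-type)
 * profiles and vice versa, so the overlapping empty profiles must be
 * removed before a new one is added.
 */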
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                /* OTHER conflicts with each specific GTPU sub-type,
                 * mirroring the IPv4/IPv6 cases above (the original
                 * checked OTHER against itself three times)
                 */
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

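/* Program one FDIR profile into the HW flow table: add the profile for
 * the given segment(s), then add one flow entry for the main VSI and one
 * for the FDIR control VSI. On failure, roll back whatever was added.
 */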
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether the profile conflicts with other profiles */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

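/* Translate an ICE_INSET_* bitmap into the list of ice_flow_field
 * indexes that HW needs; bits with no mapping are simply skipped.
 */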
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

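/* Build the flow segment(s) for a filter's input set and install the
 * matching HW profile. For tunnel flows a two-segment array is used and
 * the parsed segment describes the inner headers.
 */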
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                /* on tunnel success the HW profile keeps seg_tun; the
                 * standalone inner segment was copied into seg_tun[1]
                 * and is no longer referenced, so free it to avoid a leak
                 */
                if (is_tunnel)
                        rte_free(seg);
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

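/* Program (add == true) or remove one FDIR rule in HW: build the
 * programming descriptor and the matching dummy packet, then hand both
 * to the FDIR programming queue.
 */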
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                /* also release the counter taken above, not just the entry */
                goto free_counter;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value held
         * in the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

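/* Parse an RSS action used as an FDIR queue region: the queues must be
 * contiguous, a power of two in count, within the device's RX queue
 * range, and no larger than ICE_FDIR_MAX_QREGION_SIZE.
 */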
1389 static int
1390 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1391                               struct rte_flow_error *error,
1392                               const struct rte_flow_action *act,
1393                               struct ice_fdir_filter_conf *filter)
1394 {
1395         const struct rte_flow_action_rss *rss = act->conf;
1396         uint32_t i;
1397
1398         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1399                 rte_flow_error_set(error, EINVAL,
1400                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1401                                    "Invalid action.");
1402                 return -rte_errno;
1403         }
1404
1405         if (rss->queue_num <= 1) {
1406                 rte_flow_error_set(error, EINVAL,
1407                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1408                                    "Queue region size can't be 0 or 1.");
1409                 return -rte_errno;
1410         }
1411
1412         /* check if queue index for queue region is continuous */
1413         for (i = 0; i < rss->queue_num - 1; i++) {
1414                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1415                         rte_flow_error_set(error, EINVAL,
1416                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1417                                            "Discontinuous queue region");
1418                         return -rte_errno;
1419                 }
1420         }
1421
1422         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1423                 rte_flow_error_set(error, EINVAL,
1424                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1425                                    "Invalid queue region indexes.");
1426                 return -rte_errno;
1427         }
1428
1429         if (!(rte_is_power_of_2(rss->queue_num) &&
1430              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1431                 rte_flow_error_set(error, EINVAL,
1432                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1433                                    "The region size must be a power of 2 "
1434                                    "in the range [2, 128], and the total number "
1435                                    "of queues must not exceed the VSI allocation.");
1436                 return -rte_errno;
1437         }
1438
1439         filter->input.q_index = rss->queue[0];
1440         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1441         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1442
1443         return 0;
1444 }
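
/*
 * Example (testpmd syntax; port and queue numbers are illustrative): a
 * queue region is expressed as an RSS action over a contiguous,
 * power-of-2 sized queue list, e.g.
 *
 *     flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 / end \
 *         actions rss queues 0 1 2 3 end / end
 *
 * which the parser above encodes as q_index = 0 and
 * q_region = log2(4) = 2.
 */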
1445
1446 static int
1447 ice_fdir_parse_action(struct ice_adapter *ad,
1448                       const struct rte_flow_action actions[],
1449                       struct rte_flow_error *error,
1450                       struct ice_fdir_filter_conf *filter)
1451 {
1452         struct ice_pf *pf = &ad->pf;
1453         const struct rte_flow_action_queue *act_q;
1454         const struct rte_flow_action_mark *mark_spec = NULL;
1455         const struct rte_flow_action_count *act_count;
1456         uint32_t dest_num = 0;
1457         uint32_t mark_num = 0;
1458         uint32_t counter_num = 0;
1459         int ret;
1460
1461         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1462                 switch (actions->type) {
1463                 case RTE_FLOW_ACTION_TYPE_VOID:
1464                         break;
1465                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1466                         dest_num++;
1467
1468                         act_q = actions->conf;
1469                         filter->input.q_index = act_q->index;
1470                         if (filter->input.q_index >=
1471                                         pf->dev_data->nb_rx_queues) {
1472                                 rte_flow_error_set(error, EINVAL,
1473                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1474                                                    actions,
1475                                                    "Invalid queue for FDIR.");
1476                                 return -rte_errno;
1477                         }
1478                         filter->input.dest_ctl =
1479                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1480                         break;
1481                 case RTE_FLOW_ACTION_TYPE_DROP:
1482                         dest_num++;
1483
1484                         filter->input.dest_ctl =
1485                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1486                         break;
1487                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1488                         dest_num++;
1489
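                        /*
                         * Note: PASSTHRU is programmed here as
                         * direct-to-queue-0 rather than a dedicated
                         * hardware pass-through destination (assumption:
                         * a closer match is not wired up in this version).
                         */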
1490                         filter->input.dest_ctl =
1491                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1492                         filter->input.q_index = 0;
1493                         break;
1494                 case RTE_FLOW_ACTION_TYPE_RSS:
1495                         dest_num++;
1496
1497                         ret = ice_fdir_parse_action_qregion(pf,
1498                                                 error, actions, filter);
1499                         if (ret)
1500                                 return ret;
1501                         break;
1502                 case RTE_FLOW_ACTION_TYPE_MARK:
1503                         mark_num++;
1504
1505                         mark_spec = actions->conf;
1506                         filter->input.fltr_id = mark_spec->id;
1507                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1508                         break;
1509                 case RTE_FLOW_ACTION_TYPE_COUNT:
1510                         counter_num++;
1511
1512                         act_count = actions->conf;
1513                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1514                         rte_memcpy(&filter->act_count, act_count,
1515                                                 sizeof(filter->act_count));
1516
1517                         break;
1518                 default:
1519                         rte_flow_error_set(error, EINVAL,
1520                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1521                                    "Invalid action.");
1522                         return -rte_errno;
1523                 }
1524         }
1525
1526         if (dest_num == 0 || dest_num >= 2) {
1527                 rte_flow_error_set(error, EINVAL,
1528                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1529                            "Exactly one destination action is required");
1530                 return -rte_errno;
1531         }
1532
1533         if (mark_num >= 2) {
1534                 rte_flow_error_set(error, EINVAL,
1535                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1536                            "Too many mark actions");
1537                 return -rte_errno;
1538         }
1539
1540         if (counter_num >= 2) {
1541                 rte_flow_error_set(error, EINVAL,
1542                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1543                            "Too many count actions");
1544                 return -rte_errno;
1545         }
1546
1547         return 0;
1548 }
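
/*
 * Example (testpmd syntax; values are illustrative): exactly one fate
 * action (queue/drop/passthru/rss) may be combined with at most one mark
 * and one count action, e.g.
 *
 *     flow create 0 ingress pattern eth / ipv4 dst is 2.2.2.2 / end \
 *         actions queue index 3 / mark id 7 / count / end
 */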
1549
1550 static int
1551 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1552                        const struct rte_flow_item pattern[],
1553                        struct rte_flow_error *error,
1554                        struct ice_fdir_filter_conf *filter)
1555 {
1556         const struct rte_flow_item *item = pattern;
1557         enum rte_flow_item_type item_type;
1558         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1559         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1560         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1561         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1562         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1563         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1564         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1565         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1566         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1567         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1568         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1569         uint64_t input_set = ICE_INSET_NONE;
1570         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1571         uint8_t  ipv6_addr_mask[16] = {
1572                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1573                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1574         };
1575         uint32_t vtc_flow_cpu;
1576
1577
1578         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1579                 if (item->last) {
1580                         rte_flow_error_set(error, EINVAL,
1581                                         RTE_FLOW_ERROR_TYPE_ITEM,
1582                                         item,
1583                                         "Range not supported");
1584                         return -rte_errno;
1585                 }
1586                 item_type = item->type;
1587
1588                 switch (item_type) {
1589                 case RTE_FLOW_ITEM_TYPE_ETH:
1590                         eth_spec = item->spec;
1591                         eth_mask = item->mask;
1592
1593                         if (eth_spec && eth_mask) {
1594                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1595                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1596                                         rte_flow_error_set(error, EINVAL,
1597                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1598                                                 item,
1599                                                 "Source MAC not supported");
1600                                         return -rte_errno;
1601                                 }
1602
1603                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1604                                         rte_flow_error_set(error, EINVAL,
1605                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1606                                                 item,
1607                                                 "Invalid mac addr mask");
1608                                         return -rte_errno;
1609                                 }
1610
1611                                 input_set |= ICE_INSET_DMAC;
1612                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1613                                            &eth_spec->dst,
1614                                            RTE_ETHER_ADDR_LEN);
1615                         }
1616                         break;
1617                 case RTE_FLOW_ITEM_TYPE_IPV4:
1618                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1619                         ipv4_spec = item->spec;
1620                         ipv4_mask = item->mask;
1621
1622                         if (ipv4_spec && ipv4_mask) {
1623                                 /* Check IPv4 mask and update input set */
1624                                 if (ipv4_mask->hdr.version_ihl ||
1625                                     ipv4_mask->hdr.total_length ||
1626                                     ipv4_mask->hdr.packet_id ||
1627                                     ipv4_mask->hdr.fragment_offset ||
1628                                     ipv4_mask->hdr.hdr_checksum) {
1629                                         rte_flow_error_set(error, EINVAL,
1630                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1631                                                    item,
1632                                                    "Invalid IPv4 mask.");
1633                                         return -rte_errno;
1634                                 }
1635                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1636                                         input_set |= tunnel_type ?
1637                                                      ICE_INSET_TUN_IPV4_SRC :
1638                                                      ICE_INSET_IPV4_SRC;
1639                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1640                                         input_set |= tunnel_type ?
1641                                                      ICE_INSET_TUN_IPV4_DST :
1642                                                      ICE_INSET_IPV4_DST;
1643                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1644                                         input_set |= ICE_INSET_IPV4_TOS;
1645                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1646                                         input_set |= ICE_INSET_IPV4_TTL;
1647                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1648                                         input_set |= ICE_INSET_IPV4_PROTO;
1649
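                                /*
                                 * Note: src/dst are deliberately swapped
                                 * when filling the filter; the shared base
                                 * code builds the FDIR programming packet
                                 * from the reversed perspective (assumption
                                 * based on base/ice_fdir.c). The same
                                 * convention applies to the L4 ports below.
                                 */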
1650                                 filter->input.ip.v4.dst_ip =
1651                                         ipv4_spec->hdr.src_addr;
1652                                 filter->input.ip.v4.src_ip =
1653                                         ipv4_spec->hdr.dst_addr;
1654                                 filter->input.ip.v4.tos =
1655                                         ipv4_spec->hdr.type_of_service;
1656                                 filter->input.ip.v4.ttl =
1657                                         ipv4_spec->hdr.time_to_live;
1658                                 filter->input.ip.v4.proto =
1659                                         ipv4_spec->hdr.next_proto_id;
1660                         }
1661
1662                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1663                         break;
1664                 case RTE_FLOW_ITEM_TYPE_IPV6:
1665                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1666                         ipv6_spec = item->spec;
1667                         ipv6_mask = item->mask;
1668
1669                         if (ipv6_spec && ipv6_mask) {
1670                                 /* Check IPv6 mask and update input set */
1671                                 if (ipv6_mask->hdr.payload_len) {
1672                                         rte_flow_error_set(error, EINVAL,
1673                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1674                                                    item,
1675                                                    "Invalid IPv6 mask");
1676                                         return -rte_errno;
1677                                 }
1678
1679                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1680                                             ipv6_addr_mask,
1681                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1682                                         input_set |= ICE_INSET_IPV6_SRC;
1683                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1684                                             ipv6_addr_mask,
1685                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1686                                         input_set |= ICE_INSET_IPV6_DST;
1687
1688                                 if ((ipv6_mask->hdr.vtc_flow &
1689                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1690                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1691                                         input_set |= ICE_INSET_IPV6_TC;
1692                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1693                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1694                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1695                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1696
1697                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1698                                            ipv6_spec->hdr.src_addr, 16);
1699                                 rte_memcpy(filter->input.ip.v6.src_ip,
1700                                            ipv6_spec->hdr.dst_addr, 16);
1701
1702                                 vtc_flow_cpu =
1703                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1704                                 filter->input.ip.v6.tc =
1705                                         (uint8_t)(vtc_flow_cpu >>
1706                                                   ICE_FDIR_IPV6_TC_OFFSET);
1707                                 filter->input.ip.v6.proto =
1708                                         ipv6_spec->hdr.proto;
1709                                 filter->input.ip.v6.hlim =
1710                                         ipv6_spec->hdr.hop_limits;
1711                         }
1712
1713                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1714                         break;
1715                 case RTE_FLOW_ITEM_TYPE_TCP:
1716                         tcp_spec = item->spec;
1717                         tcp_mask = item->mask;
1718
1719                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1720                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1721                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1722                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1723
1724                         if (tcp_spec && tcp_mask) {
1725                                 /* Check TCP mask and update input set */
1726                                 if (tcp_mask->hdr.sent_seq ||
1727                                     tcp_mask->hdr.recv_ack ||
1728                                     tcp_mask->hdr.data_off ||
1729                                     tcp_mask->hdr.tcp_flags ||
1730                                     tcp_mask->hdr.rx_win ||
1731                                     tcp_mask->hdr.cksum ||
1732                                     tcp_mask->hdr.tcp_urp) {
1733                                         rte_flow_error_set(error, EINVAL,
1734                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1735                                                    item,
1736                                                    "Invalid TCP mask");
1737                                         return -rte_errno;
1738                                 }
1739
1740                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1741                                         input_set |= tunnel_type ?
1742                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1743                                                      ICE_INSET_TCP_SRC_PORT;
1744                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1745                                         input_set |= tunnel_type ?
1746                                                      ICE_INSET_TUN_TCP_DST_PORT :
1747                                                      ICE_INSET_TCP_DST_PORT;
1748
1749                                 /* Get filter info */
1750                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1751                                         filter->input.ip.v4.dst_port =
1752                                                 tcp_spec->hdr.src_port;
1753                                         filter->input.ip.v4.src_port =
1754                                                 tcp_spec->hdr.dst_port;
1755                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1756                                         filter->input.ip.v6.dst_port =
1757                                                 tcp_spec->hdr.src_port;
1758                                         filter->input.ip.v6.src_port =
1759                                                 tcp_spec->hdr.dst_port;
1760                                 }
1761                         }
1762                         break;
1763                 case RTE_FLOW_ITEM_TYPE_UDP:
1764                         udp_spec = item->spec;
1765                         udp_mask = item->mask;
1766
1767                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1768                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1769                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1770                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1771
1772                         if (udp_spec && udp_mask) {
1773                                 /* Check UDP mask and update input set */
1774                                 if (udp_mask->hdr.dgram_len ||
1775                                     udp_mask->hdr.dgram_cksum) {
1776                                         rte_flow_error_set(error, EINVAL,
1777                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1778                                                    item,
1779                                                    "Invalid UDP mask");
1780                                         return -rte_errno;
1781                                 }
1782
1783                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1784                                         input_set |= tunnel_type ?
1785                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1786                                                      ICE_INSET_UDP_SRC_PORT;
1787                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1788                                         input_set |= tunnel_type ?
1789                                                      ICE_INSET_TUN_UDP_DST_PORT :
1790                                                      ICE_INSET_UDP_DST_PORT;
1791
1792                                 /* Get filter info */
1793                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1794                                         filter->input.ip.v4.dst_port =
1795                                                 udp_spec->hdr.src_port;
1796                                         filter->input.ip.v4.src_port =
1797                                                 udp_spec->hdr.dst_port;
1798                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1799                                         filter->input.ip.v6.src_port =
1800                                                 udp_spec->hdr.dst_port;
1801                                         filter->input.ip.v6.dst_port =
1802                                                 udp_spec->hdr.src_port;
1803                                 }
1804                         }
1805                         break;
1806                 case RTE_FLOW_ITEM_TYPE_SCTP:
1807                         sctp_spec = item->spec;
1808                         sctp_mask = item->mask;
1809
1810                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1811                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1812                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1813                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1814
1815                         if (sctp_spec && sctp_mask) {
1816                                 /* Check SCTP mask and update input set */
1817                                 if (sctp_mask->hdr.cksum) {
1818                                         rte_flow_error_set(error, EINVAL,
1819                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1820                                                    item,
1821                                                    "Invalid SCTP mask");
1822                                         return -rte_errno;
1823                                 }
1824
1825                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1826                                         input_set |= tunnel_type ?
1827                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1828                                                      ICE_INSET_SCTP_SRC_PORT;
1829                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1830                                         input_set |= tunnel_type ?
1831                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1832                                                      ICE_INSET_SCTP_DST_PORT;
1833
1834                                 /* Get filter info */
1835                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1836                                         filter->input.ip.v4.dst_port =
1837                                                 sctp_spec->hdr.src_port;
1838                                         filter->input.ip.v4.src_port =
1839                                                 sctp_spec->hdr.dst_port;
1840                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1841                                         filter->input.ip.v6.dst_port =
1842                                                 sctp_spec->hdr.src_port;
1843                                         filter->input.ip.v6.src_port =
1844                                                 sctp_spec->hdr.dst_port;
1845                                 }
1846                         }
1847                         break;
1848                 case RTE_FLOW_ITEM_TYPE_VOID:
1849                         break;
1850                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1851                         l3 = RTE_FLOW_ITEM_TYPE_END;
1852                         vxlan_spec = item->spec;
1853                         vxlan_mask = item->mask;
1854
1855                         if (vxlan_spec || vxlan_mask) {
1856                                 rte_flow_error_set(error, EINVAL,
1857                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1858                                                    item,
1859                                                    "Matching on VXLAN fields is not supported");
1860                                 return -rte_errno;
1861                         }
1862
1863                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1864                         break;
1865                 case RTE_FLOW_ITEM_TYPE_GTPU:
1866                         l3 = RTE_FLOW_ITEM_TYPE_END;
1867                         gtp_spec = item->spec;
1868                         gtp_mask = item->mask;
1869
1870                         if (gtp_spec && gtp_mask) {
1871                                 if (gtp_mask->v_pt_rsv_flags ||
1872                                     gtp_mask->msg_type ||
1873                                     gtp_mask->msg_len) {
1874                                         rte_flow_error_set(error, EINVAL,
1875                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1876                                                    item,
1877                                                    "Invalid GTP mask");
1878                                         return -rte_errno;
1879                                 }
1880
1881                                 if (gtp_mask->teid == UINT32_MAX)
1882                                         input_set |= ICE_INSET_GTPU_TEID;
1883
1884                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1885                         }
1886
1887                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1888                         break;
1889                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1890                         gtp_psc_spec = item->spec;
1891                         gtp_psc_mask = item->mask;
1892
1893                         if (gtp_psc_spec && gtp_psc_mask) {
1894                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1895                                         input_set |= ICE_INSET_GTPU_QFI;
1896
1897                                 filter->input.gtpu_data.qfi =
1898                                         gtp_psc_spec->qfi;
1899                         }
1900                         break;
1901                 default:
1902                         rte_flow_error_set(error, EINVAL,
1903                                    RTE_FLOW_ERROR_TYPE_ITEM,
1904                                    item,
1905                                    "Invalid pattern item.");
1906                         return -rte_errno;
1907                 }
1908         }
1909
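        /*
         * A GTP-U tunnel always programs the IPv4 GTP-U profile,
         * overriding whatever flow type the headers above selected.
         */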
1910         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1911                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1912
1913         filter->tunnel_type = tunnel_type;
1914         filter->input.flow_type = flow_type;
1915         filter->input_set = input_set;
1916
1917         return 0;
1918 }
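
/*
 * Example (testpmd syntax; values are illustrative and assume the comms
 * DDP package parser below is active): a GTP-U rule accepted by the
 * pattern parser above looks like
 *
 *     flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x1234 /
 *         gtp_psc qfi is 0x34 / end actions queue index 1 / end
 *
 * which sets ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI in the input set.
 */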
1919
1920 static int
1921 ice_fdir_parse(struct ice_adapter *ad,
1922                struct ice_pattern_match_item *array,
1923                uint32_t array_len,
1924                const struct rte_flow_item pattern[],
1925                const struct rte_flow_action actions[],
1926                void **meta,
1927                struct rte_flow_error *error)
1928 {
1929         struct ice_pf *pf = &ad->pf;
1930         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1931         struct ice_pattern_match_item *item = NULL;
1932         uint64_t input_set;
1933         int ret;
1934
1935         memset(filter, 0, sizeof(*filter));
1936         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1937         if (!item)
1938                 return -rte_errno;
1939
1940         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1941         if (ret)
1942                 goto error;
1943         input_set = filter->input_set;
1944         if (!input_set || input_set & ~item->input_set_mask) {
1945                 rte_flow_error_set(error, EINVAL,
1946                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1947                                    pattern,
1948                                    "Invalid input set");
1949                 ret = -rte_errno;
1950                 goto error;
1951         }
1952
1953         ret = ice_fdir_parse_action(ad, actions, error, filter);
1954         if (ret)
1955                 goto error;
1956
1957         *meta = filter;
1958 error:
1959         rte_free(item);
1960         return ret;
1961 }
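
/*
 * Note: on success *meta points at the PF-wide scratch buffer
 * pf->fdir.conf, which is memset and reused on every parse; the create
 * callback is therefore expected to copy the filter before the next rule
 * is parsed (assumption based on the single shared buffer above).
 */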
1962
1963 static struct ice_flow_parser ice_fdir_parser_os = {
1964         .engine = &ice_fdir_engine,
1965         .array = ice_fdir_pattern_os,
1966         .array_len = RTE_DIM(ice_fdir_pattern_os),
1967         .parse_pattern_action = ice_fdir_parse,
1968         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1969 };
1970
1971 static struct ice_flow_parser ice_fdir_parser_comms = {
1972         .engine = &ice_fdir_engine,
1973         .array = ice_fdir_pattern_comms,
1974         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1975         .parse_pattern_action = ice_fdir_parse,
1976         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1977 };
1978
1979 RTE_INIT(ice_fdir_engine_register)
1980 {
1981         ice_register_flow_engine(&ice_fdir_engine);
1982 }