net/ice: fix flow type selection for flow director
[dpdk.git] / drivers / net / ice / ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}
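
/*
 * Example (illustrative sketch, not part of the upstream driver): the
 * lookup-before-reserve idiom above makes the reservation idempotent,
 * so re-running setup after a port restart reuses the existing zone
 * instead of failing with EEXIST. All "example_" names here are
 * hypothetical.
 */
static __rte_unused const struct rte_memzone *
example_reserve_twice(void)
{
        const struct rte_memzone *a, *b;

        a = ice_memzone_reserve("example_zone", 4096, SOCKET_ID_ANY);
        /* second call finds the same zone via rte_memzone_lookup() */
        b = ice_memzone_reserve("example_zone", 4096, SOCKET_ID_ANY);
        return (a == b) ? a : NULL;
}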

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}
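
/*
 * Example (illustrative sketch, not in the upstream driver): the
 * fail_mem path above releases only the entries allocated before the
 * failure and then the table itself, so a half-built profile array
 * never leaks. A caller therefore only needs to check the return
 * value; the "example_" name is hypothetical.
 */
static __rte_unused int
example_prof_alloc(struct ice_hw *hw)
{
        int ret = ice_fdir_prof_alloc(hw);

        /* on failure the unwind leaves hw->fdir_prof == NULL */
        if (ret)
                return ret;
        return hw->fdir_prof[ICE_FLTR_PTYPE_NONF_NONE + 1] ? 0 : -EINVAL;
}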

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}
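
/*
 * Example (illustrative sketch, not in the upstream driver): the two
 * allocation modes above, assuming counters were initialized via
 * ice_fdir_counter_init(). A shared counter is found by ID and
 * reference-counted; a non-shared request always takes a fresh pool
 * slot. Frees are symmetric: a hardware slot is recycled only when the
 * last reference goes away.
 */
static __rte_unused void
example_counter_modes(struct ice_pf *pf)
{
        struct ice_fdir_counter *a, *b, *c;

        a = ice_fdir_counter_alloc(pf, 1, 42);  /* shared, id 42 */
        b = ice_fdir_counter_alloc(pf, 1, 42);  /* same slot, ref_cnt now 2 */
        c = ice_fdir_counter_alloc(pf, 0, 42);  /* distinct, non-shared slot */

        ice_fdir_counter_free(pf, c);
        ice_fdir_counter_free(pf, b);
        ice_fdir_counter_free(pf, a);   /* slot returns to the pool here */
}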

static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}
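
/*
 * Example (illustrative sketch, not in the upstream driver): the hash
 * table above stores only keys; rte_hash_add_key() returns a stable
 * slot index that is then used to index hash_map[], which holds the
 * filter pointers. Assumes ice_fdir_init_filter_list() succeeded; the
 * "example_" name is hypothetical.
 */
static __rte_unused struct ice_fdir_filter_conf *
example_hash_pair(struct ice_fdir_info *fdir_info,
                  const struct ice_fdir_fltr_pattern *key,
                  struct ice_fdir_filter_conf *conf)
{
        int pos = rte_hash_add_key(fdir_info->hash_table, key);

        if (pos < 0)
                return NULL;
        /* the slot index doubles as the hash_map[] index */
        fdir_info->hash_map[pos] = conf;

        pos = rte_hash_lookup(fdir_info->hash_table, key);
        return (pos >= 0) ? fdir_info->hash_map[pos] : NULL;
}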

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        /* remove by profile ID, not by the raw filter type */
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}
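
/*
 * Example (illustrative sketch, not in the upstream driver): profile
 * IDs are a flat encoding of (ptype, is_tunnel) — tunnel profiles live
 * in a second bank offset by ICE_FLTR_PTYPE_MAX, so both variants of
 * one flow type can coexist in the FD block.
 */
static __rte_unused uint64_t
example_prof_id(enum ice_fltr_ptype ptype, bool is_tunnel)
{
        /* e.g. IPV4_UDP maps to id N (plain) or N + ICE_FLTR_PTYPE_MAX */
        return (uint64_t)ptype + (is_tunnel ? ICE_FLTR_PTYPE_MAX : 0);
}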

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}
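
/*
 * Example (illustrative sketch, not in the upstream driver): callers
 * pre-fill the field array with ICE_FLOW_FIELD_IDX_MAX, which doubles
 * as the terminator they later scan for. The "example_" name is
 * hypothetical.
 */
static __rte_unused int
example_parse_inset(void)
{
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, cnt = 0;

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;

        ice_fdir_input_set_parse(ICE_INSET_IPV4_SRC | ICE_INSET_UDP_DST_PORT,
                                 field);

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++)
                cnt++;
        return cnt;     /* here: 2 — IPv4 SA, then UDP dst port */
}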

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}
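
/*
 * Example (illustrative sketch, not in the upstream driver, assuming
 * ice_fdir_setup() has completed): programming the same input set
 * twice is harmless — the second call hits the memcmp() short-circuit
 * in ice_fdir_hw_tbl_conf(), returns -EAGAIN internally, and is
 * reported as success here, so re-creating an identical rule does not
 * rebuild the profile.
 */
static __rte_unused int
example_idempotent_inset(struct ice_pf *pf)
{
        uint64_t inset = ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST;
        int ret;

        ret = ice_fdir_input_set_conf(pf, ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
                                      inset, false);
        if (ret)
                return ret;
        /* second, identical request: -EAGAIN path, still returns 0 */
        return ice_fdir_input_set_conf(pf, ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
                                       inset, false);
}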

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}
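
/*
 * Example (illustrative sketch, not in the upstream driver): the
 * pattern key is derived purely from the filter's match fields, so an
 * identical rule always hashes to the same entry — this is what makes
 * the duplicate check in ice_fdir_create_filter() work. The
 * "example_" name is hypothetical.
 */
static __rte_unused bool
example_duplicate_check(struct ice_pf *pf, struct ice_fdir_filter_conf *f)
{
        struct ice_fdir_fltr_pattern key;

        ice_fdir_extract_fltr_key(&key, f);
        return ice_fdir_entry_lookup(&pf->fdir, &key) != NULL;
}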

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}
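
/*
 * Example (illustrative sketch, not in the upstream driver): what a
 * create request looks like from the application side. This minimal
 * rule steers IPv4/UDP traffic with destination port 53 to Rx queue 3;
 * the generic flow layer routes it to ice_fdir_create_filter() above.
 * Error handling is elided and the "example_" name is hypothetical.
 */
static __rte_unused struct rte_flow *
example_app_rule(uint16_t port_id)
{
        struct rte_flow_error err;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_udp udp_spec = {
                .hdr = { .dst_port = RTE_BE16(53) },
        };
        const struct rte_flow_item_udp udp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xffff) },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 3 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
}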

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value from
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}
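
/*
 * Example (illustrative sketch, not in the upstream driver): reading a
 * rule's hit counter from the application through the generic query
 * API; the .reset flag maps to the register clear at the end of
 * ice_fdir_query_count() above. The "example_" name is hypothetical.
 */
static __rte_unused int
example_app_query(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count stats = { .reset = 1 };
        const struct rte_flow_action count_act = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error err;
        int ret;

        ret = rte_flow_query(port_id, flow, &count_act, &stats, &err);
        if (ret)
                return ret;
        return stats.hits_set ? 0 : -ENOTSUP;
}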

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that the queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}
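
/*
 * Example (illustrative sketch, not in the upstream driver): a
 * queue-region action as the parser above expects it — a power-of-two
 * number of contiguous queues. Assumes the port has at least 12 Rx
 * queues; the base index and log2 size are what end up in the
 * programming descriptor. The "example_" name is hypothetical.
 */
static __rte_unused int
example_qregion(struct ice_pf *pf, struct ice_fdir_filter_conf *filter)
{
        static const uint16_t queues[4] = { 8, 9, 10, 11 };
        const struct rte_flow_action_rss rss = {
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        const struct rte_flow_action act = {
                .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss,
        };
        struct rte_flow_error err;

        /* on success: q_index = 8, q_region = log2(4) = 2 */
        return ice_fdir_parse_action_qregion(pf, &err, &act, filter);
}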

static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}
1392
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	enum ice_fltr_ptype flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

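			/* Only the destination MAC can be matched, and
			 * only with an all-ones mask; any source MAC
			 * spec or mask is rejected.
			 */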
			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Src mac not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

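				/* src and dst look swapped on purpose:
				 * the base code swaps them again when it
				 * builds the FDIR programming packet, so
				 * the rule matches the flow as received.
				 */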
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

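				/* The traffic class occupies bits 20-27
				 * of vtc_flow; extract it after the
				 * big-endian to CPU conversion.
				 */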
				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
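			/* Reset l3 so the inner IP item re-selects the
			 * flow type; no VXLAN header field can be
			 * matched, only inner fields.
			 */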
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Invalid pattern item.");
			return -rte_errno;
		}
	}

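	/* GTP-U tunnelled flows are programmed with the dedicated GTPU
	 * flow type, overriding whatever was derived from the outer
	 * headers above.
	 */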
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

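/*
 * Engine entry point: match the pattern against the engine's pattern
 * table, parse pattern and actions into the PF's FDIR filter config and
 * return it through *meta.
 */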
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
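	/* Every field collected from the pattern must be covered by the
	 * input-set mask of the matched pattern entry.
	 */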
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	*meta = filter;

	return 0;
}

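/* Both parsers share the same parse routine; they differ only in the
 * pattern table, which depends on the DDP package loaded on the device
 * (OS default vs comms).
 */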
static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}