net/ice: fix pattern name of GTPU with extension header
drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_EH_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,
				       ICE_FDIR_INSET_GTPU_EH_IPV4,          ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

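/* Look up the memzone by name and reuse it if it already exists,
 * otherwise reserve a new IOVA-contiguous memzone.
 */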
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

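/* Allocate the FDIR profile table and one profile per filter type,
 * rolling back all allocations on failure.
 */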
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}

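/* Add a block of counters to the pool container and link each counter
 * into the pool's free list.
 */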
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
					*container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->ref_cnt &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

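/* Allocate a counter: reuse a shared counter with a matching ID when
 * possible, otherwise take the first free counter from the pools.
 */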
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

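/* Drop a reference to a counter and return it to its pool's free list
 * when the last reference is gone.
 */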
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}

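/* Create the hash table and map used to track installed FDIR filters */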
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

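/* Free the per-filter-type profiles allocated by ice_fdir_prof_alloc() */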
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, ptype);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

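/* Check the new segment against the profile installed for this filter
 * type: reuse it on an exact match, fail if rules still reference it,
 * or remove the empty conflicting profile.
 */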
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* the input set matches the existing profile, reuse it (-EEXIST) */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}

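/* Return true if the conflicting profile is absent or empty (and thus
 * removable), false if rules still reference it.
 */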
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rule exists, fail to resolve the conflict */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}

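/* Resolve conflicts between a filter type and the other filter types
 * that share the same hardware profile space.
 */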
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}
	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}

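/* Program a flow profile for the filter type and add flow entries for
 * both the main VSI and the FDIR control VSI.
 */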
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	/* check for an input set conflict on the current profile. */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check whether the profile conflicts with other profiles. */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

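/* Translate an input set bitmap into the list of flow fields to be
 * extracted by the hardware.
 */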
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint32_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

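/* Build the flow segment(s) for the given filter type and input set,
 * then program the hardware profile table.
 */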
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EEXIST) ? 0 : ret;
	}

	return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

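/* Engine init hook: set up FDIR resources and register the parser that
 * matches the active package type.
 */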
static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		parser = &ice_fdir_parser_os;
	else
		return -EINVAL;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

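/* Program (or remove) a filter in hardware via the FDIR programming
 * queue, using a dummy packet built from the filter input.
 */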
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to generate dummy packet.");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

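/* Build the lookup key that identifies a filter in the SW hash table */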
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check whether the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
			const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry into hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}

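/* Create a flow director filter: configure the input set profile,
 * optionally allocate a counter, program the rule in hardware and
 * track it in the SW list.
 */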
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
			filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_entry;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		      struct rte_flow *flow,
		      struct rte_flow_query_count *flow_stats,
		      struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ACTION,
				  NULL,
				  "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32-bits latches the high 32-bits into a shadow
	 * register. Reading the high 32-bit returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};

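/* Parse an RSS action used as a queue region: the queues must be
 * continuous and the region size a power of two.
 */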
1384 static int
1385 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1386                               struct rte_flow_error *error,
1387                               const struct rte_flow_action *act,
1388                               struct ice_fdir_filter_conf *filter)
1389 {
1390         const struct rte_flow_action_rss *rss = act->conf;
1391         uint32_t i;
1392
1393         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1394                 rte_flow_error_set(error, EINVAL,
1395                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1396                                    "Invalid action.");
1397                 return -rte_errno;
1398         }
1399
1400         if (rss->queue_num <= 1) {
1401                 rte_flow_error_set(error, EINVAL,
1402                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1403                                    "Queue region size can't be 0 or 1.");
1404                 return -rte_errno;
1405         }
1406
1407         /* check if queue index for queue region is continuous */
1408         for (i = 0; i < rss->queue_num - 1; i++) {
1409                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1410                         rte_flow_error_set(error, EINVAL,
1411                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1412                                            "Discontinuous queue region");
1413                         return -rte_errno;
1414                 }
1415         }
1416
1417         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1418                 rte_flow_error_set(error, EINVAL,
1419                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1420                                    "Invalid queue region indexes.");
1421                 return -rte_errno;
1422         }
1423
1424         if (!(rte_is_power_of_2(rss->queue_num) &&
1425              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1426                 rte_flow_error_set(error, EINVAL,
1427                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1428                                    "The region size should be any of the following values:"
1429                                    "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1430                                    "of queues do not exceed the VSI allocation.");
1431                 return -rte_errno;
1432         }
1433
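             /* The hardware takes the region size as a power of two:
              * q_region stores log2 of the size, e.g. 8 queues -> 3.
              */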
1434         filter->input.q_index = rss->queue[0];
1435         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1436         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1437
1438         return 0;
1439 }
1440
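     /* Parse the action list: exactly one destination action (QUEUE,
      * DROP, PASSTHRU or an RSS queue region) is required, plus at most
      * one MARK and one COUNT action.
      */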
1441 static int
1442 ice_fdir_parse_action(struct ice_adapter *ad,
1443                       const struct rte_flow_action actions[],
1444                       struct rte_flow_error *error,
1445                       struct ice_fdir_filter_conf *filter)
1446 {
1447         struct ice_pf *pf = &ad->pf;
1448         const struct rte_flow_action_queue *act_q;
1449         const struct rte_flow_action_mark *mark_spec = NULL;
1450         const struct rte_flow_action_count *act_count;
1451         uint32_t dest_num = 0;
1452         uint32_t mark_num = 0;
1453         uint32_t counter_num = 0;
1454         int ret;
1455
1456         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1457                 switch (actions->type) {
1458                 case RTE_FLOW_ACTION_TYPE_VOID:
1459                         break;
1460                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1461                         dest_num++;
1462
1463                         act_q = actions->conf;
1464                         filter->input.q_index = act_q->index;
1465                         if (filter->input.q_index >=
1466                                         pf->dev_data->nb_rx_queues) {
1467                                 rte_flow_error_set(error, EINVAL,
1468                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1469                                                    actions,
1470                                                    "Invalid queue for FDIR.");
1471                                 return -rte_errno;
1472                         }
1473                         filter->input.dest_ctl =
1474                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1475                         break;
1476                 case RTE_FLOW_ACTION_TYPE_DROP:
1477                         dest_num++;
1478
1479                         filter->input.dest_ctl =
1480                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1481                         break;
1482                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1483                         dest_num++;
1484
1485                         filter->input.dest_ctl =
1486                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1487                         filter->input.q_index = 0;
1488                         break;
1489                 case RTE_FLOW_ACTION_TYPE_RSS:
1490                         dest_num++;
1491
1492                         ret = ice_fdir_parse_action_qregion(pf,
1493                                                 error, actions, filter);
1494                         if (ret)
1495                                 return ret;
1496                         break;
1497                 case RTE_FLOW_ACTION_TYPE_MARK:
1498                         mark_num++;
1499
1500                         mark_spec = actions->conf;
1501                         filter->input.fltr_id = mark_spec->id;
1502                         break;
1503                 case RTE_FLOW_ACTION_TYPE_COUNT:
1504                         counter_num++;
1505
1506                         act_count = actions->conf;
1507                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1508                         rte_memcpy(&filter->act_count, act_count,
1509                                                 sizeof(filter->act_count));
1510
1511                         break;
1512                 default:
1513                         rte_flow_error_set(error, EINVAL,
1514                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1515                                    "Invalid action.");
1516                         return -rte_errno;
1517                 }
1518         }
1519
1520         if (dest_num == 0 || dest_num >= 2) {
1521                 rte_flow_error_set(error, EINVAL,
1522                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1523                            "Unsupported action combination");
1524                 return -rte_errno;
1525         }
1526
1527         if (mark_num >= 2) {
1528                 rte_flow_error_set(error, EINVAL,
1529                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1530                            "Too many mark actions");
1531                 return -rte_errno;
1532         }
1533
1534         if (counter_num >= 2) {
1535                 rte_flow_error_set(error, EINVAL,
1536                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1537                            "Too many count actions");
1538                 return -rte_errno;
1539         }
1540
1541         return 0;
1542 }
1543
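     /* Walk the pattern items, validate each mask, and translate
      * fully-masked fields into ICE_INSET_* bits plus the matching
      * values in filter->input. Only exact matches (all-ones masks)
      * select a field for matching.
      */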
1544 static int
1545 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1546                        const struct rte_flow_item pattern[],
1547                        struct rte_flow_error *error,
1548                        struct ice_fdir_filter_conf *filter)
1549 {
1550         const struct rte_flow_item *item = pattern;
1551         enum rte_flow_item_type item_type;
1552         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1553         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1554         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1555         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1556         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1557         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1558         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1559         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1560         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1561         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1562         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1563         uint64_t input_set = ICE_INSET_NONE;
1564         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1565         uint8_t ipv6_addr_mask[16] = {
1566                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1567                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1568         };
1569         uint32_t vtc_flow_cpu;
1570
1571
1572         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1573                 if (item->last) {
1574                         rte_flow_error_set(error, EINVAL,
1575                                         RTE_FLOW_ERROR_TYPE_ITEM,
1576                                         item,
1577                                         "Not support range");
1578                         return -rte_errno;
1579                 }
1580                 item_type = item->type;
1581
1582                 switch (item_type) {
1583                 case RTE_FLOW_ITEM_TYPE_ETH:
1584                         eth_spec = item->spec;
1585                         eth_mask = item->mask;
1586
1587                         if (eth_spec && eth_mask) {
1588                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1589                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1590                                         rte_flow_error_set(error, EINVAL,
1591                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1592                                                 item,
1593                                                 "Src mac not support");
1594                                         return -rte_errno;
1595                                 }
1596
1597                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1598                                         rte_flow_error_set(error, EINVAL,
1599                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1600                                                 item,
1601                                                 "Invalid mac addr mask");
1602                                         return -rte_errno;
1603                                 }
1604
1605                                 input_set |= ICE_INSET_DMAC;
1606                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1607                                            &eth_spec->dst,
1608                                            RTE_ETHER_ADDR_LEN);
1609                         }
1610                         break;
1611                 case RTE_FLOW_ITEM_TYPE_IPV4:
1612                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1613                         ipv4_spec = item->spec;
1614                         ipv4_mask = item->mask;
1615
1616                         if (ipv4_spec && ipv4_mask) {
1617                                 /* Check IPv4 mask and update input set */
1618                                 if (ipv4_mask->hdr.version_ihl ||
1619                                     ipv4_mask->hdr.total_length ||
1620                                     ipv4_mask->hdr.packet_id ||
1621                                     ipv4_mask->hdr.fragment_offset ||
1622                                     ipv4_mask->hdr.hdr_checksum) {
1623                                         rte_flow_error_set(error, EINVAL,
1624                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1625                                                    item,
1626                                                    "Invalid IPv4 mask.");
1627                                         return -rte_errno;
1628                                 }
1629                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1630                                         input_set |= tunnel_type ?
1631                                                      ICE_INSET_TUN_IPV4_SRC :
1632                                                      ICE_INSET_IPV4_SRC;
1633                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1634                                         input_set |= tunnel_type ?
1635                                                      ICE_INSET_TUN_IPV4_DST :
1636                                                      ICE_INSET_IPV4_DST;
1637                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1638                                         input_set |= ICE_INSET_IPV4_TOS;
1639                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1640                                         input_set |= ICE_INSET_IPV4_TTL;
1641                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1642                                         input_set |= ICE_INSET_IPV4_PROTO;
1643
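                                     /* Source and destination are deliberately
                                      * swapped here: the shared base code swaps
                                      * them back when it builds the FDIR
                                      * programming packet, so the net match
                                      * follows the pattern spec.
                                      */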
1644                                 filter->input.ip.v4.dst_ip =
1645                                         ipv4_spec->hdr.src_addr;
1646                                 filter->input.ip.v4.src_ip =
1647                                         ipv4_spec->hdr.dst_addr;
1648                                 filter->input.ip.v4.tos =
1649                                         ipv4_spec->hdr.type_of_service;
1650                                 filter->input.ip.v4.ttl =
1651                                         ipv4_spec->hdr.time_to_live;
1652                                 filter->input.ip.v4.proto =
1653                                         ipv4_spec->hdr.next_proto_id;
1654                         }
1655
1656                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1657                         break;
1658                 case RTE_FLOW_ITEM_TYPE_IPV6:
1659                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1660                         ipv6_spec = item->spec;
1661                         ipv6_mask = item->mask;
1662
1663                         if (ipv6_spec && ipv6_mask) {
1664                                 /* Check IPv6 mask and update input set */
1665                                 if (ipv6_mask->hdr.payload_len) {
1666                                         rte_flow_error_set(error, EINVAL,
1667                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1668                                                    item,
1669                                                    "Invalid IPv6 mask");
1670                                         return -rte_errno;
1671                                 }
1672
1673                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1674                                             ipv6_addr_mask,
1675                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1676                                         input_set |= ICE_INSET_IPV6_SRC;
1677                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1678                                             ipv6_addr_mask,
1679                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1680                                         input_set |= ICE_INSET_IPV6_DST;
1681
1682                                 if ((ipv6_mask->hdr.vtc_flow &
1683                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1684                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1685                                         input_set |= ICE_INSET_IPV6_TC;
1686                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1687                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1688                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1689                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1690
1691                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1692                                            ipv6_spec->hdr.src_addr, 16);
1693                                 rte_memcpy(filter->input.ip.v6.src_ip,
1694                                            ipv6_spec->hdr.dst_addr, 16);
1695
1696                                 vtc_flow_cpu =
1697                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1698                                 filter->input.ip.v6.tc =
1699                                         (uint8_t)(vtc_flow_cpu >>
1700                                                   ICE_FDIR_IPV6_TC_OFFSET);
1701                                 filter->input.ip.v6.proto =
1702                                         ipv6_spec->hdr.proto;
1703                                 filter->input.ip.v6.hlim =
1704                                         ipv6_spec->hdr.hop_limits;
1705                         }
1706
1707                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1708                         break;
1709                 case RTE_FLOW_ITEM_TYPE_TCP:
1710                         tcp_spec = item->spec;
1711                         tcp_mask = item->mask;
1712
1713                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1714                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1715                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1716                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1717
1718                         if (tcp_spec && tcp_mask) {
1719                                 /* Check TCP mask and update input set */
1720                                 if (tcp_mask->hdr.sent_seq ||
1721                                     tcp_mask->hdr.recv_ack ||
1722                                     tcp_mask->hdr.data_off ||
1723                                     tcp_mask->hdr.tcp_flags ||
1724                                     tcp_mask->hdr.rx_win ||
1725                                     tcp_mask->hdr.cksum ||
1726                                     tcp_mask->hdr.tcp_urp) {
1727                                         rte_flow_error_set(error, EINVAL,
1728                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1729                                                    item,
1730                                                    "Invalid TCP mask");
1731                                         return -rte_errno;
1732                                 }
1733
1734                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1735                                         input_set |= tunnel_type ?
1736                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1737                                                      ICE_INSET_TCP_SRC_PORT;
1738                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1739                                         input_set |= tunnel_type ?
1740                                                      ICE_INSET_TUN_TCP_DST_PORT :
1741                                                      ICE_INSET_TCP_DST_PORT;
1742
1743                                 /* Get filter info */
1744                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1745                                         filter->input.ip.v4.dst_port =
1746                                                 tcp_spec->hdr.src_port;
1747                                         filter->input.ip.v4.src_port =
1748                                                 tcp_spec->hdr.dst_port;
1749                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1750                                         filter->input.ip.v6.dst_port =
1751                                                 tcp_spec->hdr.src_port;
1752                                         filter->input.ip.v6.src_port =
1753                                                 tcp_spec->hdr.dst_port;
1754                                 }
1755                         }
1756                         break;
1757                 case RTE_FLOW_ITEM_TYPE_UDP:
1758                         udp_spec = item->spec;
1759                         udp_mask = item->mask;
1760
1761                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1762                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1763                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1764                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1765
1766                         if (udp_spec && udp_mask) {
1767                                 /* Check UDP mask and update input set */
1768                                 if (udp_mask->hdr.dgram_len ||
1769                                     udp_mask->hdr.dgram_cksum) {
1770                                         rte_flow_error_set(error, EINVAL,
1771                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1772                                                    item,
1773                                                    "Invalid UDP mask");
1774                                         return -rte_errno;
1775                                 }
1776
1777                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1778                                         input_set |= tunnel_type ?
1779                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1780                                                      ICE_INSET_UDP_SRC_PORT;
1781                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1782                                         input_set |= tunnel_type ?
1783                                                      ICE_INSET_TUN_UDP_DST_PORT :
1784                                                      ICE_INSET_UDP_DST_PORT;
1785
1786                                 /* Get filter info */
1787                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1788                                         filter->input.ip.v4.dst_port =
1789                                                 udp_spec->hdr.src_port;
1790                                         filter->input.ip.v4.src_port =
1791                                                 udp_spec->hdr.dst_port;
1792                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1793                                         filter->input.ip.v6.src_port =
1794                                                 udp_spec->hdr.dst_port;
1795                                         filter->input.ip.v6.dst_port =
1796                                                 udp_spec->hdr.src_port;
1797                                 }
1798                         }
1799                         break;
1800                 case RTE_FLOW_ITEM_TYPE_SCTP:
1801                         sctp_spec = item->spec;
1802                         sctp_mask = item->mask;
1803
1804                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1805                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1806                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1807                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1808
1809                         if (sctp_spec && sctp_mask) {
1810                                 /* Check SCTP mask and update input set */
1811                                 if (sctp_mask->hdr.cksum) {
1812                                         rte_flow_error_set(error, EINVAL,
1813                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1814                                                    item,
1815                                                    "Invalid UDP mask");
1816                                         return -rte_errno;
1817                                 }
1818
1819                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1820                                         input_set |= tunnel_type ?
1821                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1822                                                      ICE_INSET_SCTP_SRC_PORT;
1823                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1824                                         input_set |= tunnel_type ?
1825                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1826                                                      ICE_INSET_SCTP_DST_PORT;
1827
1828                                 /* Get filter info */
1829                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1830                                         filter->input.ip.v4.dst_port =
1831                                                 sctp_spec->hdr.src_port;
1832                                         filter->input.ip.v4.src_port =
1833                                                 sctp_spec->hdr.dst_port;
1834                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1835                                         filter->input.ip.v6.dst_port =
1836                                                 sctp_spec->hdr.src_port;
1837                                         filter->input.ip.v6.src_port =
1838                                                 sctp_spec->hdr.dst_port;
1839                                 }
1840                         }
1841                         break;
1842                 case RTE_FLOW_ITEM_TYPE_VOID:
1843                         break;
1844                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1845                         l3 = RTE_FLOW_ITEM_TYPE_END;
1846                         vxlan_spec = item->spec;
1847                         vxlan_mask = item->mask;
1848
1849                         if (vxlan_spec || vxlan_mask) {
1850                                 rte_flow_error_set(error, EINVAL,
1851                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1852                                                    item,
1853                                                    "Invalid vxlan field");
1854                                 return -rte_errno;
1855                         }
1856
1857                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1858                         break;
1859                 case RTE_FLOW_ITEM_TYPE_GTPU:
1860                         l3 = RTE_FLOW_ITEM_TYPE_END;
1861                         gtp_spec = item->spec;
1862                         gtp_mask = item->mask;
1863
1864                         if (gtp_spec && gtp_mask) {
1865                                 if (gtp_mask->v_pt_rsv_flags ||
1866                                     gtp_mask->msg_type ||
1867                                     gtp_mask->msg_len) {
1868                                         rte_flow_error_set(error, EINVAL,
1869                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1870                                                    item,
1871                                                    "Invalid GTP mask");
1872                                         return -rte_errno;
1873                                 }
1874
1875                                 if (gtp_mask->teid == UINT32_MAX)
1876                                         input_set |= ICE_INSET_GTPU_TEID;
1877
1878                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1879                         }
1880                         break;
1881                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1882                         gtp_psc_spec = item->spec;
1883                         gtp_psc_mask = item->mask;
1884
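                             /* The QFI is carried in the GTP-U PDU session
                              * container extension header (GTP_PSC item).
                              */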
1885                         if (gtp_psc_spec && gtp_psc_mask) {
1886                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1887                                         input_set |= ICE_INSET_GTPU_QFI;
1888
1889                                 filter->input.gtpu_data.qfi =
1890                                         gtp_psc_spec->qfi;
1891                         }
1892
1893                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1894                         break;
1895                 default:
1896                         rte_flow_error_set(error, EINVAL,
1897                                    RTE_FLOW_ERROR_TYPE_ITEM,
1898                                    item,
1899                                    "Invalid pattern item.");
1900                         return -rte_errno;
1901                 }
1902         }
1903
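             /* A GTPU tunnel rule currently always resolves to the
              * GTPU inner IPv4 'other' flow type.
              */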
1904         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1905                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1906
1907         filter->tunnel_type = tunnel_type;
1908         filter->input.flow_type = flow_type;
1909         filter->input_set = input_set;
1910
1911         return 0;
1912 }
1913
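     /* Top-level rte_flow parser for the FDIR engine: match the pattern
      * against the supported table, extract the input set, reject fields
      * the pattern does not allow, then parse the actions. The resulting
      * filter (pf->fdir.conf) is handed back through meta.
      */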
1914 static int
1915 ice_fdir_parse(struct ice_adapter *ad,
1916                struct ice_pattern_match_item *array,
1917                uint32_t array_len,
1918                const struct rte_flow_item pattern[],
1919                const struct rte_flow_action actions[],
1920                void **meta,
1921                struct rte_flow_error *error)
1922 {
1923         struct ice_pf *pf = &ad->pf;
1924         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1925         struct ice_pattern_match_item *item = NULL;
1926         uint64_t input_set;
1927         int ret;
1928
1929         memset(filter, 0, sizeof(*filter));
1930         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1931         if (!item)
1932                 return -rte_errno;
1933
1934         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1935         if (ret)
1936                 return ret;
1937         input_set = filter->input_set;
1938         if (!input_set || input_set & ~item->input_set_mask) {
1939                 rte_flow_error_set(error, EINVAL,
1940                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1941                                    pattern,
1942                                    "Invalid input set");
1943                 return -rte_errno;
1944         }
1945
1946         ret = ice_fdir_parse_action(ad, actions, error, filter);
1947         if (ret)
1948                 return ret;
1949
1950         *meta = filter;
1951
1952         return 0;
1953 }
1954
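     /* Two parsers are registered, exposing the pattern table that
      * matches the loaded DDP package (OS default vs. comms).
      */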
1955 static struct ice_flow_parser ice_fdir_parser_os = {
1956         .engine = &ice_fdir_engine,
1957         .array = ice_fdir_pattern_os,
1958         .array_len = RTE_DIM(ice_fdir_pattern_os),
1959         .parse_pattern_action = ice_fdir_parse,
1960         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1961 };
1962
1963 static struct ice_flow_parser ice_fdir_parser_comms = {
1964         .engine = &ice_fdir_engine,
1965         .array = ice_fdir_pattern_comms,
1966         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1967         .parse_pattern_action = ice_fdir_parse,
1968         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1969 };
1970
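     /* Constructor: register the FDIR engine with the generic flow
      * framework at startup.
      */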
1971 RTE_INIT(ice_fdir_engine_register)
1972 {
1973         ice_register_flow_engine(&ice_fdir_engine);
1974 }