[dpdk.git] drivers/net/ice/ice_fdir_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <rte_flow.h>
7 #include <rte_hash.h>
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
13 #include "ice_rxtx.h"
14 #include "ice_generic_flow.h"
15
16 #define ICE_FDIR_IPV6_TC_OFFSET         20
17 #define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
18
19 #define ICE_FDIR_MAX_QREGION_SIZE       128
20
21 #define ICE_FDIR_INSET_ETH_IPV4 (\
22         ICE_INSET_DMAC | \
23         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
24         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
25
26 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
27         ICE_FDIR_INSET_ETH_IPV4 | \
28         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
29
30 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
31         ICE_FDIR_INSET_ETH_IPV4 | \
32         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
33
34 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
35         ICE_FDIR_INSET_ETH_IPV4 | \
36         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
37
38 #define ICE_FDIR_INSET_ETH_IPV6 (\
39         ICE_INSET_DMAC | \
40         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
41         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
42
43 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
44         ICE_FDIR_INSET_ETH_IPV6 | \
45         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
46
47 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
48         ICE_FDIR_INSET_ETH_IPV6 | \
49         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
50
51 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
52         ICE_FDIR_INSET_ETH_IPV6 | \
53         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
54
55 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
56         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
57
58 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
59         ICE_FDIR_INSET_VXLAN_IPV4 | \
60         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
61
62 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
63         ICE_FDIR_INSET_VXLAN_IPV4 | \
64         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
65
66 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
67         ICE_FDIR_INSET_VXLAN_IPV4 | \
68         ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
69
70 #define ICE_FDIR_INSET_GTPU_IPV4 (\
71         ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
72
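/*
 * The tables below pair each supported rte_flow pattern with the
 * input-set bits (fields) FDIR can match for it.  For illustration, a
 * rule the IPv4/UDP entry admits could be created from testpmd roughly
 * as follows (hypothetical port and queue numbers):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        udp src is 32 dst is 33 / end actions queue index 2 / end
 */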
73 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
74         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
75         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
76         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
77         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
78         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
79         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
80         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
81         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
82         {pattern_eth_ipv4_udp_vxlan_ipv4,
83                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
84         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
85                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
86         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
87                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
88         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
89                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
90         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
91                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
92         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
93                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
94         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
95                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
96         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
97                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
98 };
99
100 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
101         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
102         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
103         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
104         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
105         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
106         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
107         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
108         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
109         {pattern_eth_ipv4_udp_vxlan_ipv4,
110                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
111         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
112                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
113         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
114                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
115         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
116                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
117         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
118                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
119         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
120                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
121         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
122                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
123         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
124                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
125         {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
126 };
127
128 static struct ice_flow_parser ice_fdir_parser_os;
129 static struct ice_flow_parser ice_fdir_parser_comms;
130
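/*
 * Look up the memzone before reserving it: on a port restart the zone
 * already exists, and reusing it avoids a spurious allocation failure.
 */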
131 static const struct rte_memzone *
132 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
133 {
134         const struct rte_memzone *mz;
135
136         mz = rte_memzone_lookup(name);
137         if (mz)
138                 return mz;
139
140         return rte_memzone_reserve_aligned(name, len, socket_id,
141                                            RTE_MEMZONE_IOVA_CONTIG,
142                                            ICE_RING_BASE_ALIGN);
143 }
144
145 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
146
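/*
 * Allocate one ice_fd_hw_prof slot per filter ptype; on failure every
 * slot allocated so far is rolled back.
 */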
147 static int
148 ice_fdir_prof_alloc(struct ice_hw *hw)
149 {
150         enum ice_fltr_ptype ptype, fltr_ptype;
151
152         if (!hw->fdir_prof) {
153                 hw->fdir_prof = (struct ice_fd_hw_prof **)
154                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
155                                    sizeof(*hw->fdir_prof));
156                 if (!hw->fdir_prof)
157                         return -ENOMEM;
158         }
159         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
160              ptype < ICE_FLTR_PTYPE_MAX;
161              ptype++) {
162                 if (!hw->fdir_prof[ptype]) {
163                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
164                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
165                         if (!hw->fdir_prof[ptype])
166                                 goto fail_mem;
167                 }
168         }
169         return 0;
170
171 fail_mem:
172         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
173              fltr_ptype < ptype;
174              fltr_ptype++) {
175                 rte_free(hw->fdir_prof[fltr_ptype]);
176                 hw->fdir_prof[fltr_ptype] = NULL;
177         }
178
179         rte_free(hw->fdir_prof);
180         hw->fdir_prof = NULL;
181
182         return -ENOMEM;
183 }
184
185 static int
186 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
187                           struct ice_fdir_counter_pool_container *container,
188                           uint32_t index_start,
189                           uint32_t len)
190 {
191         struct ice_fdir_counter_pool *pool;
192         uint32_t i;
193         int ret = 0;
194
195         pool = rte_zmalloc("ice_fdir_counter_pool",
196                            sizeof(*pool) +
197                            sizeof(struct ice_fdir_counter) * len,
198                            0);
199         if (!pool) {
200                 PMD_INIT_LOG(ERR,
201                              "Failed to allocate memory for fdir counter pool");
202                 return -ENOMEM;
203         }
204
205         TAILQ_INIT(&pool->counter_list);
206         TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
207
208         for (i = 0; i < len; i++) {
209                 struct ice_fdir_counter *counter = &pool->counters[i];
210
211                 counter->hw_index = index_start + i;
212                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
213         }
214
215         if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
216                 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
217                 ret = -EINVAL;
218                 goto free_pool;
219         }
220
221         container->pools[container->index_free++] = pool;
222         return 0;
223
224 free_pool:
        /* unlink the pool from the container list before freeing it */
        TAILQ_REMOVE(&container->pool_list, pool, next);
225         rte_free(pool);
226         return ret;
227 }
228
229 static int
230 ice_fdir_counter_init(struct ice_pf *pf)
231 {
232         struct ice_hw *hw = ICE_PF_TO_HW(pf);
233         struct ice_fdir_info *fdir_info = &pf->fdir;
234         struct ice_fdir_counter_pool_container *container =
235                                 &fdir_info->counter;
236         uint32_t cnt_index, len;
237         int ret;
238
239         TAILQ_INIT(&container->pool_list);
240
241         cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
242         len = ICE_FDIR_COUNTERS_PER_BLOCK;
243
244         ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
245         if (ret) {
246                 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
247                 return ret;
248         }
249
250         return 0;
251 }
252
253 static int
254 ice_fdir_counter_release(struct ice_pf *pf)
255 {
256         struct ice_fdir_info *fdir_info = &pf->fdir;
257         struct ice_fdir_counter_pool_container *container =
258                                 &fdir_info->counter;
259         uint8_t i;
260
261         for (i = 0; i < container->index_free; i++) {
262                 rte_free(container->pools[i]);
263                 container->pools[i] = NULL;
264         }
265
266         TAILQ_INIT(&container->pool_list);
267         container->index_free = 0;
268
269         return 0;
270 }
271
272 static struct ice_fdir_counter *
273 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
274                                         *container,
275                                uint32_t id)
276 {
277         struct ice_fdir_counter_pool *pool;
278         struct ice_fdir_counter *counter;
279         int i;
280
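        /* a shared counter is looked up by ID so that several rules can
         * reference the same hardware counter
         */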
281         TAILQ_FOREACH(pool, &container->pool_list, next) {
282                 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
283                         counter = &pool->counters[i];
284
285                         if (counter->shared &&
286                             counter->ref_cnt &&
287                             counter->id == id)
288                                 return counter;
289                 }
290         }
291
292         return NULL;
293 }
294
295 static struct ice_fdir_counter *
296 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
297 {
298         struct ice_hw *hw = ICE_PF_TO_HW(pf);
299         struct ice_fdir_info *fdir_info = &pf->fdir;
300         struct ice_fdir_counter_pool_container *container =
301                                 &fdir_info->counter;
302         struct ice_fdir_counter_pool *pool = NULL;
303         struct ice_fdir_counter *counter_free = NULL;
304
305         if (shared) {
306                 counter_free = ice_fdir_counter_shared_search(container, id);
307                 if (counter_free) {
308                         if (counter_free->ref_cnt + 1 == 0) {
309                                 rte_errno = E2BIG;
310                                 return NULL;
311                         }
312                         counter_free->ref_cnt++;
313                         return counter_free;
314                 }
315         }
316
317         TAILQ_FOREACH(pool, &container->pool_list, next) {
318                 counter_free = TAILQ_FIRST(&pool->counter_list);
319                 if (counter_free)
320                         break;
321                 counter_free = NULL;
322         }
323
324         if (!counter_free) {
325                 PMD_DRV_LOG(ERR, "No free counter found");
326                 return NULL;
327         }
328
329         counter_free->shared = shared;
330         counter_free->id = id;
331         counter_free->ref_cnt = 1;
332         counter_free->pool = pool;
333
334         /* reset statistic counter value */
335         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
336         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
337
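        /* pop the counter from its pool; a pool that just ran empty is
         * moved to the list tail so the next allocation scan reaches
         * non-empty pools first
         */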
338         TAILQ_REMOVE(&pool->counter_list, counter_free, next);
339         if (TAILQ_EMPTY(&pool->counter_list)) {
340                 TAILQ_REMOVE(&container->pool_list, pool, next);
341                 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
342         }
343
344         return counter_free;
345 }
346
347 static void
348 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
349                       struct ice_fdir_counter *counter)
350 {
351         if (!counter)
352                 return;
353
354         if (--counter->ref_cnt == 0) {
355                 struct ice_fdir_counter_pool *pool = counter->pool;
356
357                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
358         }
359 }
360
361 static int
362 ice_fdir_init_filter_list(struct ice_pf *pf)
363 {
364         struct rte_eth_dev *dev = pf->adapter->eth_dev;
365         struct ice_fdir_info *fdir_info = &pf->fdir;
366         char fdir_hash_name[RTE_HASH_NAMESIZE];
367         int ret;
368
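        /* the hash key is the packed filter pattern; EXT_TABLE lets the
         * table grow extra buckets so all ICE_MAX_FDIR_FILTER_NUM keys
         * can be inserted
         */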
369         struct rte_hash_parameters fdir_hash_params = {
370                 .name = fdir_hash_name,
371                 .entries = ICE_MAX_FDIR_FILTER_NUM,
372                 .key_len = sizeof(struct ice_fdir_fltr_pattern),
373                 .hash_func = rte_hash_crc,
374                 .hash_func_init_val = 0,
375                 .socket_id = rte_socket_id(),
376                 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
377         };
378
379         /* Initialize hash */
380         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
381                  "fdir_%s", dev->device->name);
382         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
383         if (!fdir_info->hash_table) {
384                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
385                 return -EINVAL;
386         }
387         fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
388                                           sizeof(*fdir_info->hash_map) *
389                                           ICE_MAX_FDIR_FILTER_NUM,
390                                           0);
391         if (!fdir_info->hash_map) {
392                 PMD_INIT_LOG(ERR,
393                              "Failed to allocate memory for fdir hash map!");
394                 ret = -ENOMEM;
395                 goto err_fdir_hash_map_alloc;
396         }
397         return 0;
398
399 err_fdir_hash_map_alloc:
400         rte_hash_free(fdir_info->hash_table);
401
402         return ret;
403 }
404
405 static void
406 ice_fdir_release_filter_list(struct ice_pf *pf)
407 {
408         struct ice_fdir_info *fdir_info = &pf->fdir;
409
410         if (fdir_info->hash_map)
411                 rte_free(fdir_info->hash_map);
412         if (fdir_info->hash_table)
413                 rte_hash_free(fdir_info->hash_table);
414
415         fdir_info->hash_map = NULL;
416         fdir_info->hash_table = NULL;
417 }
418
419 /*
420  * ice_fdir_setup - reserve and initialize the Flow Director resources
421  * @pf: board private structure
422  */
423 static int
424 ice_fdir_setup(struct ice_pf *pf)
425 {
426         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
427         struct ice_hw *hw = ICE_PF_TO_HW(pf);
428         const struct rte_memzone *mz = NULL;
429         char z_name[RTE_MEMZONE_NAMESIZE];
430         struct ice_vsi *vsi;
431         int err = ICE_SUCCESS;
432
433         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
434                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
435                 return -ENOTSUP;
436         }
437
438         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
439                     " fd_fltr_best_effort = %u.",
440                     hw->func_caps.fd_fltr_guar,
441                     hw->func_caps.fd_fltr_best_effort);
442
443         if (pf->fdir.fdir_vsi) {
444                 PMD_DRV_LOG(INFO, "FDIR has already been initialized.");
445                 return ICE_SUCCESS;
446         }
447
448         /* make new FDIR VSI */
449         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
450         if (!vsi) {
451                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
452                 return -EINVAL;
453         }
454         pf->fdir.fdir_vsi = vsi;
455
456         err = ice_fdir_init_filter_list(pf);
457         if (err) {
458                 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
459                 return -EINVAL;
460         }
461
462         err = ice_fdir_counter_init(pf);
463         if (err) {
464                 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
465                 return -EINVAL;
466         }
467
468         /* FDIR TX queue setup */
469         err = ice_fdir_setup_tx_resources(pf);
470         if (err) {
471                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
472                 goto fail_setup_tx;
473         }
474
475         /* FDIR RX queue setup */
476         err = ice_fdir_setup_rx_resources(pf);
477         if (err) {
478                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
479                 goto fail_setup_rx;
480         }
481
482         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
483         if (err) {
484                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
485                 goto fail_mem;
486         }
487
488         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
489         if (err) {
490                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
491                 goto fail_mem;
492         }
493
494         /* Enable FDIR MSIX interrupt */
495         vsi->nb_used_qps = 1;
496         ice_vsi_queues_bind_intr(vsi);
497         ice_vsi_enable_queues_intr(vsi);
498
499         /* reserve memory for the fdir programming packet */
500         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
501                  ICE_FDIR_MZ_NAME,
502                  eth_dev->data->port_id);
503         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
504         if (!mz) {
505                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
506                             "flow director program packet.");
507                 err = -ENOMEM;
508                 goto fail_mem;
509         }
510         pf->fdir.prg_pkt = mz->addr;
511         pf->fdir.dma_addr = mz->iova;
512         pf->fdir.mz = mz;
513
514         err = ice_fdir_prof_alloc(hw);
515         if (err) {
516                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
517                             "flow director profile.");
518                 err = -ENOMEM;
519                 goto fail_prof;
520         }
521
522         PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
523                     vsi->base_queue);
524         return ICE_SUCCESS;
525
526 fail_prof:
527         rte_memzone_free(pf->fdir.mz);
528         pf->fdir.mz = NULL;
529 fail_mem:
530         ice_rx_queue_release(pf->fdir.rxq);
531         pf->fdir.rxq = NULL;
532 fail_setup_rx:
533         ice_tx_queue_release(pf->fdir.txq);
534         pf->fdir.txq = NULL;
535 fail_setup_tx:
536         ice_release_vsi(vsi);
537         pf->fdir.fdir_vsi = NULL;
538         return err;
539 }
540
541 static void
542 ice_fdir_prof_free(struct ice_hw *hw)
543 {
544         enum ice_fltr_ptype ptype;
545
546         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
547              ptype < ICE_FLTR_PTYPE_MAX;
548              ptype++) {
549                 rte_free(hw->fdir_prof[ptype]);
550                 hw->fdir_prof[ptype] = NULL;
551         }
552
553         rte_free(hw->fdir_prof);
554         hw->fdir_prof = NULL;
555 }
556
557 /* Remove a profile for some filter type */
558 static void
559 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
560 {
561         struct ice_hw *hw = ICE_PF_TO_HW(pf);
562         struct ice_fd_hw_prof *hw_prof;
563         uint64_t prof_id;
564         uint16_t vsi_num;
565         int i;
566
567         if (!hw->fdir_prof || !hw->fdir_prof[ptype])
568                 return;
569
570         hw_prof = hw->fdir_prof[ptype];
571
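        /* tunnel profiles share the ID space with non-tunnel ones,
         * offset by ICE_FLTR_PTYPE_MAX
         */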
572         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
573         for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
574                 if (hw_prof->entry_h[i][is_tunnel]) {
575                         vsi_num = ice_get_hw_vsi_num(hw,
576                                                      hw_prof->vsi_h[i]);
577                         ice_rem_prof_id_flow(hw, ICE_BLK_FD,
578                                              vsi_num, prof_id);
579                         ice_flow_rem_entry(hw,
580                                            hw_prof->entry_h[i][is_tunnel]);
581                         hw_prof->entry_h[i][is_tunnel] = 0;
582                 }
583         }
584         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
585         rte_free(hw_prof->fdir_seg[is_tunnel]);
586         hw_prof->fdir_seg[is_tunnel] = NULL;
587
588         for (i = 0; i < hw_prof->cnt; i++)
589                 hw_prof->vsi_h[i] = 0;
590         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
591 }
592
593 /* Remove all created profiles */
594 static void
595 ice_fdir_prof_rm_all(struct ice_pf *pf)
596 {
597         enum ice_fltr_ptype ptype;
598
599         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
600              ptype < ICE_FLTR_PTYPE_MAX;
601              ptype++) {
602                 ice_fdir_prof_rm(pf, ptype, false);
603                 ice_fdir_prof_rm(pf, ptype, true);
604         }
605 }
606
607 /*
608  * ice_fdir_teardown - release the Flow Director resources
609  * @pf: board private structure
610  */
611 static void
612 ice_fdir_teardown(struct ice_pf *pf)
613 {
614         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
615         struct ice_hw *hw = ICE_PF_TO_HW(pf);
616         struct ice_vsi *vsi;
617         int err;
618
619         vsi = pf->fdir.fdir_vsi;
620         if (!vsi)
621                 return;
622
623         ice_vsi_disable_queues_intr(vsi);
624
625         err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
626         if (err)
627                 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
628
629         err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
630         if (err)
631                 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
632
633         err = ice_fdir_counter_release(pf);
634         if (err)
635                 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
636
637         ice_fdir_release_filter_list(pf);
638
639         ice_tx_queue_release(pf->fdir.txq);
640         pf->fdir.txq = NULL;
641         ice_rx_queue_release(pf->fdir.rxq);
642         pf->fdir.rxq = NULL;
643         ice_fdir_prof_rm_all(pf);
644         ice_fdir_prof_free(hw);
645         ice_release_vsi(vsi);
646         pf->fdir.fdir_vsi = NULL;
647
648         if (pf->fdir.mz) {
649                 err = rte_memzone_free(pf->fdir.mz);
650                 pf->fdir.mz = NULL;
651                 if (err)
652                         PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
653         }
654 }
655
656 static int
657 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
658                            enum ice_fltr_ptype ptype,
659                            struct ice_flow_seg_info *seg,
660                            bool is_tunnel)
661 {
662         struct ice_hw *hw = ICE_PF_TO_HW(pf);
663         struct ice_flow_seg_info *ori_seg;
664         struct ice_fd_hw_prof *hw_prof;
665
666         hw_prof = hw->fdir_prof[ptype];
667         ori_seg = hw_prof->fdir_seg[is_tunnel];
668
669         /* profile does not exist */
670         if (!ori_seg)
671                 return 0;
672
673         /* if no input set conflict, return -EEXIST */
674         if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
675             (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
676                 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
677                             ptype);
678                 return -EEXIST;
679         }
680
681         /* a rule with input set conflict already exist, so give up */
682         if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
683                 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
684                             ptype);
685                 return -EINVAL;
686         }
687
688         /* it's safe to delete an empty profile */
689         ice_fdir_prof_rm(pf, ptype, is_tunnel);
690         return 0;
691 }
692
693 static bool
694 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
695                                enum ice_fltr_ptype ptype,
696                                bool is_tunnel)
697 {
698         struct ice_hw *hw = ICE_PF_TO_HW(pf);
699         struct ice_fd_hw_prof *hw_prof;
700         struct ice_flow_seg_info *seg;
701
702         hw_prof = hw->fdir_prof[ptype];
703         seg = hw_prof->fdir_seg[is_tunnel];
704
705         /* profile does not exist */
706         if (!seg)
707                 return true;
708
709         /* profile exists and rule exists, fail to resolve the conflict */
710         if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
711                 return false;
712
713         /* it's safe to delete an empty profile */
714         ice_fdir_prof_rm(pf, ptype, is_tunnel);
715
716         return true;
717 }
718
719 static int
720 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
721                              enum ice_fltr_ptype ptype,
722                              bool is_tunnel)
723 {
724         enum ice_fltr_ptype cflct_ptype;
725
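        /* a wide "OTHER" profile overlaps the L4-specific profiles of
         * the same IP version, so the overlapping profiles are removed
         * if empty and the request fails if they still hold rules
         */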
726         switch (ptype) {
727         /* IPv4 */
728         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
729         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
730         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
731                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
732                 if (!ice_fdir_prof_resolve_conflict
733                         (pf, cflct_ptype, is_tunnel))
734                         goto err;
735                 break;
736         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
737                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
738                 if (!ice_fdir_prof_resolve_conflict
739                         (pf, cflct_ptype, is_tunnel))
740                         goto err;
741                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
742                 if (!ice_fdir_prof_resolve_conflict
743                         (pf, cflct_ptype, is_tunnel))
744                         goto err;
745                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
746                 if (!ice_fdir_prof_resolve_conflict
747                         (pf, cflct_ptype, is_tunnel))
748                         goto err;
749                 break;
750         /* IPv4 GTPU */
751         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
752         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
753         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
754                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
755                 if (!ice_fdir_prof_resolve_conflict
756                         (pf, cflct_ptype, is_tunnel))
757                         goto err;
758                 break;
759         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
760                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
761                 if (!ice_fdir_prof_resolve_conflict
762                         (pf, cflct_ptype, is_tunnel))
763                         goto err;
764                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
765                 if (!ice_fdir_prof_resolve_conflict
766                         (pf, cflct_ptype, is_tunnel))
767                         goto err;
768                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
769                 if (!ice_fdir_prof_resolve_conflict
770                         (pf, cflct_ptype, is_tunnel))
771                         goto err;
772                 break;
773         /* IPv6 */
774         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
775         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
776         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
777                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
778                 if (!ice_fdir_prof_resolve_conflict
779                         (pf, cflct_ptype, is_tunnel))
780                         goto err;
781                 break;
782         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
783                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
784                 if (!ice_fdir_prof_resolve_conflict
785                         (pf, cflct_ptype, is_tunnel))
786                         goto err;
787                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
788                 if (!ice_fdir_prof_resolve_conflict
789                         (pf, cflct_ptype, is_tunnel))
790                         goto err;
791                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
792                 if (!ice_fdir_prof_resolve_conflict
793                         (pf, cflct_ptype, is_tunnel))
794                         goto err;
795                 break;
796         default:
797                 break;
798         }
799         return 0;
800 err:
801         PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
802                     ptype, cflct_ptype);
803         return -EINVAL;
804 }
805
806 static int
807 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
808                      struct ice_vsi *ctrl_vsi,
809                      struct ice_flow_seg_info *seg,
810                      enum ice_fltr_ptype ptype,
811                      bool is_tunnel)
812 {
813         struct ice_hw *hw = ICE_PF_TO_HW(pf);
814         enum ice_flow_dir dir = ICE_FLOW_RX;
815         struct ice_fd_hw_prof *hw_prof;
816         struct ice_flow_prof *prof;
817         uint64_t entry_1 = 0;
818         uint64_t entry_2 = 0;
819         uint16_t vsi_num;
820         int ret;
821         uint64_t prof_id;
822
823         /* check if have input set conflict on current profile. */
824         ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
825         if (ret)
826                 return ret;
827
828         /* check if the profile is conflict with other profile. */
829         ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
830         if (ret)
831                 return ret;
832
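        /* one profile gets two entries: one for the main VSI that
         * receives matched traffic and one for the control VSI used to
         * inject programming packets
         */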
833         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
834         ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
835                                 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
836         if (ret)
837                 return ret;
838         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
839                                  vsi->idx, ICE_FLOW_PRIO_NORMAL,
840                                  seg, NULL, 0, &entry_1);
841         if (ret) {
842                 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
843                             ptype);
844                 goto err_add_prof;
845         }
846         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
847                                  ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
848                                  seg, NULL, 0, &entry_2);
849         if (ret) {
850                 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
851                             ptype);
852                 goto err_add_entry;
853         }
854
855         hw_prof = hw->fdir_prof[ptype];
856         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
857         hw_prof->cnt = 0;
858         hw_prof->fdir_seg[is_tunnel] = seg;
859         hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
860         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
861         pf->hw_prof_cnt[ptype][is_tunnel]++;
862         hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
863         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
864         pf->hw_prof_cnt[ptype][is_tunnel]++;
865
866         return ret;
867
868 err_add_entry:
869         vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
870         ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
871         ice_flow_rem_entry(hw, entry_1);
872 err_add_prof:
873         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
874
875         return ret;
876 }
877
878 static void
879 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
880 {
881         uint32_t i, j;
882
883         struct ice_inset_map {
884                 uint64_t inset;
885                 enum ice_flow_field fld;
886         };
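        /* map of rte_flow input-set bits to HW flow fields; tunnel
         * inner fields reuse the same HW field indices as outer ones
         */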
887         static const struct ice_inset_map ice_inset_map[] = {
888                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
889                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
890                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
891                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
892                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
893                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
894                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
895                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
896                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
897                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
898                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
899                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
900                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
901                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
902                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
903                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
904                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
905                 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
906                 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
907                 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
908                 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
909                 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
910                 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
911                 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
912                 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
913                 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
914                 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
915         };
916
917         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
918                 if ((inset & ice_inset_map[i].inset) ==
919                     ice_inset_map[i].inset)
920                         field[j++] = ice_inset_map[i].fld;
921         }
922 }
923
924 static int
925 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
926                         uint64_t input_set, bool is_tunnel)
927 {
928         struct ice_flow_seg_info *seg;
929         struct ice_flow_seg_info *seg_tun = NULL;
930         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
931         int i, ret;
932
933         if (!input_set)
934                 return -EINVAL;
935
936         seg = (struct ice_flow_seg_info *)
937                 ice_malloc(hw, sizeof(*seg));
938         if (!seg) {
939                 PMD_DRV_LOG(ERR, "No memory can be allocated");
940                 return -ENOMEM;
941         }
942
943         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
944                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
945         ice_fdir_input_set_parse(input_set, field);
946
947         switch (flow) {
948         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
949                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
950                                   ICE_FLOW_SEG_HDR_IPV4);
951                 break;
952         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
953                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
954                                   ICE_FLOW_SEG_HDR_IPV4);
955                 break;
956         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
957                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
958                                   ICE_FLOW_SEG_HDR_IPV4);
959                 break;
960         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
961                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
962                 break;
963         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
964                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
965                                   ICE_FLOW_SEG_HDR_IPV6);
966                 break;
967         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
968                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
969                                   ICE_FLOW_SEG_HDR_IPV6);
970                 break;
971         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
972                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
973                                   ICE_FLOW_SEG_HDR_IPV6);
974                 break;
975         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
976                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
977                 break;
978         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
979         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
980         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
981         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
982                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
983                                   ICE_FLOW_SEG_HDR_IPV4);
984                 break;
985         default:
986                 PMD_DRV_LOG(ERR, "Unsupported filter type.");
987                 break;
988         }
989
990         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
991                 ice_flow_set_fld(seg, field[i],
992                                  ICE_FLOW_FLD_OFF_INVAL,
993                                  ICE_FLOW_FLD_OFF_INVAL,
994                                  ICE_FLOW_FLD_OFF_INVAL, false);
995         }
996
997         if (!is_tunnel) {
998                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
999                                            seg, flow, false);
1000         } else {
1001                 seg_tun = (struct ice_flow_seg_info *)
1002                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
1003                 if (!seg_tun) {
1004                         PMD_DRV_LOG(ERR, "No memory can be allocated");
1005                         rte_free(seg);
1006                         return -ENOMEM;
1007                 }
1008                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                /* seg only staged the inner segment; seg_tun owns the
                 * data now, so release it to avoid leaking it on the
                 * success path
                 */
                rte_free(seg);
1009                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1010                                            seg_tun, flow, true);
1011         }
1012
1013         if (!ret) {
1014                 return ret;
1015         } else if (ret < 0) {
1016                 if (is_tunnel)
1017                         rte_free(seg_tun);
1018                 else
                        rte_free(seg);
1019                 return (ret == -EEXIST) ? 0 : ret;
1020         } else {
1021                 return ret;
1022         }
1023 }
1024
1025 static void
1026 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1027                     bool is_tunnel, bool add)
1028 {
1029         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1030         int cnt;
1031
1032         cnt = (add) ? 1 : -1;
1033         hw->fdir_active_fltr += cnt;
1034         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1035                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1036         else
1037                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1038 }
1039
1040 static int
1041 ice_fdir_init(struct ice_adapter *ad)
1042 {
1043         struct ice_pf *pf = &ad->pf;
1044         struct ice_flow_parser *parser;
1045         int ret;
1046
1047         ret = ice_fdir_setup(pf);
1048         if (ret)
1049                 return ret;
1050
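        /* the parser depends on the loaded DDP package: the comms
         * package extends the OS default pattern set with GTPU
         */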
1051         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1052                 parser = &ice_fdir_parser_comms;
1053         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1054                 parser = &ice_fdir_parser_os;
1055         else
1056                 return -EINVAL;
1057
1058         return ice_register_parser(parser, ad);
1059 }
1060
1061 static void
1062 ice_fdir_uninit(struct ice_adapter *ad)
1063 {
1064         struct ice_pf *pf = &ad->pf;
1065         struct ice_flow_parser *parser;
1066
1067         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1068                 parser = &ice_fdir_parser_comms;
1069         else
1070                 parser = &ice_fdir_parser_os;
1071
1072         ice_unregister_parser(parser, ad);
1073
1074         ice_fdir_teardown(pf);
1075 }
1076
1077 static int
1078 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1079 {
1080         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1081                 return 1;
1082         else
1083                 return 0;
1084 }
1085
1086 static int
1087 ice_fdir_add_del_filter(struct ice_pf *pf,
1088                         struct ice_fdir_filter_conf *filter,
1089                         bool add)
1090 {
1091         struct ice_fltr_desc desc;
1092         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1093         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1094         bool is_tun;
1095         int ret;
1096
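        /* a rule is (un)programmed by building a filter descriptor plus
         * a matching dummy packet and pushing both through the FDIR
         * programming queue via ice_fdir_programming()
         */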
1097         filter->input.dest_vsi = pf->main_vsi->idx;
1098
1099         memset(&desc, 0, sizeof(desc));
1100         ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1101
1102         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1103
1104         memset(pkt, 0, ICE_FDIR_PKT_LEN);
1105         ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1106         if (ret) {
1107                 PMD_DRV_LOG(ERR, "Failed to generate FDIR programming packet");
1108                 return -EINVAL;
1109         }
1110
1111         return ice_fdir_programming(pf, &desc);
1112 }
1113
1114 static void
1115 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1116                           struct ice_fdir_filter_conf *filter)
1117 {
1118         struct ice_fdir_fltr *input = &filter->input;
1119         memset(key, 0, sizeof(*key));
1120
1121         key->flow_type = input->flow_type;
1122         rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1123         rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1124         rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1125         rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1126
1127         rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1128         rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1129
1130         key->tunnel_type = filter->tunnel_type;
1131 }
1132
1133 /* Check if there exists the flow director filter */
1134 static struct ice_fdir_filter_conf *
1135 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1136                         const struct ice_fdir_fltr_pattern *key)
1137 {
1138         int ret;
1139
1140         ret = rte_hash_lookup(fdir_info->hash_table, key);
1141         if (ret < 0)
1142                 return NULL;
1143
1144         return fdir_info->hash_map[ret];
1145 }
1146
1147 /* Add a flow director entry into the SW list */
1148 static int
1149 ice_fdir_entry_insert(struct ice_pf *pf,
1150                       struct ice_fdir_filter_conf *entry,
1151                       struct ice_fdir_fltr_pattern *key)
1152 {
1153         struct ice_fdir_info *fdir_info = &pf->fdir;
1154         int ret;
1155
1156         ret = rte_hash_add_key(fdir_info->hash_table, key);
1157         if (ret < 0) {
1158                 PMD_DRV_LOG(ERR,
1159                             "Failed to insert fdir entry into hash table: %d!",
1160                             ret);
1161                 return ret;
1162         }
1163         fdir_info->hash_map[ret] = entry;
1164
1165         return 0;
1166 }
1167
1168 /* Delete a flow director entry from the SW list */
1169 static int
1170 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1171 {
1172         struct ice_fdir_info *fdir_info = &pf->fdir;
1173         int ret;
1174
1175         ret = rte_hash_del_key(fdir_info->hash_table, key);
1176         if (ret < 0) {
1177                 PMD_DRV_LOG(ERR,
1178                             "Failed to delete fdir filter from hash table: %d!",
1179                             ret);
1180                 return ret;
1181         }
1182         fdir_info->hash_map[ret] = NULL;
1183
1184         return 0;
1185 }
1186
1187 static int
1188 ice_fdir_create_filter(struct ice_adapter *ad,
1189                        struct rte_flow *flow,
1190                        void *meta,
1191                        struct rte_flow_error *error)
1192 {
1193         struct ice_pf *pf = &ad->pf;
1194         struct ice_fdir_filter_conf *filter = meta;
1195         struct ice_fdir_info *fdir_info = &pf->fdir;
1196         struct ice_fdir_filter_conf *entry, *node;
1197         struct ice_fdir_fltr_pattern key;
1198         bool is_tun;
1199         int ret;
1200
1201         ice_fdir_extract_fltr_key(&key, filter);
1202         node = ice_fdir_entry_lookup(fdir_info, &key);
1203         if (node) {
1204                 rte_flow_error_set(error, EEXIST,
1205                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1206                                    "Rule already exists!");
1207                 return -rte_errno;
1208         }
1209
1210         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1211         if (!entry) {
1212                 rte_flow_error_set(error, ENOMEM,
1213                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1214                                    "Failed to allocate memory");
1215                 return -rte_errno;
1216         }
1217
1218         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1219
1220         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1221                         filter->input_set, is_tun);
1222         if (ret) {
1223                 rte_flow_error_set(error, -ret,
1224                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1225                                    "Profile configure failed.");
1226                 goto free_entry;
1227         }
1228
1229         /* alloc counter for FDIR */
1230         if (filter->input.cnt_ena) {
1231                 struct rte_flow_action_count *act_count = &filter->act_count;
1232
1233                 filter->counter = ice_fdir_counter_alloc(pf,
1234                                                          act_count->shared,
1235                                                          act_count->id);
1236                 if (!filter->counter) {
1237                         rte_flow_error_set(error, EINVAL,
1238                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1239                                         "Failed to alloc FDIR counter.");
1240                         goto free_entry;
1241                 }
1242                 filter->input.cnt_index = filter->counter->hw_index;
1243         }
1244
1245         ret = ice_fdir_add_del_filter(pf, filter, true);
1246         if (ret) {
1247                 rte_flow_error_set(error, -ret,
1248                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1249                                    "Add filter rule failed.");
1250                 goto free_counter;
1251         }
1252
1253         rte_memcpy(entry, filter, sizeof(*entry));
1254         ret = ice_fdir_entry_insert(pf, entry, &key);
1255         if (ret) {
1256                 rte_flow_error_set(error, -ret,
1257                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1258                                    "Insert entry to table failed.");
1259                 goto free_entry;
1260         }
1261
1262         flow->rule = entry;
1263         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1264
1265         return 0;
1266
1267 free_counter:
1268         if (filter->counter) {
1269                 ice_fdir_counter_free(pf, filter->counter);
1270                 filter->counter = NULL;
1271         }
1272
1273 free_entry:
1274         rte_free(entry);
1275         return -rte_errno;
1276 }
1277
1278 static int
1279 ice_fdir_destroy_filter(struct ice_adapter *ad,
1280                         struct rte_flow *flow,
1281                         struct rte_flow_error *error)
1282 {
1283         struct ice_pf *pf = &ad->pf;
1284         struct ice_fdir_info *fdir_info = &pf->fdir;
1285         struct ice_fdir_filter_conf *filter, *entry;
1286         struct ice_fdir_fltr_pattern key;
1287         bool is_tun;
1288         int ret;
1289
1290         filter = (struct ice_fdir_filter_conf *)flow->rule;
1291
1292         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1293
1294         if (filter->counter) {
1295                 ice_fdir_counter_free(pf, filter->counter);
1296                 filter->counter = NULL;
1297         }
1298
1299         ice_fdir_extract_fltr_key(&key, filter);
1300         entry = ice_fdir_entry_lookup(fdir_info, &key);
1301         if (!entry) {
1302                 rte_flow_error_set(error, ENOENT,
1303                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1304                                    "Can't find entry.");
1305                 return -rte_errno;
1306         }
1307
1308         ret = ice_fdir_add_del_filter(pf, filter, false);
1309         if (ret) {
1310                 rte_flow_error_set(error, -ret,
1311                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1312                                    "Del filter rule failed.");
1313                 return -rte_errno;
1314         }
1315
1316         ret = ice_fdir_entry_del(pf, &key);
1317         if (ret) {
1318                 rte_flow_error_set(error, -ret,
1319                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1320                                    "Remove entry from table failed.");
1321                 return -rte_errno;
1322         }
1323
1324         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1325         flow->rule = NULL;
1326
1327         rte_free(filter);
1328
1329         return 0;
1330 }
1331
1332 static int
1333 ice_fdir_query_count(struct ice_adapter *ad,
1334                       struct rte_flow *flow,
1335                       struct rte_flow_query_count *flow_stats,
1336                       struct rte_flow_error *error)
1337 {
1338         struct ice_pf *pf = &ad->pf;
1339         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1340         struct ice_fdir_filter_conf *filter = flow->rule;
1341         struct ice_fdir_counter *counter = filter->counter;
1342         uint64_t hits_lo, hits_hi;
1343
1344         if (!counter) {
1345                 rte_flow_error_set(error, EINVAL,
1346                                   RTE_FLOW_ERROR_TYPE_ACTION,
1347                                   NULL,
1348                                   "FDIR counters not available");
1349                 return -rte_errno;
1350         }
1351
1352         /*
1353          * Reading the low 32-bits latches the high 32-bits into a shadow
1354          * register. Reading the high 32-bit returns the value in the
1355          * shadow register.
1356          */
1357         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1358         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1359
1360         flow_stats->hits_set = 1;
1361         flow_stats->hits = hits_lo | (hits_hi << 32);
1362         flow_stats->bytes_set = 0;
1363         flow_stats->bytes = 0;
1364
1365         if (flow_stats->reset) {
1366                 /* reset statistic counter value */
1367                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1368                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1369         }
1370
1371         return 0;
1372 }
1373
1374 static struct ice_flow_engine ice_fdir_engine = {
1375         .init = ice_fdir_init,
1376         .uninit = ice_fdir_uninit,
1377         .create = ice_fdir_create_filter,
1378         .destroy = ice_fdir_destroy_filter,
1379         .query_count = ice_fdir_query_count,
1380         .type = ICE_FLOW_ENGINE_FDIR,
1381 };
1382
1383 static int
1384 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1385                               struct rte_flow_error *error,
1386                               const struct rte_flow_action *act,
1387                               struct ice_fdir_filter_conf *filter)
1388 {
1389         const struct rte_flow_action_rss *rss = act->conf;
1390         uint32_t i;
1391
1392         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1393                 rte_flow_error_set(error, EINVAL,
1394                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1395                                    "Invalid action.");
1396                 return -rte_errno;
1397         }
1398
1399         if (rss->queue_num <= 1) {
1400                 rte_flow_error_set(error, EINVAL,
1401                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1402                                    "Queue region size can't be 0 or 1.");
1403                 return -rte_errno;
1404         }
1405
1406         /* check if queue index for queue region is continuous */
1407         for (i = 0; i < rss->queue_num - 1; i++) {
1408                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1409                         rte_flow_error_set(error, EINVAL,
1410                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1411                                            "Discontinuous queue region");
1412                         return -rte_errno;
1413                 }
1414         }
1415
1416         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1417                 rte_flow_error_set(error, EINVAL,
1418                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1419                                    "Invalid queue region indexes.");
1420                 return -rte_errno;
1421         }
1422
1423         if (!(rte_is_power_of_2(rss->queue_num) &&
1424              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1425                 rte_flow_error_set(error, EINVAL,
1426                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1427                                    "The region size should be any of the following values: "
1428                                    "2, 4, 8, 16, 32, 64, 128, as long as the total number "
1429                                    "of queues does not exceed the VSI allocation.");
1430                 return -rte_errno;
1431         }
1432
1433         filter->input.q_index = rss->queue[0];
1434         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1435         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1436
1437         return 0;
1438 }
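
     /*
      * Example of an action this parser accepts (testpmd flow syntax,
      * illustrative): a region of four contiguous queues starting at 0,
      * a power of two no larger than ICE_FDIR_MAX_QREGION_SIZE:
      *
      *     flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end \
      *             actions rss queues 0 1 2 3 end / end
      *
      * q_region stores the log2 of the region size, hence
      * rte_fls_u32(4) - 1 == 2.
      */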
1439
1440 static int
1441 ice_fdir_parse_action(struct ice_adapter *ad,
1442                       const struct rte_flow_action actions[],
1443                       struct rte_flow_error *error,
1444                       struct ice_fdir_filter_conf *filter)
1445 {
1446         struct ice_pf *pf = &ad->pf;
1447         const struct rte_flow_action_queue *act_q;
1448         const struct rte_flow_action_mark *mark_spec = NULL;
1449         const struct rte_flow_action_count *act_count;
1450         uint32_t dest_num = 0;
1451         uint32_t mark_num = 0;
1452         uint32_t counter_num = 0;
1453         int ret;
1454
1455         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1456                 switch (actions->type) {
1457                 case RTE_FLOW_ACTION_TYPE_VOID:
1458                         break;
1459                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1460                         dest_num++;
1461
1462                         act_q = actions->conf;
1463                         filter->input.q_index = act_q->index;
1464                         if (filter->input.q_index >=
1465                                         pf->dev_data->nb_rx_queues) {
1466                                 rte_flow_error_set(error, EINVAL,
1467                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1468                                                    actions,
1469                                                    "Invalid queue for FDIR.");
1470                                 return -rte_errno;
1471                         }
1472                         filter->input.dest_ctl =
1473                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1474                         break;
1475                 case RTE_FLOW_ACTION_TYPE_DROP:
1476                         dest_num++;
1477
1478                         filter->input.dest_ctl =
1479                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1480                         break;
1481                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1482                         dest_num++;
1483
1484                         filter->input.dest_ctl =
1485                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1486                         filter->input.q_index = 0;
1487                         break;
1488                 case RTE_FLOW_ACTION_TYPE_RSS:
1489                         dest_num++;
1490
1491                         ret = ice_fdir_parse_action_qregion(pf,
1492                                                 error, actions, filter);
1493                         if (ret)
1494                                 return ret;
1495                         break;
1496                 case RTE_FLOW_ACTION_TYPE_MARK:
1497                         mark_num++;
1498
1499                         mark_spec = actions->conf;
1500                         filter->input.fltr_id = mark_spec->id;
1501                         break;
1502                 case RTE_FLOW_ACTION_TYPE_COUNT:
1503                         counter_num++;
1504
1505                         act_count = actions->conf;
1506                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1507                         rte_memcpy(&filter->act_count, act_count,
1508                                                 sizeof(filter->act_count));
1509
1510                         break;
1511                 default:
1512                         rte_flow_error_set(error, EINVAL,
1513                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1514                                    "Invalid action.");
1515                         return -rte_errno;
1516                 }
1517         }
1518
1519         if (dest_num == 0 || dest_num >= 2) {
1520                 rte_flow_error_set(error, EINVAL,
1521                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1522                            "Unsupported action combination: exactly one destination action is required");
1523                 return -rte_errno;
1524         }
1525
1526         if (mark_num >= 2) {
1527                 rte_flow_error_set(error, EINVAL,
1528                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1529                            "Too many mark actions");
1530                 return -rte_errno;
1531         }
1532
1533         if (counter_num >= 2) {
1534                 rte_flow_error_set(error, EINVAL,
1535                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1536                            "Too many count actions");
1537                 return -rte_errno;
1538         }
1539
1540         return 0;
1541 }
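
     /*
      * The checks above allow exactly one destination action among
      * queue/drop/passthru/rss, plus at most one mark and at most one
      * count action. A maximal accepted combination (testpmd flow
      * syntax, illustrative):
      *
      *     ... actions queue index 3 / mark id 0x1234 / count / end
      */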
1542
1543 static int
1544 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1545                        const struct rte_flow_item pattern[],
1546                        struct rte_flow_error *error,
1547                        struct ice_fdir_filter_conf *filter)
1548 {
1549         const struct rte_flow_item *item = pattern;
1550         enum rte_flow_item_type item_type;
1551         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1552         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1553         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1554         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1555         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1556         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1557         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1558         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1559         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1560         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1561         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1562         uint64_t input_set = ICE_INSET_NONE;
1563         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1564         uint8_t ipv6_addr_mask[16] = {
1565                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1566                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1567         };
1568         uint32_t vtc_flow_cpu;
1569
1571         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1572                 if (item->last) {
1573                         rte_flow_error_set(error, EINVAL,
1574                                         RTE_FLOW_ERROR_TYPE_ITEM,
1575                                         item,
1576                                         "Range is not supported");
1577                         return -rte_errno;
1578                 }
1579                 item_type = item->type;
1580
1581                 switch (item_type) {
1582                 case RTE_FLOW_ITEM_TYPE_ETH:
1583                         eth_spec = item->spec;
1584                         eth_mask = item->mask;
1585
1586                         if (eth_spec && eth_mask) {
1587                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1588                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1589                                         rte_flow_error_set(error, EINVAL,
1590                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1591                                                 item,
1592                                                 "Source MAC is not supported");
1593                                         return -rte_errno;
1594                                 }
1595
1596                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1597                                         rte_flow_error_set(error, EINVAL,
1598                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1599                                                 item,
1600                                                 "Invalid mac addr mask");
1601                                         return -rte_errno;
1602                                 }
1603
1604                                 input_set |= ICE_INSET_DMAC;
1605                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1606                                            &eth_spec->dst,
1607                                            RTE_ETHER_ADDR_LEN);
1608                         }
1609                         break;
1610                 case RTE_FLOW_ITEM_TYPE_IPV4:
1611                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1612                         ipv4_spec = item->spec;
1613                         ipv4_mask = item->mask;
1614
1615                         if (ipv4_spec && ipv4_mask) {
1616                                 /* Check IPv4 mask and update input set */
1617                                 if (ipv4_mask->hdr.version_ihl ||
1618                                     ipv4_mask->hdr.total_length ||
1619                                     ipv4_mask->hdr.packet_id ||
1620                                     ipv4_mask->hdr.fragment_offset ||
1621                                     ipv4_mask->hdr.hdr_checksum) {
1622                                         rte_flow_error_set(error, EINVAL,
1623                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1624                                                    item,
1625                                                    "Invalid IPv4 mask.");
1626                                         return -rte_errno;
1627                                 }
1628                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1629                                         input_set |= tunnel_type ?
1630                                                      ICE_INSET_TUN_IPV4_SRC :
1631                                                      ICE_INSET_IPV4_SRC;
1632                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1633                                         input_set |= tunnel_type ?
1634                                                      ICE_INSET_TUN_IPV4_DST :
1635                                                      ICE_INSET_IPV4_DST;
1636                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1637                                         input_set |= ICE_INSET_IPV4_TOS;
1638                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1639                                         input_set |= ICE_INSET_IPV4_TTL;
1640                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1641                                         input_set |= ICE_INSET_IPV4_PROTO;
1642
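                                     /*
                                      * Deliberate src<->dst swap: the spec's
                                      * src_addr is stored as the filter's
                                      * dst_ip and vice versa; the same swap
                                      * is applied to the L4 ports below.
                                      */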
1643                                 filter->input.ip.v4.dst_ip =
1644                                         ipv4_spec->hdr.src_addr;
1645                                 filter->input.ip.v4.src_ip =
1646                                         ipv4_spec->hdr.dst_addr;
1647                                 filter->input.ip.v4.tos =
1648                                         ipv4_spec->hdr.type_of_service;
1649                                 filter->input.ip.v4.ttl =
1650                                         ipv4_spec->hdr.time_to_live;
1651                                 filter->input.ip.v4.proto =
1652                                         ipv4_spec->hdr.next_proto_id;
1653                         }
1654
1655                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1656                         break;
1657                 case RTE_FLOW_ITEM_TYPE_IPV6:
1658                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1659                         ipv6_spec = item->spec;
1660                         ipv6_mask = item->mask;
1661
1662                         if (ipv6_spec && ipv6_mask) {
1663                                 /* Check IPv6 mask and update input set */
1664                                 if (ipv6_mask->hdr.payload_len) {
1665                                         rte_flow_error_set(error, EINVAL,
1666                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1667                                                    item,
1668                                                    "Invalid IPv6 mask");
1669                                         return -rte_errno;
1670                                 }
1671
1672                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1673                                             ipv6_addr_mask,
1674                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1675                                         input_set |= ICE_INSET_IPV6_SRC;
1676                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1677                                             ipv6_addr_mask,
1678                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1679                                         input_set |= ICE_INSET_IPV6_DST;
1680
1681                                 if ((ipv6_mask->hdr.vtc_flow &
1682                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1683                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1684                                         input_set |= ICE_INSET_IPV6_TC;
1685                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1686                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1687                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1688                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1689
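                                     /* Same deliberate src<->dst swap as in
                                      * the IPv4 case above.
                                      */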
1690                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1691                                            ipv6_spec->hdr.src_addr, 16);
1692                                 rte_memcpy(filter->input.ip.v6.src_ip,
1693                                            ipv6_spec->hdr.dst_addr, 16);
1694
1695                                 vtc_flow_cpu =
1696                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1697                                 filter->input.ip.v6.tc =
1698                                         (uint8_t)(vtc_flow_cpu >>
1699                                                   ICE_FDIR_IPV6_TC_OFFSET);
1700                                 filter->input.ip.v6.proto =
1701                                         ipv6_spec->hdr.proto;
1702                                 filter->input.ip.v6.hlim =
1703                                         ipv6_spec->hdr.hop_limits;
1704                         }
1705
1706                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1707                         break;
1708                 case RTE_FLOW_ITEM_TYPE_TCP:
1709                         tcp_spec = item->spec;
1710                         tcp_mask = item->mask;
1711
1712                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1713                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1714                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1715                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1716
1717                         if (tcp_spec && tcp_mask) {
1718                                 /* Check TCP mask and update input set */
1719                                 if (tcp_mask->hdr.sent_seq ||
1720                                     tcp_mask->hdr.recv_ack ||
1721                                     tcp_mask->hdr.data_off ||
1722                                     tcp_mask->hdr.tcp_flags ||
1723                                     tcp_mask->hdr.rx_win ||
1724                                     tcp_mask->hdr.cksum ||
1725                                     tcp_mask->hdr.tcp_urp) {
1726                                         rte_flow_error_set(error, EINVAL,
1727                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1728                                                    item,
1729                                                    "Invalid TCP mask");
1730                                         return -rte_errno;
1731                                 }
1732
1733                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1734                                         input_set |= tunnel_type ?
1735                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1736                                                      ICE_INSET_TCP_SRC_PORT;
1737                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1738                                         input_set |= tunnel_type ?
1739                                                      ICE_INSET_TUN_TCP_DST_PORT :
1740                                                      ICE_INSET_TCP_DST_PORT;
1741
1742                                 /* Get filter info */
1743                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1744                                         filter->input.ip.v4.dst_port =
1745                                                 tcp_spec->hdr.src_port;
1746                                         filter->input.ip.v4.src_port =
1747                                                 tcp_spec->hdr.dst_port;
1748                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1749                                         filter->input.ip.v6.dst_port =
1750                                                 tcp_spec->hdr.src_port;
1751                                         filter->input.ip.v6.src_port =
1752                                                 tcp_spec->hdr.dst_port;
1753                                 }
1754                         }
1755                         break;
1756                 case RTE_FLOW_ITEM_TYPE_UDP:
1757                         udp_spec = item->spec;
1758                         udp_mask = item->mask;
1759
1760                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1761                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1762                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1763                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1764
1765                         if (udp_spec && udp_mask) {
1766                                 /* Check UDP mask and update input set */
1767                                 if (udp_mask->hdr.dgram_len ||
1768                                     udp_mask->hdr.dgram_cksum) {
1769                                         rte_flow_error_set(error, EINVAL,
1770                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1771                                                    item,
1772                                                    "Invalid UDP mask");
1773                                         return -rte_errno;
1774                                 }
1775
1776                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1777                                         input_set |= tunnel_type ?
1778                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1779                                                      ICE_INSET_UDP_SRC_PORT;
1780                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1781                                         input_set |= tunnel_type ?
1782                                                      ICE_INSET_TUN_UDP_DST_PORT :
1783                                                      ICE_INSET_UDP_DST_PORT;
1784
1785                                 /* Get filter info */
1786                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1787                                         filter->input.ip.v4.dst_port =
1788                                                 udp_spec->hdr.src_port;
1789                                         filter->input.ip.v4.src_port =
1790                                                 udp_spec->hdr.dst_port;
1791                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1792                                         filter->input.ip.v6.src_port =
1793                                                 udp_spec->hdr.dst_port;
1794                                         filter->input.ip.v6.dst_port =
1795                                                 udp_spec->hdr.src_port;
1796                                 }
1797                         }
1798                         break;
1799                 case RTE_FLOW_ITEM_TYPE_SCTP:
1800                         sctp_spec = item->spec;
1801                         sctp_mask = item->mask;
1802
1803                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1804                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1805                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1806                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1807
1808                         if (sctp_spec && sctp_mask) {
1809                                 /* Check SCTP mask and update input set */
1810                                 if (sctp_mask->hdr.cksum) {
1811                                         rte_flow_error_set(error, EINVAL,
1812                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1813                                                    item,
1814                                                    "Invalid SCTP mask");
1815                                         return -rte_errno;
1816                                 }
1817
1818                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1819                                         input_set |= tunnel_type ?
1820                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1821                                                      ICE_INSET_SCTP_SRC_PORT;
1822                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1823                                         input_set |= tunnel_type ?
1824                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1825                                                      ICE_INSET_SCTP_DST_PORT;
1826
1827                                 /* Get filter info */
1828                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1829                                         filter->input.ip.v4.dst_port =
1830                                                 sctp_spec->hdr.src_port;
1831                                         filter->input.ip.v4.src_port =
1832                                                 sctp_spec->hdr.dst_port;
1833                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1834                                         filter->input.ip.v6.dst_port =
1835                                                 sctp_spec->hdr.src_port;
1836                                         filter->input.ip.v6.src_port =
1837                                                 sctp_spec->hdr.dst_port;
1838                                 }
1839                         }
1840                         break;
1841                 case RTE_FLOW_ITEM_TYPE_VOID:
1842                         break;
1843                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1844                         l3 = RTE_FLOW_ITEM_TYPE_END;
1845                         vxlan_spec = item->spec;
1846                         vxlan_mask = item->mask;
1847
1848                         if (vxlan_spec || vxlan_mask) {
1849                                 rte_flow_error_set(error, EINVAL,
1850                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1851                                                    item,
1852                                                    "VXLAN spec and mask are not supported");
1853                                 return -rte_errno;
1854                         }
1855
1856                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1857                         break;
1858                 case RTE_FLOW_ITEM_TYPE_GTPU:
1859                         l3 = RTE_FLOW_ITEM_TYPE_END;
1860                         gtp_spec = item->spec;
1861                         gtp_mask = item->mask;
1862
1863                         if (gtp_spec && gtp_mask) {
1864                                 if (gtp_mask->v_pt_rsv_flags ||
1865                                     gtp_mask->msg_type ||
1866                                     gtp_mask->msg_len) {
1867                                         rte_flow_error_set(error, EINVAL,
1868                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1869                                                    item,
1870                                                    "Invalid GTP mask");
1871                                         return -rte_errno;
1872                                 }
1873
1874                                 if (gtp_mask->teid == UINT32_MAX)
1875                                         input_set |= ICE_INSET_GTPU_TEID;
1876
1877                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1878                         }
1879                         break;
1880                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1881                         gtp_psc_spec = item->spec;
1882                         gtp_psc_mask = item->mask;
1883
1884                         if (gtp_psc_spec && gtp_psc_mask) {
1885                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1886                                         input_set |= ICE_INSET_GTPU_QFI;
1887
1888                                 filter->input.gtpu_data.qfi =
1889                                         gtp_psc_spec->qfi;
1890                         }
1891
1892                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1893                         break;
1894                 default:
1895                         rte_flow_error_set(error, EINVAL,
1896                                    RTE_FLOW_ERROR_TYPE_ITEM,
1897                                    item,
1898                                    "Invalid pattern item.");
1899                         return -rte_errno;
1900                 }
1901         }
1902
1903         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1904                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1905
1906         filter->tunnel_type = tunnel_type;
1907         filter->input.flow_type = flow_type;
1908         filter->input_set = input_set;
1909
1910         return 0;
1911 }
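
     /*
      * Example of a pattern this parser accepts (testpmd flow syntax,
      * illustrative), a fully masked IPv4/UDP tuple:
      *
      *     flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 \
      *             dst is 192.168.0.2 / udp src is 32 dst is 33 / end \
      *             actions queue index 1 / end
      *
      * Only all-ones field masks contribute to input_set; partially
      * masked fields are left out of it.
      */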
1912
1913 static int
1914 ice_fdir_parse(struct ice_adapter *ad,
1915                struct ice_pattern_match_item *array,
1916                uint32_t array_len,
1917                const struct rte_flow_item pattern[],
1918                const struct rte_flow_action actions[],
1919                void **meta,
1920                struct rte_flow_error *error)
1921 {
1922         struct ice_pf *pf = &ad->pf;
1923         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1924         struct ice_pattern_match_item *item = NULL;
1925         uint64_t input_set;
1926         int ret;
1927
1928         memset(filter, 0, sizeof(*filter));
1929         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1930         if (!item)
1931                 return -rte_errno;
1932
1933         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1934         if (ret)
1935                 return ret;
1936         input_set = filter->input_set;
1937         if (!input_set || input_set & ~item->input_set_mask) {
1938                 rte_flow_error_set(error, EINVAL,
1939                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1940                                    pattern,
1941                                    "Invalid input set");
1942                 return -rte_errno;
1943         }
1944
1945         ret = ice_fdir_parse_action(ad, actions, error, filter);
1946         if (ret)
1947                 return ret;
1948
1949         *meta = filter;
1950
1951         return 0;
1952 }
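
     /*
      * Note: *meta points at the PF-wide scratch filter (&pf->fdir.conf)
      * rather than a private allocation, so it is only valid until the
      * next parse; the create step is expected to copy what it needs.
      */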
1953
1954 static struct ice_flow_parser ice_fdir_parser_os = {
1955         .engine = &ice_fdir_engine,
1956         .array = ice_fdir_pattern_os,
1957         .array_len = RTE_DIM(ice_fdir_pattern_os),
1958         .parse_pattern_action = ice_fdir_parse,
1959         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1960 };
1961
1962 static struct ice_flow_parser ice_fdir_parser_comms = {
1963         .engine = &ice_fdir_engine,
1964         .array = ice_fdir_pattern_comms,
1965         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1966         .parse_pattern_action = ice_fdir_parse,
1967         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1968 };
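
     /*
      * Both parsers drive the same FDIR engine: the "os" table holds the
      * patterns available with the default OS DDP package, while the
      * "comms" table (ice_fdir_pattern_comms) also carries the tunnel and
      * GTPU patterns enabled by the comms package; ice_fdir_init()
      * registers the parser that matches the loaded package.
      */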
1969
1970 RTE_INIT(ice_fdir_engine_register)
1971 {
1972         ice_register_flow_engine(&ice_fdir_engine);
1973 }