/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH (\
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_FDIR_INSET_ETH | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_ethertype,            ICE_FDIR_INSET_ETH,                   ICE_INSET_NONE},
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu,        ICE_FDIR_INSET_IPV4_GTPU,             ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,     ICE_FDIR_INSET_IPV4_GTPU_EH,          ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu,        ICE_FDIR_INSET_IPV6_GTPU,             ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh,     ICE_FDIR_INSET_IPV6_GTPU_EH,          ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

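/* Look up the memzone by name first so a repeated setup call reuses the
 * existing reservation instead of failing on a duplicate name.
 */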
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

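/* Allocate the per-flow-type FDIR profile table; on failure, free the
 * entries allocated so far so the caller sees a clean state.
 */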
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

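/* Carve a block of hardware counter indexes [index_start, index_start + len)
 * into a pool and queue every counter on the pool's free list.
 */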
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink before freeing so the container list holds no dangling pool */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

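/* Allocate a flow counter: reuse a matching shared counter when one exists,
 * otherwise take the first free counter and reset its statistics. A pool
 * whose free list runs empty is rotated to the tail of the container list.
 */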
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

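/* Create the software shadow of installed FDIR rules: a CRC-hashed table
 * keyed by ice_fdir_fltr_pattern plus a flat map from hash slot to rule.
 */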
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* input sets match, no conflict: return -EEXIST so the profile is reused */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and a rule uses it; the conflict cannot be resolved */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

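/* Profiles for the L4-specific flow types (UDP/TCP/SCTP) and for the
 * corresponding *_OTHER flow type can conflict with each other: before
 * creating one, remove an empty conflicting profile, and fail if that
 * profile still has rules attached.
 */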
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

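/* Program one FDIR profile into the hardware flow table: add the profile
 * for the given input set, then attach one flow entry for the main VSI and
 * one for the FDIR control VSI, rolling back on any failure.
 */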
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether the profile conflicts with other profiles */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

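/* Translate the PMD's input-set bit-mask into the list of base-code flow
 * field indexes that ice_flow_set_fld() understands.
 */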
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        bool is_tunnel;
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NON_IP_L2:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);
        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        rte_free(seg);
                        return -ENOMEM;
                }
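                /* Tunnel profiles carry two segments: index 0 holds the
                 * (zeroed) outer headers, index 1 the inner fields parsed
                 * above.
                 */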
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        }

        return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->hw.dcf_enabled)
                return;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

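/* Build a programming descriptor plus a dummy packet for the rule and hand
 * both to the FDIR programming queue; the same path adds and deletes
 * filters depending on the 'add' flag.
 */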
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, filter->tunnel_type);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }
1412         /*
1413          * Reading the low 32-bits latches the high 32-bits into a shadow
1414          * register. Reading the high 32-bits returns the value in the
1415          * shadow register.
1416          */
1417         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1418         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1419
1420         flow_stats->hits_set = 1;
1421         flow_stats->hits = hits_lo | (hits_hi << 32);
1422         flow_stats->bytes_set = 0;
1423         flow_stats->bytes = 0;
1424
1425         if (flow_stats->reset) {
1426                 /* reset the statistics counter value */
1427                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1428                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1429         }
1430
1431         return 0;
1432 }
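/*
 * From the application side the counter is read back through the generic
 * query API; a minimal sketch (port_id and flow are assumptions from the
 * application's setup):
 *
 *     struct rte_flow_query_count stats = { .reset = 1 };
 *     struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_query(port_id, flow, &count, &stats, &err) == 0 &&
 *         stats.hits_set)
 *             printf("hits: %" PRIu64 "\n", stats.hits);
 *
 * Setting .reset = 1 additionally clears the hardware counter, as done in
 * the reset branch above.
 */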
1433
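/*
 * Ops table picked up by the generic flow framework; it is registered via
 * the RTE_INIT constructor at the end of this file.
 */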
1434 static struct ice_flow_engine ice_fdir_engine = {
1435         .init = ice_fdir_init,
1436         .uninit = ice_fdir_uninit,
1437         .create = ice_fdir_create_filter,
1438         .destroy = ice_fdir_destroy_filter,
1439         .query_count = ice_fdir_query_count,
1440         .type = ICE_FLOW_ENGINE_FDIR,
1441 };
1442
1443 static int
1444 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1445                               struct rte_flow_error *error,
1446                               const struct rte_flow_action *act,
1447                               struct ice_fdir_filter_conf *filter)
1448 {
1449         const struct rte_flow_action_rss *rss = act->conf;
1450         uint32_t i;
1451
1452         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1453                 rte_flow_error_set(error, EINVAL,
1454                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1455                                    "Invalid action.");
1456                 return -rte_errno;
1457         }
1458
1459         if (rss->queue_num <= 1) {
1460                 rte_flow_error_set(error, EINVAL,
1461                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1462                                    "Queue region size can't be 0 or 1.");
1463                 return -rte_errno;
1464         }
1465
1466         /* check if queue indexes for the queue region are consecutive */
1467         for (i = 0; i < rss->queue_num - 1; i++) {
1468                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1469                         rte_flow_error_set(error, EINVAL,
1470                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1471                                            "Discontinuous queue region");
1472                         return -rte_errno;
1473                 }
1474         }
1475
1476         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1477                 rte_flow_error_set(error, EINVAL,
1478                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1479                                    "Invalid queue region indexes.");
1480                 return -rte_errno;
1481         }
1482
1483         if (!(rte_is_power_of_2(rss->queue_num) &&
1484              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1485                 rte_flow_error_set(error, EINVAL,
1486                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1487                                    "The region size should be any of the following values: "
1488                                    "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
1489                                    "of queues does not exceed the VSI allocation.");
1490                 return -rte_errno;
1491         }
1492
1493         filter->input.q_index = rss->queue[0];
1494         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1495         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1496
1497         return 0;
1498 }
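/*
 * Example of an RSS action that satisfies the checks above (a sketch; the
 * queue numbers are placeholders): queues 8..11 form a consecutive region
 * of power-of-two size, encoded as q_index = 8 and
 * q_region = rte_fls_u32(4) - 1 = 2, i.e. log2 of the region size.
 *
 *     uint16_t queues[] = { 8, 9, 10, 11 };
 *     struct rte_flow_action_rss rss_conf = {
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss_conf,
 *     };
 */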
1499
1500 static int
1501 ice_fdir_parse_action(struct ice_adapter *ad,
1502                       const struct rte_flow_action actions[],
1503                       struct rte_flow_error *error,
1504                       struct ice_fdir_filter_conf *filter)
1505 {
1506         struct ice_pf *pf = &ad->pf;
1507         const struct rte_flow_action_queue *act_q;
1508         const struct rte_flow_action_mark *mark_spec = NULL;
1509         const struct rte_flow_action_count *act_count;
1510         uint32_t dest_num = 0;
1511         uint32_t mark_num = 0;
1512         uint32_t counter_num = 0;
1513         int ret;
1514
1515         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1516                 switch (actions->type) {
1517                 case RTE_FLOW_ACTION_TYPE_VOID:
1518                         break;
1519                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1520                         dest_num++;
1521
1522                         act_q = actions->conf;
1523                         filter->input.q_index = act_q->index;
1524                         if (filter->input.q_index >=
1525                                         pf->dev_data->nb_rx_queues) {
1526                                 rte_flow_error_set(error, EINVAL,
1527                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1528                                                    actions,
1529                                                    "Invalid queue for FDIR.");
1530                                 return -rte_errno;
1531                         }
1532                         filter->input.dest_ctl =
1533                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1534                         break;
1535                 case RTE_FLOW_ACTION_TYPE_DROP:
1536                         dest_num++;
1537
1538                         filter->input.dest_ctl =
1539                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1540                         break;
1541                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1542                         dest_num++;
1543
1544                         filter->input.dest_ctl =
1545                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1546                         break;
1547                 case RTE_FLOW_ACTION_TYPE_RSS:
1548                         dest_num++;
1549
1550                         ret = ice_fdir_parse_action_qregion(pf,
1551                                                 error, actions, filter);
1552                         if (ret)
1553                                 return ret;
1554                         break;
1555                 case RTE_FLOW_ACTION_TYPE_MARK:
1556                         mark_num++;
1557
1558                         mark_spec = actions->conf;
1559                         filter->input.fltr_id = mark_spec->id;
1560                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1561                         break;
1562                 case RTE_FLOW_ACTION_TYPE_COUNT:
1563                         counter_num++;
1564
1565                         act_count = actions->conf;
1566                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1567                         rte_memcpy(&filter->act_count, act_count,
1568                                                 sizeof(filter->act_count));
1569
1570                         break;
1571                 default:
1572                         rte_flow_error_set(error, EINVAL,
1573                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1574                                    "Invalid action.");
1575                         return -rte_errno;
1576                 }
1577         }
1578
1579         if (dest_num >= 2) {
1580                 rte_flow_error_set(error, EINVAL,
1581                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1582                            "Unsupported action combination");
1583                 return -rte_errno;
1584         }
1585
1586         if (mark_num >= 2) {
1587                 rte_flow_error_set(error, EINVAL,
1588                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1589                            "Too many mark actions");
1590                 return -rte_errno;
1591         }
1592
1593         if (counter_num >= 2) {
1594                 rte_flow_error_set(error, EINVAL,
1595                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1596                            "Too many count actions");
1597                 return -rte_errno;
1598         }
1599
1600         if (dest_num + mark_num + counter_num == 0) {
1601                 rte_flow_error_set(error, EINVAL,
1602                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1603                            "Empty action");
1604                 return -rte_errno;
1605         }
1606
1607         /* set default action to PASSTHRU mode, in the "mark/count only" case. */
1608         if (dest_num == 0)
1609                 filter->input.dest_ctl =
1610                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1611
1612         return 0;
1613 }
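/*
 * Example action list accepted by the checks above (a sketch; the mark id
 * and queue index are placeholders): exactly one destination action, at
 * most one mark and one count.
 *
 *     struct rte_flow_action_queue q = { .index = 3 };
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action_count cnt = { 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */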
1614
1615 static int
1616 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1617                        const struct rte_flow_item pattern[],
1618                        struct rte_flow_error *error,
1619                        struct ice_fdir_filter_conf *filter)
1620 {
1621         const struct rte_flow_item *item = pattern;
1622         enum rte_flow_item_type item_type;
1623         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1624         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1625         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1626         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1627         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1628         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1629         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1630         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1631         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1632         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1633         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1634         uint64_t input_set = ICE_INSET_NONE;
1635         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1636         uint8_t ipv6_addr_mask[16] = {
1637                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1638                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1639         };
1640         uint32_t vtc_flow_cpu;
1641         uint16_t ether_type;
1642         enum rte_flow_item_type next_type;
1643
1644         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1645                 if (item->last) {
1646                         rte_flow_error_set(error, EINVAL,
1647                                         RTE_FLOW_ERROR_TYPE_ITEM,
1648                                         item,
1649                                         "Range not supported");
1650                         return -rte_errno;
1651                 }
1652                 item_type = item->type;
1653
1654                 switch (item_type) {
1655                 case RTE_FLOW_ITEM_TYPE_ETH:
1656                         eth_spec = item->spec;
1657                         eth_mask = item->mask;
1658                         next_type = (item + 1)->type;
1659
1660                         if (eth_spec && eth_mask) {
1661                                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
1662                                         input_set |= ICE_INSET_DMAC;
1663                                         rte_memcpy(&filter->input.ext_data.dst_mac,
1664                                                    &eth_spec->dst,
1665                                                    RTE_ETHER_ADDR_LEN);
1666                                 }
1667
1668                                 if (!rte_is_zero_ether_addr(&eth_mask->src)) {
1669                                         input_set |= ICE_INSET_SMAC;
1670                                         rte_memcpy(&filter->input.ext_data.src_mac,
1671                                                    &eth_spec->src,
1672                                                    RTE_ETHER_ADDR_LEN);
1673                                 }
1674
1675                                 /* Use ether_type only when the ETH item is last (ICE_FLTR_PTYPE_NON_IP_L2) */
1676                                 if (eth_mask->type == RTE_BE16(0xffff) &&
1677                                     next_type == RTE_FLOW_ITEM_TYPE_END) {
1678                                         input_set |= ICE_INSET_ETHERTYPE;
1679                                         ether_type = rte_be_to_cpu_16(eth_spec->type);
1680
1681                                         if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1682                                             ether_type == RTE_ETHER_TYPE_IPV6) {
1683                                                 rte_flow_error_set(error, EINVAL,
1684                                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1685                                                                    item,
1686                                                                    "Unsupported ether_type.");
1687                                                 return -rte_errno;
1688                                         }
1689
1690                                         rte_memcpy(&filter->input.ext_data.ether_type,
1691                                                    &eth_spec->type,
1692                                                    sizeof(eth_spec->type));
1693                                         flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1694                                 }
1695                         }
1696                         break;
1697                 case RTE_FLOW_ITEM_TYPE_IPV4:
1698                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1699                         ipv4_spec = item->spec;
1700                         ipv4_mask = item->mask;
1701
1702                         if (ipv4_spec && ipv4_mask) {
1703                                 /* Check IPv4 mask and update input set */
1704                                 if (ipv4_mask->hdr.version_ihl ||
1705                                     ipv4_mask->hdr.total_length ||
1706                                     ipv4_mask->hdr.packet_id ||
1707                                     ipv4_mask->hdr.fragment_offset ||
1708                                     ipv4_mask->hdr.hdr_checksum) {
1709                                         rte_flow_error_set(error, EINVAL,
1710                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1711                                                    item,
1712                                                    "Invalid IPv4 mask.");
1713                                         return -rte_errno;
1714                                 }
1715                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1716                                         input_set |= tunnel_type ?
1717                                                      ICE_INSET_TUN_IPV4_SRC :
1718                                                      ICE_INSET_IPV4_SRC;
1719                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1720                                         input_set |= tunnel_type ?
1721                                                      ICE_INSET_TUN_IPV4_DST :
1722                                                      ICE_INSET_IPV4_DST;
1723                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1724                                         input_set |= ICE_INSET_IPV4_TOS;
1725                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1726                                         input_set |= ICE_INSET_IPV4_TTL;
1727                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1728                                         input_set |= ICE_INSET_IPV4_PROTO;
1729
1730                                 filter->input.ip.v4.dst_ip =
1731                                         ipv4_spec->hdr.dst_addr;
1732                                 filter->input.ip.v4.src_ip =
1733                                         ipv4_spec->hdr.src_addr;
1734                                 filter->input.ip.v4.tos =
1735                                         ipv4_spec->hdr.type_of_service;
1736                                 filter->input.ip.v4.ttl =
1737                                         ipv4_spec->hdr.time_to_live;
1738                                 filter->input.ip.v4.proto =
1739                                         ipv4_spec->hdr.next_proto_id;
1740                         }
1741
1742                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1743                         break;
1744                 case RTE_FLOW_ITEM_TYPE_IPV6:
1745                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1746                         ipv6_spec = item->spec;
1747                         ipv6_mask = item->mask;
1748
1749                         if (ipv6_spec && ipv6_mask) {
1750                                 /* Check IPv6 mask and update input set */
1751                                 if (ipv6_mask->hdr.payload_len) {
1752                                         rte_flow_error_set(error, EINVAL,
1753                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1754                                                    item,
1755                                                    "Invalid IPv6 mask");
1756                                         return -rte_errno;
1757                                 }
1758
1759                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1760                                             ipv6_addr_mask,
1761                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1762                                         input_set |= ICE_INSET_IPV6_SRC;
1763                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1764                                             ipv6_addr_mask,
1765                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1766                                         input_set |= ICE_INSET_IPV6_DST;
1767
1768                                 if ((ipv6_mask->hdr.vtc_flow &
1769                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1770                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1771                                         input_set |= ICE_INSET_IPV6_TC;
1772                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1773                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1774                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1775                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1776
1777                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1778                                            ipv6_spec->hdr.dst_addr, 16);
1779                                 rte_memcpy(filter->input.ip.v6.src_ip,
1780                                            ipv6_spec->hdr.src_addr, 16);
1781
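                                /*
                                 * vtc_flow is version(4) | TC(8) |
                                 * flow label(20); shifting the CPU-order
                                 * word right by ICE_FDIR_IPV6_TC_OFFSET
                                 * leaves the traffic class in the low
                                 * byte, and the uint8_t cast below drops
                                 * the version nibble.
                                 */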
1782                                 vtc_flow_cpu =
1783                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1784                                 filter->input.ip.v6.tc =
1785                                         (uint8_t)(vtc_flow_cpu >>
1786                                                   ICE_FDIR_IPV6_TC_OFFSET);
1787                                 filter->input.ip.v6.proto =
1788                                         ipv6_spec->hdr.proto;
1789                                 filter->input.ip.v6.hlim =
1790                                         ipv6_spec->hdr.hop_limits;
1791                         }
1792
1793                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1794                         break;
1795                 case RTE_FLOW_ITEM_TYPE_TCP:
1796                         tcp_spec = item->spec;
1797                         tcp_mask = item->mask;
1798
1799                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1800                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1801                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1802                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1803
1804                         if (tcp_spec && tcp_mask) {
1805                                 /* Check TCP mask and update input set */
1806                                 if (tcp_mask->hdr.sent_seq ||
1807                                     tcp_mask->hdr.recv_ack ||
1808                                     tcp_mask->hdr.data_off ||
1809                                     tcp_mask->hdr.tcp_flags ||
1810                                     tcp_mask->hdr.rx_win ||
1811                                     tcp_mask->hdr.cksum ||
1812                                     tcp_mask->hdr.tcp_urp) {
1813                                         rte_flow_error_set(error, EINVAL,
1814                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1815                                                    item,
1816                                                    "Invalid TCP mask");
1817                                         return -rte_errno;
1818                                 }
1819
1820                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1821                                         input_set |= tunnel_type ?
1822                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1823                                                      ICE_INSET_TCP_SRC_PORT;
1824                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1825                                         input_set |= tunnel_type ?
1826                                                      ICE_INSET_TUN_TCP_DST_PORT :
1827                                                      ICE_INSET_TCP_DST_PORT;
1828
1829                                 /* Get filter info */
1830                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1831                                         filter->input.ip.v4.dst_port =
1832                                                 tcp_spec->hdr.dst_port;
1833                                         filter->input.ip.v4.src_port =
1834                                                 tcp_spec->hdr.src_port;
1835                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1836                                         filter->input.ip.v6.dst_port =
1837                                                 tcp_spec->hdr.dst_port;
1838                                         filter->input.ip.v6.src_port =
1839                                                 tcp_spec->hdr.src_port;
1840                                 }
1841                         }
1842                         break;
1843                 case RTE_FLOW_ITEM_TYPE_UDP:
1844                         udp_spec = item->spec;
1845                         udp_mask = item->mask;
1846
1847                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1848                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1849                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1850                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1851
1852                         if (udp_spec && udp_mask) {
1853                                 /* Check UDP mask and update input set */
1854                                 if (udp_mask->hdr.dgram_len ||
1855                                     udp_mask->hdr.dgram_cksum) {
1856                                         rte_flow_error_set(error, EINVAL,
1857                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1858                                                    item,
1859                                                    "Invalid UDP mask");
1860                                         return -rte_errno;
1861                                 }
1862
1863                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1864                                         input_set |= tunnel_type ?
1865                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1866                                                      ICE_INSET_UDP_SRC_PORT;
1867                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1868                                         input_set |= tunnel_type ?
1869                                                      ICE_INSET_TUN_UDP_DST_PORT :
1870                                                      ICE_INSET_UDP_DST_PORT;
1871
1872                                 /* Get filter info */
1873                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1874                                         filter->input.ip.v4.dst_port =
1875                                                 udp_spec->hdr.dst_port;
1876                                         filter->input.ip.v4.src_port =
1877                                                 udp_spec->hdr.src_port;
1878                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1879                                         filter->input.ip.v6.src_port =
1880                                                 udp_spec->hdr.src_port;
1881                                         filter->input.ip.v6.dst_port =
1882                                                 udp_spec->hdr.dst_port;
1883                                 }
1884                         }
1885                         break;
1886                 case RTE_FLOW_ITEM_TYPE_SCTP:
1887                         sctp_spec = item->spec;
1888                         sctp_mask = item->mask;
1889
1890                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1891                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1892                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1893                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1894
1895                         if (sctp_spec && sctp_mask) {
1896                                 /* Check SCTP mask and update input set */
1897                                 if (sctp_mask->hdr.cksum) {
1898                                         rte_flow_error_set(error, EINVAL,
1899                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1900                                                    item,
1901                                                    "Invalid SCTP mask");
1902                                         return -rte_errno;
1903                                 }
1904
1905                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1906                                         input_set |= tunnel_type ?
1907                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1908                                                      ICE_INSET_SCTP_SRC_PORT;
1909                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1910                                         input_set |= tunnel_type ?
1911                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1912                                                      ICE_INSET_SCTP_DST_PORT;
1913
1914                                 /* Get filter info */
1915                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1916                                         filter->input.ip.v4.dst_port =
1917                                                 sctp_spec->hdr.dst_port;
1918                                         filter->input.ip.v4.src_port =
1919                                                 sctp_spec->hdr.src_port;
1920                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1921                                         filter->input.ip.v6.dst_port =
1922                                                 sctp_spec->hdr.dst_port;
1923                                         filter->input.ip.v6.src_port =
1924                                                 sctp_spec->hdr.src_port;
1925                                 }
1926                         }
1927                         break;
1928                 case RTE_FLOW_ITEM_TYPE_VOID:
1929                         break;
1930                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1931                         l3 = RTE_FLOW_ITEM_TYPE_END;
1932                         vxlan_spec = item->spec;
1933                         vxlan_mask = item->mask;
1934
1935                         if (vxlan_spec || vxlan_mask) {
1936                                 rte_flow_error_set(error, EINVAL,
1937                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1938                                                    item,
1939                                                    "Invalid VXLAN field");
1940                                 return -rte_errno;
1941                         }
1942
1943                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1944                         break;
1945                 case RTE_FLOW_ITEM_TYPE_GTPU:
1946                         l3 = RTE_FLOW_ITEM_TYPE_END;
1947                         gtp_spec = item->spec;
1948                         gtp_mask = item->mask;
1949
1950                         if (gtp_spec && gtp_mask) {
1951                                 if (gtp_mask->v_pt_rsv_flags ||
1952                                     gtp_mask->msg_type ||
1953                                     gtp_mask->msg_len) {
1954                                         rte_flow_error_set(error, EINVAL,
1955                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1956                                                    item,
1957                                                    "Invalid GTP mask");
1958                                         return -rte_errno;
1959                                 }
1960
1961                                 if (gtp_mask->teid == UINT32_MAX)
1962                                         input_set |= ICE_INSET_GTPU_TEID;
1963
1964                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1965                         }
1966
1967                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1968                         break;
1969                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1970                         gtp_psc_spec = item->spec;
1971                         gtp_psc_mask = item->mask;
1972
1973                         if (gtp_psc_spec && gtp_psc_mask) {
1974                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1975                                         input_set |= ICE_INSET_GTPU_QFI;
1976
1977                                 filter->input.gtpu_data.qfi =
1978                                         gtp_psc_spec->qfi;
1979                         }
1980                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1981                         break;
1982                 default:
1983                         rte_flow_error_set(error, EINVAL,
1984                                    RTE_FLOW_ERROR_TYPE_ITEM,
1985                                    item,
1986                                    "Invalid pattern item.");
1987                         return -rte_errno;
1988                 }
1989         }
1990
1991         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
1992                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
1993                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1994         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
1995                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
1996                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER;
1997         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
1998                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1999                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
2000         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2001                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2002                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER;
2003
2004         filter->tunnel_type = tunnel_type;
2005         filter->input.flow_type = flow_type;
2006         filter->input_set = input_set;
2007
2008         return 0;
2009 }
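/*
 * Example pattern consumed by the parser above (a sketch; the addresses
 * are placeholders): a fully masked IPv4 source/destination followed by a
 * UDP item yields flow_type ICE_FLTR_PTYPE_NONF_IPV4_UDP with input set
 * ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST.
 *
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr = {
 *                     .src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *                     .dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2)),
 *             },
 *     };
 *     struct rte_flow_item_ipv4 ip_mask = {
 *             .hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */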
2010
2011 static int
2012 ice_fdir_parse(struct ice_adapter *ad,
2013                struct ice_pattern_match_item *array,
2014                uint32_t array_len,
2015                const struct rte_flow_item pattern[],
2016                const struct rte_flow_action actions[],
2017                void **meta,
2018                struct rte_flow_error *error)
2019 {
2020         struct ice_pf *pf = &ad->pf;
2021         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2022         struct ice_pattern_match_item *item = NULL;
2023         uint64_t input_set;
2024         int ret;
2025
2026         memset(filter, 0, sizeof(*filter));
2027         item = ice_search_pattern_match_item(pattern, array, array_len, error);
2028         if (!item)
2029                 return -rte_errno;
2030
2031         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2032         if (ret)
2033                 goto error;
2034         input_set = filter->input_set;
2035         if (!input_set || input_set & ~item->input_set_mask) {
2036                 rte_flow_error_set(error, EINVAL,
2037                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2038                                    pattern,
2039                                    "Invalid input set");
2040                 ret = -rte_errno;
2041                 goto error;
2042         }
2043
2044         ret = ice_fdir_parse_action(ad, actions, error, filter);
2045         if (ret)
2046                 goto error;
2047
2048         if (meta)
2049                 *meta = filter;
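/* reached on success as well: the matched pattern item is always freed */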
2050 error:
2051         rte_free(item);
2052         return ret;
2053 }
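/*
 * End-to-end, a rule built from the pattern/action sketches above goes
 * through the standard entry point (port_id is the application's):
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (!flow)
 *             printf("FDIR rule rejected: %s\n",
 *                    err.message ? err.message : "unknown");
 */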
2054
2055 static struct ice_flow_parser ice_fdir_parser_os = {
2056         .engine = &ice_fdir_engine,
2057         .array = ice_fdir_pattern_os,
2058         .array_len = RTE_DIM(ice_fdir_pattern_os),
2059         .parse_pattern_action = ice_fdir_parse,
2060         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2061 };
2062
2063 static struct ice_flow_parser ice_fdir_parser_comms = {
2064         .engine = &ice_fdir_engine,
2065         .array = ice_fdir_pattern_comms,
2066         .array_len = RTE_DIM(ice_fdir_pattern_comms),
2067         .parse_pattern_action = ice_fdir_parse,
2068         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2069 };
2070
2071 RTE_INIT(ice_fdir_engine_register)
2072 {
2073         ice_register_flow_engine(&ice_fdir_engine);
2074 }