drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu,        ICE_FDIR_INSET_GTPU,                  ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,     ICE_FDIR_INSET_GTPU_EH,               ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

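/*
 * ice_memzone_reserve - reserve an IOVA-contiguous memzone, reusing an
 * already-reserved memzone with the same name instead of allocating twice
 */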
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

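/*
 * ice_fdir_prof_alloc - allocate a FDIR hardware profile slot for every
 * filter ptype; on failure, free everything allocated so far
 */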
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

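/*
 * ice_fdir_counter_pool_add - add a pool of @len counters, starting at
 * hardware counter index @index_start, to the per-PF counter container
 */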
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

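/*
 * ice_fdir_counter_alloc - take a counter from the first pool that has a
 * free one; a shared counter with a matching ID is reused by bumping its
 * reference count, and the hardware statistics are cleared on a fresh
 * allocation
 */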
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

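/*
 * ice_fdir_counter_free - drop a reference to a counter and return it to
 * its pool's free list when the last reference goes away
 */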
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

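/*
 * ice_fdir_init_filter_list - create the per-port hash table and hash map
 * used to track FDIR filters in software, keyed by filter pattern
 */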
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

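/*
 * ice_fdir_cur_prof_conflict - check the existing profile for this ptype:
 * return 0 if there is none (or an empty, conflicting one was removed),
 * -EEXIST if an identical input set is already programmed, and -EINVAL if
 * a conflicting profile still has rules attached
 */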
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* if no input set conflict, return -EEXIST */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with an input set conflict already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

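/*
 * ice_fdir_prof_resolve_conflict - return true if a conflicting profile
 * either does not exist or is empty and can be removed; false if it still
 * has rules attached
 */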
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and rule exists, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

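/*
 * ice_fdir_cross_prof_conflict - an L4 profile (UDP/TCP/SCTP/ICMP) and the
 * corresponding OTHER profile cannot coexist, so try to remove whichever
 * empty profile would conflict with @ptype
 */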
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

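/*
 * ice_fdir_hw_tbl_conf - program a flow profile for @ptype and add flow
 * entries for both the main VSI and the FDIR control VSI; tunnel profiles
 * use two segments, non-tunnel profiles use one
 */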
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether the profile conflicts with another profile */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

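/*
 * ice_fdir_input_set_parse - translate an input set bitmap into the list
 * of ice_flow_field indexes it covers; @field must be pre-filled with
 * ICE_FLOW_FIELD_IDX_MAX so the caller can detect the end of the list
 */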
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

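/*
 * ice_fdir_input_set_conf - build the flow segment(s) for a filter type
 * and input set and program them into the hardware table; for tunnel
 * types the input set is applied to the inner (second) segment
 */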
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        bool is_tunnel;
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
                        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                          ICE_FLOW_SEG_HDR_IPV4 |
                                          ICE_FLOW_SEG_HDR_IPV_OTHER);
                else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
                        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                          ICE_FLOW_SEG_HDR_GTPU_IP |
                                          ICE_FLOW_SEG_HDR_IPV4 |
                                          ICE_FLOW_SEG_HDR_IPV_OTHER);
                else
                        PMD_DRV_LOG(ERR, "unsupported tunnel type.");
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);
        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        } else {
                return ret;
        }
}

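/* Update the active filter counters when a rule is added or removed */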
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->hw.dcf_enabled)
                return;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

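/*
 * ice_fdir_add_del_filter - build a programming descriptor and a dummy
 * packet for the rule and submit them through the FDIR programming queue
 */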
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, filter->tunnel_type);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

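/* Read (and optionally reset) the 64-bit hit counter attached to a rule */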
1366 static int
1367 ice_fdir_query_count(struct ice_adapter *ad,
1368                       struct rte_flow *flow,
1369                       struct rte_flow_query_count *flow_stats,
1370                       struct rte_flow_error *error)
1371 {
1372         struct ice_pf *pf = &ad->pf;
1373         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1374         struct ice_fdir_filter_conf *filter = flow->rule;
1375         struct ice_fdir_counter *counter = filter->counter;
1376         uint64_t hits_lo, hits_hi;
1377
1378         if (!counter) {
1379                 rte_flow_error_set(error, EINVAL,
1380                                   RTE_FLOW_ERROR_TYPE_ACTION,
1381                                   NULL,
1382                                   "FDIR counters not available");
1383                 return -rte_errno;
1384         }
1385
1386         /*
1387          * Reading the low 32-bits latches the high 32-bits into a shadow
1388          * register. Reading the high 32-bit returns the value in the
1389          * shadow register.
1390          */
1391         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1392         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1393
1394         flow_stats->hits_set = 1;
1395         flow_stats->hits = hits_lo | (hits_hi << 32);
1396         flow_stats->bytes_set = 0;
1397         flow_stats->bytes = 0;
1398
1399         if (flow_stats->reset) {
1400                 /* reset statistic counter value */
1401                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1402                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1403         }
1404
1405         return 0;
1406 }
1407
1408 static struct ice_flow_engine ice_fdir_engine = {
1409         .init = ice_fdir_init,
1410         .uninit = ice_fdir_uninit,
1411         .create = ice_fdir_create_filter,
1412         .destroy = ice_fdir_destroy_filter,
1413         .query_count = ice_fdir_query_count,
1414         .type = ICE_FLOW_ENGINE_FDIR,
1415 };
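
/*
 * Illustrative example only (testpmd flow syntax; port, addresses,
 * queue and mark ids are arbitrary): a rule this engine can program is
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 \
 *        dst is 192.168.0.2 / udp src is 32 dst is 33 / end \
 *        actions queue index 3 / mark id 1 / end
 *
 * i.e. pattern_eth_ipv4_udp with one destination action (queue) and an
 * optional mark, as accepted by ice_fdir_parse_action() below.
 */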
1416
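/*
 * Translate an RSS action into an FDIR queue region. The queues must be
 * contiguous, fit in the device RX queue range, and number a power of
 * two no larger than ICE_FDIR_MAX_QREGION_SIZE. The region is encoded
 * as a base queue plus log2 of its size, e.g. queues 8..15 yield
 * q_index = 8 and q_region = 3 (2^3 = 8 queues).
 */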
1417 static int
1418 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1419                               struct rte_flow_error *error,
1420                               const struct rte_flow_action *act,
1421                               struct ice_fdir_filter_conf *filter)
1422 {
1423         const struct rte_flow_action_rss *rss = act->conf;
1424         uint32_t i;
1425
1426         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1427                 rte_flow_error_set(error, EINVAL,
1428                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1429                                    "Invalid action.");
1430                 return -rte_errno;
1431         }
1432
1433         if (rss->queue_num <= 1) {
1434                 rte_flow_error_set(error, EINVAL,
1435                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1436                                    "Queue region size can't be 0 or 1.");
1437                 return -rte_errno;
1438         }
1439
1440         /* Check that the queue indexes for the queue region are contiguous */
1441         for (i = 0; i < rss->queue_num - 1; i++) {
1442                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1443                         rte_flow_error_set(error, EINVAL,
1444                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1445                                            "Discontinuous queue region");
1446                         return -rte_errno;
1447                 }
1448         }
1449
1450         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1451                 rte_flow_error_set(error, EINVAL,
1452                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1453                                    "Invalid queue region indexes.");
1454                 return -rte_errno;
1455         }
1456
1457         if (!(rte_is_power_of_2(rss->queue_num) &&
1458              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1459                 rte_flow_error_set(error, EINVAL,
1460                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1461                                    "The region size must be a power of two between 2 and "
1462                                    "128 (2, 4, 8, 16, 32, 64 or 128), as long as the total "
1463                                    "number of queues does not exceed the VSI allocation.");
1464                 return -rte_errno;
1465         }
1466
1467         filter->input.q_index = rss->queue[0];
1468         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1469         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1470
1471         return 0;
1472 }
1473
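/*
 * Validate and translate the action list: at most one destination
 * action (QUEUE, DROP, PASSTHRU or an RSS queue region), at most one
 * MARK and at most one COUNT. A rule carrying only MARK/COUNT defaults
 * to PASSTHRU so matched packets still take the regular RX path.
 */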
1474 static int
1475 ice_fdir_parse_action(struct ice_adapter *ad,
1476                       const struct rte_flow_action actions[],
1477                       struct rte_flow_error *error,
1478                       struct ice_fdir_filter_conf *filter)
1479 {
1480         struct ice_pf *pf = &ad->pf;
1481         const struct rte_flow_action_queue *act_q;
1482         const struct rte_flow_action_mark *mark_spec = NULL;
1483         const struct rte_flow_action_count *act_count;
1484         uint32_t dest_num = 0;
1485         uint32_t mark_num = 0;
1486         uint32_t counter_num = 0;
1487         int ret;
1488
1489         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1490                 switch (actions->type) {
1491                 case RTE_FLOW_ACTION_TYPE_VOID:
1492                         break;
1493                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1494                         dest_num++;
1495
1496                         act_q = actions->conf;
1497                         filter->input.q_index = act_q->index;
1498                         if (filter->input.q_index >=
1499                                         pf->dev_data->nb_rx_queues) {
1500                                 rte_flow_error_set(error, EINVAL,
1501                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1502                                                    actions,
1503                                                    "Invalid queue for FDIR.");
1504                                 return -rte_errno;
1505                         }
1506                         filter->input.dest_ctl =
1507                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1508                         break;
1509                 case RTE_FLOW_ACTION_TYPE_DROP:
1510                         dest_num++;
1511
1512                         filter->input.dest_ctl =
1513                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1514                         break;
1515                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1516                         dest_num++;
1517
1518                         filter->input.dest_ctl =
1519                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1520                         break;
1521                 case RTE_FLOW_ACTION_TYPE_RSS:
1522                         dest_num++;
1523
1524                         ret = ice_fdir_parse_action_qregion(pf,
1525                                                 error, actions, filter);
1526                         if (ret)
1527                                 return ret;
1528                         break;
1529                 case RTE_FLOW_ACTION_TYPE_MARK:
1530                         mark_num++;
1531
1532                         mark_spec = actions->conf;
1533                         filter->input.fltr_id = mark_spec->id;
1534                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1535                         break;
1536                 case RTE_FLOW_ACTION_TYPE_COUNT:
1537                         counter_num++;
1538
1539                         act_count = actions->conf;
1540                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1541                         rte_memcpy(&filter->act_count, act_count,
1542                                                 sizeof(filter->act_count));
1543
1544                         break;
1545                 default:
1546                         rte_flow_error_set(error, EINVAL,
1547                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1548                                    "Invalid action.");
1549                         return -rte_errno;
1550                 }
1551         }
1552
1553         if (dest_num >= 2) {
1554                 rte_flow_error_set(error, EINVAL,
1555                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1556                            "Unsupported action combination");
1557                 return -rte_errno;
1558         }
1559
1560         if (mark_num >= 2) {
1561                 rte_flow_error_set(error, EINVAL,
1562                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1563                            "Too many mark actions");
1564                 return -rte_errno;
1565         }
1566
1567         if (counter_num >= 2) {
1568                 rte_flow_error_set(error, EINVAL,
1569                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1570                            "Too many count actions");
1571                 return -rte_errno;
1572         }
1573
1574         if (dest_num + mark_num + counter_num == 0) {
1575                 rte_flow_error_set(error, EINVAL,
1576                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1577                            "Empty action");
1578                 return -rte_errno;
1579         }
1580
1581         /* Set the default action to PASSTHRU mode in the "mark/count only" case. */
1582         if (dest_num == 0)
1583                 filter->input.dest_ctl =
1584                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1585
1586         return 0;
1587 }
1588
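/*
 * Walk the pattern items and fill in the filter. A header field joins
 * the input set only when its mask is all-ones; masks on unsupported
 * fields are rejected. A VXLAN item flips the L3/L4 fields that follow
 * it to their ICE_INSET_TUN_* counterparts, while GTP-U matches on the
 * TEID (and QFI) instead.
 */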
1589 static int
1590 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1591                        const struct rte_flow_item pattern[],
1592                        struct rte_flow_error *error,
1593                        struct ice_fdir_filter_conf *filter)
1594 {
1595         const struct rte_flow_item *item = pattern;
1596         enum rte_flow_item_type item_type;
1597         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1598         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1599         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1600         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1601         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1602         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1603         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1604         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1605         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1606         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1607         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1608         uint64_t input_set = ICE_INSET_NONE;
1609         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1610         uint8_t  ipv6_addr_mask[16] = {
1611                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1612                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1613         };
1614         uint32_t vtc_flow_cpu;
1615
1617         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1618                 if (item->last) {
1619                         rte_flow_error_set(error, EINVAL,
1620                                         RTE_FLOW_ERROR_TYPE_ITEM,
1621                                         item,
1622                                         "Range matching not supported");
1623                         return -rte_errno;
1624                 }
1625                 item_type = item->type;
1626
1627                 switch (item_type) {
1628                 case RTE_FLOW_ITEM_TYPE_ETH:
1629                         eth_spec = item->spec;
1630                         eth_mask = item->mask;
1631
1632                         if (eth_spec && eth_mask) {
1633                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1634                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1635                                         rte_flow_error_set(error, EINVAL,
1636                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1637                                                 item,
1638                                                 "Source MAC matching not supported");
1639                                         return -rte_errno;
1640                                 }
1641
1642                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1643                                         rte_flow_error_set(error, EINVAL,
1644                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1645                                                 item,
1646                                                 "Invalid mac addr mask");
1647                                         return -rte_errno;
1648                                 }
1649
1650                                 input_set |= ICE_INSET_DMAC;
1651                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1652                                            &eth_spec->dst,
1653                                            RTE_ETHER_ADDR_LEN);
1654                         }
1655                         break;
1656                 case RTE_FLOW_ITEM_TYPE_IPV4:
1657                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1658                         ipv4_spec = item->spec;
1659                         ipv4_mask = item->mask;
1660
1661                         if (ipv4_spec && ipv4_mask) {
1662                                 /* Check IPv4 mask and update input set */
1663                                 if (ipv4_mask->hdr.version_ihl ||
1664                                     ipv4_mask->hdr.total_length ||
1665                                     ipv4_mask->hdr.packet_id ||
1666                                     ipv4_mask->hdr.fragment_offset ||
1667                                     ipv4_mask->hdr.hdr_checksum) {
1668                                         rte_flow_error_set(error, EINVAL,
1669                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1670                                                    item,
1671                                                    "Invalid IPv4 mask.");
1672                                         return -rte_errno;
1673                                 }
1674                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1675                                         input_set |= tunnel_type ?
1676                                                      ICE_INSET_TUN_IPV4_SRC :
1677                                                      ICE_INSET_IPV4_SRC;
1678                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1679                                         input_set |= tunnel_type ?
1680                                                      ICE_INSET_TUN_IPV4_DST :
1681                                                      ICE_INSET_IPV4_DST;
1682                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1683                                         input_set |= ICE_INSET_IPV4_TOS;
1684                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1685                                         input_set |= ICE_INSET_IPV4_TTL;
1686                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1687                                         input_set |= ICE_INSET_IPV4_PROTO;
1688
1689                                 filter->input.ip.v4.dst_ip =
1690                                         ipv4_spec->hdr.dst_addr;
1691                                 filter->input.ip.v4.src_ip =
1692                                         ipv4_spec->hdr.src_addr;
1693                                 filter->input.ip.v4.tos =
1694                                         ipv4_spec->hdr.type_of_service;
1695                                 filter->input.ip.v4.ttl =
1696                                         ipv4_spec->hdr.time_to_live;
1697                                 filter->input.ip.v4.proto =
1698                                         ipv4_spec->hdr.next_proto_id;
1699                         }
1700
1701                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1702                         break;
1703                 case RTE_FLOW_ITEM_TYPE_IPV6:
1704                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1705                         ipv6_spec = item->spec;
1706                         ipv6_mask = item->mask;
1707
1708                         if (ipv6_spec && ipv6_mask) {
1709                                 /* Check IPv6 mask and update input set */
1710                                 if (ipv6_mask->hdr.payload_len) {
1711                                         rte_flow_error_set(error, EINVAL,
1712                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1713                                                    item,
1714                                                    "Invalid IPv6 mask");
1715                                         return -rte_errno;
1716                                 }
1717
1718                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1719                                             ipv6_addr_mask,
1720                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1721                                         input_set |= ICE_INSET_IPV6_SRC;
1722                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1723                                             ipv6_addr_mask,
1724                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1725                                         input_set |= ICE_INSET_IPV6_DST;
1726
1727                                 if ((ipv6_mask->hdr.vtc_flow &
1728                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1729                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1730                                         input_set |= ICE_INSET_IPV6_TC;
1731                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1732                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1733                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1734                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1735
1736                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1737                                            ipv6_spec->hdr.dst_addr, 16);
1738                                 rte_memcpy(filter->input.ip.v6.src_ip,
1739                                            ipv6_spec->hdr.src_addr, 16);
1740
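                                /*
                                 * vtc_flow packs [4b version][8b TC]
                                 * [20b flow label]; shift right by
                                 * ICE_FDIR_IPV6_TC_OFFSET (20) to
                                 * extract the traffic class byte.
                                 */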
1741                                 vtc_flow_cpu =
1742                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1743                                 filter->input.ip.v6.tc =
1744                                         (uint8_t)(vtc_flow_cpu >>
1745                                                   ICE_FDIR_IPV6_TC_OFFSET);
1746                                 filter->input.ip.v6.proto =
1747                                         ipv6_spec->hdr.proto;
1748                                 filter->input.ip.v6.hlim =
1749                                         ipv6_spec->hdr.hop_limits;
1750                         }
1751
1752                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1753                         break;
1754                 case RTE_FLOW_ITEM_TYPE_TCP:
1755                         tcp_spec = item->spec;
1756                         tcp_mask = item->mask;
1757
1758                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1759                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1760                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1761                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1762
1763                         if (tcp_spec && tcp_mask) {
1764                                 /* Check TCP mask and update input set */
1765                                 if (tcp_mask->hdr.sent_seq ||
1766                                     tcp_mask->hdr.recv_ack ||
1767                                     tcp_mask->hdr.data_off ||
1768                                     tcp_mask->hdr.tcp_flags ||
1769                                     tcp_mask->hdr.rx_win ||
1770                                     tcp_mask->hdr.cksum ||
1771                                     tcp_mask->hdr.tcp_urp) {
1772                                         rte_flow_error_set(error, EINVAL,
1773                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1774                                                    item,
1775                                                    "Invalid TCP mask");
1776                                         return -rte_errno;
1777                                 }
1778
1779                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1780                                         input_set |= tunnel_type ?
1781                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1782                                                      ICE_INSET_TCP_SRC_PORT;
1783                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1784                                         input_set |= tunnel_type ?
1785                                                      ICE_INSET_TUN_TCP_DST_PORT :
1786                                                      ICE_INSET_TCP_DST_PORT;
1787
1788                                 /* Get filter info */
1789                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1790                                         filter->input.ip.v4.dst_port =
1791                                                 tcp_spec->hdr.dst_port;
1792                                         filter->input.ip.v4.src_port =
1793                                                 tcp_spec->hdr.src_port;
1794                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1795                                         filter->input.ip.v6.dst_port =
1796                                                 tcp_spec->hdr.dst_port;
1797                                         filter->input.ip.v6.src_port =
1798                                                 tcp_spec->hdr.src_port;
1799                                 }
1800                         }
1801                         break;
1802                 case RTE_FLOW_ITEM_TYPE_UDP:
1803                         udp_spec = item->spec;
1804                         udp_mask = item->mask;
1805
1806                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1807                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1808                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1809                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1810
1811                         if (udp_spec && udp_mask) {
1812                                 /* Check UDP mask and update input set */
1813                                 if (udp_mask->hdr.dgram_len ||
1814                                     udp_mask->hdr.dgram_cksum) {
1815                                         rte_flow_error_set(error, EINVAL,
1816                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1817                                                    item,
1818                                                    "Invalid UDP mask");
1819                                         return -rte_errno;
1820                                 }
1821
1822                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1823                                         input_set |= tunnel_type ?
1824                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1825                                                      ICE_INSET_UDP_SRC_PORT;
1826                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1827                                         input_set |= tunnel_type ?
1828                                                      ICE_INSET_TUN_UDP_DST_PORT :
1829                                                      ICE_INSET_UDP_DST_PORT;
1830
1831                                 /* Get filter info */
1832                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1833                                         filter->input.ip.v4.dst_port =
1834                                                 udp_spec->hdr.dst_port;
1835                                         filter->input.ip.v4.src_port =
1836                                                 udp_spec->hdr.src_port;
1837                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1838                                         filter->input.ip.v6.src_port =
1839                                                 udp_spec->hdr.src_port;
1840                                         filter->input.ip.v6.dst_port =
1841                                                 udp_spec->hdr.dst_port;
1842                                 }
1843                         }
1844                         break;
1845                 case RTE_FLOW_ITEM_TYPE_SCTP:
1846                         sctp_spec = item->spec;
1847                         sctp_mask = item->mask;
1848
1849                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1850                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1851                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1852                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1853
1854                         if (sctp_spec && sctp_mask) {
1855                                 /* Check SCTP mask and update input set */
1856                                 if (sctp_mask->hdr.cksum) {
1857                                         rte_flow_error_set(error, EINVAL,
1858                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1859                                                    item,
1860                                                    "Invalid SCTP mask");
1861                                         return -rte_errno;
1862                                 }
1863
1864                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1865                                         input_set |= tunnel_type ?
1866                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1867                                                      ICE_INSET_SCTP_SRC_PORT;
1868                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1869                                         input_set |= tunnel_type ?
1870                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1871                                                      ICE_INSET_SCTP_DST_PORT;
1872
1873                                 /* Get filter info */
1874                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1875                                         filter->input.ip.v4.dst_port =
1876                                                 sctp_spec->hdr.dst_port;
1877                                         filter->input.ip.v4.src_port =
1878                                                 sctp_spec->hdr.src_port;
1879                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1880                                         filter->input.ip.v6.dst_port =
1881                                                 sctp_spec->hdr.dst_port;
1882                                         filter->input.ip.v6.src_port =
1883                                                 sctp_spec->hdr.src_port;
1884                                 }
1885                         }
1886                         break;
1887                 case RTE_FLOW_ITEM_TYPE_VOID:
1888                         break;
1889                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1890                         l3 = RTE_FLOW_ITEM_TYPE_END;
1891                         vxlan_spec = item->spec;
1892                         vxlan_mask = item->mask;
1893
1894                         if (vxlan_spec || vxlan_mask) {
1895                                 rte_flow_error_set(error, EINVAL,
1896                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1897                                                    item,
1898                                                    "Matching on VXLAN fields is not supported");
1899                                 return -rte_errno;
1900                         }
1901
1902                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1903                         break;
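                /*
                 * GTP-U tunnel: only the TEID (and the QFI from the PSC
                 * extension item below) can be matched; other GTP header
                 * fields must be left unmasked.
                 */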
1904                 case RTE_FLOW_ITEM_TYPE_GTPU:
1905                         l3 = RTE_FLOW_ITEM_TYPE_END;
1906                         gtp_spec = item->spec;
1907                         gtp_mask = item->mask;
1908
1909                         if (gtp_spec && gtp_mask) {
1910                                 if (gtp_mask->v_pt_rsv_flags ||
1911                                     gtp_mask->msg_type ||
1912                                     gtp_mask->msg_len) {
1913                                         rte_flow_error_set(error, EINVAL,
1914                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1915                                                    item,
1916                                                    "Invalid GTP mask");
1917                                         return -rte_errno;
1918                                 }
1919
1920                                 if (gtp_mask->teid == UINT32_MAX)
1921                                         input_set |= ICE_INSET_GTPU_TEID;
1922
1923                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1924                         }
1925
1926                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1927                         break;
1928                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1929                         gtp_psc_spec = item->spec;
1930                         gtp_psc_mask = item->mask;
1931
1932                         if (gtp_psc_spec && gtp_psc_mask) {
1933                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1934                                         input_set |= ICE_INSET_GTPU_QFI;
1935
1936                                 filter->input.gtpu_data.qfi =
1937                                         gtp_psc_spec->qfi;
1938                         }
1939                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1940                         break;
1941                 default:
1942                         rte_flow_error_set(error, EINVAL,
1943                                    RTE_FLOW_ERROR_TYPE_ITEM,
1944                                    item,
1945                                    "Invalid pattern item.");
1946                         return -rte_errno;
1947                 }
1948         }
1949
1950         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU ||
1951             tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1952                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1953
1954         filter->tunnel_type = tunnel_type;
1955         filter->input.flow_type = flow_type;
1956         filter->input_set = input_set;
1957
1958         return 0;
1959 }
1960
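/*
 * Top-level parse callback shared by both parsers: match the pattern
 * against the supported-pattern table, extract input set and actions,
 * and reject rules whose input set is empty or not a subset of the
 * matched profile's mask. On success the filter is returned via *meta.
 */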
1961 static int
1962 ice_fdir_parse(struct ice_adapter *ad,
1963                struct ice_pattern_match_item *array,
1964                uint32_t array_len,
1965                const struct rte_flow_item pattern[],
1966                const struct rte_flow_action actions[],
1967                void **meta,
1968                struct rte_flow_error *error)
1969 {
1970         struct ice_pf *pf = &ad->pf;
1971         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1972         struct ice_pattern_match_item *item = NULL;
1973         uint64_t input_set;
1974         int ret;
1975
1976         memset(filter, 0, sizeof(*filter));
1977         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1978         if (!item)
1979                 return -rte_errno;
1980
1981         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1982         if (ret)
1983                 goto error;
1984         input_set = filter->input_set;
1985         if (!input_set || input_set & ~item->input_set_mask) {
1986                 rte_flow_error_set(error, EINVAL,
1987                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1988                                    pattern,
1989                                    "Invalid input set");
1990                 ret = -rte_errno;
1991                 goto error;
1992         }
1993
1994         ret = ice_fdir_parse_action(ad, actions, error, filter);
1995         if (ret)
1996                 goto error;
1997
1998         if (meta)
1999                 *meta = filter;
2000 error:
2001         rte_free(item);
2002         return ret;
2003 }
2004
2005 static struct ice_flow_parser ice_fdir_parser_os = {
2006         .engine = &ice_fdir_engine,
2007         .array = ice_fdir_pattern_os,
2008         .array_len = RTE_DIM(ice_fdir_pattern_os),
2009         .parse_pattern_action = ice_fdir_parse,
2010         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2011 };
2012
2013 static struct ice_flow_parser ice_fdir_parser_comms = {
2014         .engine = &ice_fdir_engine,
2015         .array = ice_fdir_pattern_comms,
2016         .array_len = RTE_DIM(ice_fdir_pattern_comms),
2017         .parse_pattern_action = ice_fdir_parse,
2018         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2019 };
2020
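/*
 * Constructor-time registration: RTE_INIT runs when the PMD is loaded,
 * so the FDIR engine is known to the generic flow framework before any
 * port initializes; the parser (OS vs comms pattern table) is selected
 * later, at engine init, according to the active DDP package.
 */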
2021 RTE_INIT(ice_fdir_engine_register)
2022 {
2023         ice_register_flow_engine(&ice_fdir_engine);
2024 }