net/ice: fix flow director tunnel profile existence check
[dpdk.git] drivers/net/ice/ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

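/*
 * Input-set masks for each supported flow type. Each ICE_FDIR_INSET_*
 * macro ORs together the ICE_INSET_* field bits that a flow director
 * rule may match on for that pattern; the pattern tables below pair
 * every supported rte_flow pattern with its allowed input set.
 */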
#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

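/*
 * Allocate the per-flow-type profile table. hw->fdir_prof is indexed by
 * ice_fltr_ptype; each slot tracks the HW profile state (VSI handles
 * and flow entry handles) for one flow type.
 */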
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        return -ENOMEM;
}

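/*
 * FDIR counters are managed in pools: each pool covers a contiguous
 * block of HW counter indexes and keeps its unused counters on a free
 * list. The container tracks every pool created for this PF.
 */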
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink the pool from the container list before freeing it */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

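/*
 * Allocate one counter. A shared counter is first looked up by ID and
 * its reference count bumped on a hit; otherwise the first free counter
 * from any pool is taken and its HW statistic registers are cleared.
 * Pools whose free list empties are rotated to the tail of the pool
 * list so subsequent scans reach pools with free counters sooner.
 */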
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

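/*
 * The SW filter list is an rte_hash table keyed by
 * ice_fdir_fltr_pattern and sized for ICE_MAX_FDIR_FILTER_NUM entries;
 * hash_map maps a hash slot back to the stored filter so duplicate
 * rules can be detected before HW is programmed.
 */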
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

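/*
 * Create a HW profile and two flow entries (main VSI and control VSI)
 * for one flow type. A tunnel profile uses a two-element segment array,
 * outer segment at [0] and inner at [1], which is why the existence
 * check compares &seg[1] in the tunnel case: only the inner segment
 * identifies the tunnel profile. An identical existing profile returns
 * -EAGAIN; a conflicting one is replaced only if no filters of this
 * type still reference it.
 */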
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

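/*
 * Build a flow segment from the rule's input set and program it. For
 * tunnel flows a two-segment array (seg_tun) is allocated and the
 * parsed fields land in the inner segment, seg_tun[1], matching the
 * layout that ice_fdir_hw_tbl_conf() expects.
 */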
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

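/*
 * Program (add == true) or remove one filter in HW: build the
 * programming descriptor, generate the matching dummy packet into the
 * pre-reserved memzone, and submit both through the FDIR programming
 * queue.
 */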
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                /* release the allocated counter as well as the entry */
                goto free_counter;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

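/*
 * Query the packet hit count of a rule created with a count action.
 * FDIR counters only track hits, so byte statistics are reported as
 * unavailable.
 */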
static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value in
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

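/*
 * Engine hooks registered with the generic flow framework, which
 * dispatches rte_flow create/destroy/query calls for FDIR-backed rules
 * through this table.
 */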
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

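/*
 * Parse an RSS action used as a queue region: the queue list must be
 * contiguous, its size a power of two no larger than
 * ICE_FDIR_MAX_QREGION_SIZE, and its last index within the device's RX
 * queue count. q_region stores log2(queue_num), recovered via
 * rte_fls_u32().
 */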
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "2, 4, 8, 16, 32, 64, 128, as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

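/*
 * Walk the rte_flow pattern items, validate the masks, and accumulate
 * the matched fields into input_set. An illustrative rule this parser
 * accepts, in testpmd flow syntax:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 \
 *       dst is 192.168.0.2 / udp src is 32 dst is 33 / end \
 *       actions queue index 3 / end
 */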
1354 static int
1355 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1356                        const struct rte_flow_item pattern[],
1357                        struct rte_flow_error *error,
1358                        struct ice_fdir_filter_conf *filter)
1359 {
1360         const struct rte_flow_item *item = pattern;
1361         enum rte_flow_item_type item_type;
1362         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1363         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1364         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1365         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1366         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1367         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1368         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1369         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1370         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1371         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1372         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1373         uint64_t input_set = ICE_INSET_NONE;
1374         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1375         uint8_t  ipv6_addr_mask[16] = {
1376                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1377                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1378         };
1379         uint32_t vtc_flow_cpu;
1380
1381
1382         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1383                 if (item->last) {
1384                         rte_flow_error_set(error, EINVAL,
1385                                         RTE_FLOW_ERROR_TYPE_ITEM,
1386                                         item,
1387                                         "Not support range");
1388                         return -rte_errno;
1389                 }
1390                 item_type = item->type;
1391
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Source MAC not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid MAC address mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

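				/* Note: src/dst are stored swapped here,
				 * presumably because the FDIR programming
				 * packet is built in the reverse direction
				 * of the flow to be matched.
				 */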
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

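				/* TC is the 8-bit field starting at bit
				 * ICE_FDIR_IPV6_TC_OFFSET of vtc_flow.
				 */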
				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN field");
				return -rte_errno;
			}

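			/* Outer VXLAN fields themselves cannot be matched;
			 * the tunnel item only qualifies the inner headers.
			 */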
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Invalid pattern item.");
			return -rte_errno;
		}
	}

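	/* A GTP-U tunnel forces the IPv4 GTP-U "other" flow type,
	 * overriding whatever the items above selected.
	 */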
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

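/*
 * Top-level FDIR parse entry: match the pattern against the supported
 * templates, then fill the PF's staging filter from the pattern and
 * actions. On success, *meta points at that staging filter.
 */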
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
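	/* Reject an empty input set or any field outside the set
	 * allowed by the matched pattern template.
	 */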
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	*meta = filter;

	return 0;
}

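/*
 * Two parser instances are registered because the supported pattern set
 * depends on which DDP package is loaded (OS default vs. comms).
 */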
static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}