net/ice: add error cases for wrong packages
[dpdk.git] drivers/net/ice/ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

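/*
 * Patterns supported by the flow director engine. Two tables are kept:
 * ice_fdir_pattern_os for the OS default DDP package and
 * ice_fdir_pattern_comms for the comms package, which additionally
 * supports GTP-U matching. ice_fdir_init() registers the parser that
 * matches the active package type.
 */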
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

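/*
 * Allocate the per-ptype FDIR HW profile table. Slots that were
 * already allocated by a previous call are kept as-is.
 */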
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL; /* avoid a dangling pointer on a later retry */
        return -ENOMEM;
}

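/*
 * Append a pool of @len counters, starting at HW counter index
 * @index_start, to the container. The pool is linked into pool_list
 * and recorded in pools[] so it can be released later.
 */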
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink the pool before freeing it so pool_list holds no
         * dangling pointer
         */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

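/* Set up the counter container with one block of FDIR counters. */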
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

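/* Find a shared counter with a matching ID that is still referenced. */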
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

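/*
 * Allocate a counter: reuse a shared counter with the same ID when one
 * exists, otherwise take the first free counter from the pools and
 * clear its HW statistics registers.
 */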
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

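/* Drop a reference; at zero the counter returns to its pool's free list. */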
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

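/*
 * Create the SW filter list: a hash table keyed by
 * struct ice_fdir_fltr_pattern plus a map from hash position to filter
 * entry.
 */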
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

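/* Free the per-ptype profile table allocated by ice_fdir_prof_alloc(). */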
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL; /* allow a clean re-allocation on next setup */
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

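/*
 * Program a HW profile (input set) for the given filter type and add
 * flow entries for both the main VSI and the FDIR control VSI.
 * Returns -EAGAIN when an identical profile is already programmed and
 * -EINVAL when a conflicting profile is still referenced by filters.
 */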
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

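/* Translate an input set bitmap into the list of HW flow fields. */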
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

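/*
 * Build the flow segment(s) for a filter type and input set and
 * program them via ice_fdir_hw_tbl_conf(). Tunnel profiles use two
 * segments: index 0 for the outer headers and index 1 for the inner
 * ones. (ice_malloc() takes a hw handle only for compatibility with
 * the shared base code; it is not dereferenced here.)
 */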
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                /* on success the HW profile keeps the segment array; for
                 * the tunnel case the scratch copy in seg can be freed
                 */
                if (is_tunnel)
                        rte_free(seg);
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

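/*
 * Engine init: set up the FDIR resources, then register the parser
 * matching the loaded DDP package; unknown package types are rejected
 * with -EINVAL.
 */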
static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

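/*
 * Program (add == true) or remove one filter rule in HW by building a
 * programming descriptor plus a dummy packet and submitting them on
 * the FDIR programming queue.
 */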
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;

        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                     struct rte_flow *flow,
                     struct rte_flow_query_count *flow_stats,
                     struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value in
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

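/*
 * Validate an RSS action used as a queue region: the queue indexes
 * must be consecutive, stay within the device's RX queue range, and
 * the queue count must be a power of two no larger than
 * ICE_FDIR_MAX_QREGION_SIZE.
 */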
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

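/*
 * Parse the action list into the filter: exactly one fate action
 * (queue, drop, passthru or RSS queue region) plus at most one mark
 * and one count action are accepted.
 */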
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

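/*
 * Walk the pattern items, validate the masks and fill in the input set
 * bitmap and filter fields. Only fully-masked fields (e.g. UINT32_MAX
 * for an IPv4 address) select a field for exact match. For example, a
 * testpmd rule like (illustrative):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *     dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *     actions queue index 2 / end
 * arrives here with fully-masked IPv4 addresses and UDP ports.
 */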
1356 static int
1357 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1358                        const struct rte_flow_item pattern[],
1359                        struct rte_flow_error *error,
1360                        struct ice_fdir_filter_conf *filter)
1361 {
1362         const struct rte_flow_item *item = pattern;
1363         enum rte_flow_item_type item_type;
1364         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1365         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1366         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1367         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1368         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1369         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1370         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1371         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1372         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1373         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1374         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1375         uint64_t input_set = ICE_INSET_NONE;
1376         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1377         uint8_t  ipv6_addr_mask[16] = {
1378                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1379                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1380         };
1381         uint32_t vtc_flow_cpu;
1382
1383
1384         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1385                 if (item->last) {
1386                         rte_flow_error_set(error, EINVAL,
1387                                         RTE_FLOW_ERROR_TYPE_ITEM,
1388                                         item,
1389                                         "Not support range");
1390                         return -rte_errno;
1391                 }
1392                 item_type = item->type;
1393
1394                 switch (item_type) {
1395                 case RTE_FLOW_ITEM_TYPE_ETH:
1396                         eth_spec = item->spec;
1397                         eth_mask = item->mask;
1398
1399                         if (eth_spec && eth_mask) {
1400                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1401                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1402                                         rte_flow_error_set(error, EINVAL,
1403                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1404                                                 item,
1405                                                 "Src MAC not supported");
1406                                         return -rte_errno;
1407                                 }
1408
1409                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1410                                         rte_flow_error_set(error, EINVAL,
1411                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1412                                                 item,
1413                                                 "Invalid MAC address mask");
1414                                         return -rte_errno;
1415                                 }
1416
1417                                 input_set |= ICE_INSET_DMAC;
1418                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1419                                            &eth_spec->dst,
1420                                            RTE_ETHER_ADDR_LEN);
1421                         }
1422                         break;
1423                 case RTE_FLOW_ITEM_TYPE_IPV4:
1424                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1425                         ipv4_spec = item->spec;
1426                         ipv4_mask = item->mask;
1427
1428                         if (ipv4_spec && ipv4_mask) {
1429                                 /* Check IPv4 mask and update input set */
1430                                 if (ipv4_mask->hdr.version_ihl ||
1431                                     ipv4_mask->hdr.total_length ||
1432                                     ipv4_mask->hdr.packet_id ||
1433                                     ipv4_mask->hdr.fragment_offset ||
1434                                     ipv4_mask->hdr.hdr_checksum) {
1435                                         rte_flow_error_set(error, EINVAL,
1436                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1437                                                    item,
1438                                                    "Invalid IPv4 mask.");
1439                                         return -rte_errno;
1440                                 }
1441                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1442                                         input_set |= tunnel_type ?
1443                                                      ICE_INSET_TUN_IPV4_SRC :
1444                                                      ICE_INSET_IPV4_SRC;
1445                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1446                                         input_set |= tunnel_type ?
1447                                                      ICE_INSET_TUN_IPV4_DST :
1448                                                      ICE_INSET_IPV4_DST;
1449                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1450                                         input_set |= ICE_INSET_IPV4_TOS;
1451                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1452                                         input_set |= ICE_INSET_IPV4_TTL;
1453                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1454                                         input_set |= ICE_INSET_IPV4_PROTO;
1455
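                                /*
                                 * Note: dst and src are stored swapped
                                 * here, and likewise for the IPv6
                                 * addresses and the L4 ports below, to
                                 * match how the base code builds the
                                 * FDIR training packet (src fields are
                                 * inserted at the packet's dst offsets
                                 * and vice versa).
                                 */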
1456                                 filter->input.ip.v4.dst_ip =
1457                                         ipv4_spec->hdr.src_addr;
1458                                 filter->input.ip.v4.src_ip =
1459                                         ipv4_spec->hdr.dst_addr;
1460                                 filter->input.ip.v4.tos =
1461                                         ipv4_spec->hdr.type_of_service;
1462                                 filter->input.ip.v4.ttl =
1463                                         ipv4_spec->hdr.time_to_live;
1464                                 filter->input.ip.v4.proto =
1465                                         ipv4_spec->hdr.next_proto_id;
1466                         }
1467
1468                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1469                         break;
1470                 case RTE_FLOW_ITEM_TYPE_IPV6:
1471                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1472                         ipv6_spec = item->spec;
1473                         ipv6_mask = item->mask;
1474
1475                         if (ipv6_spec && ipv6_mask) {
1476                                 /* Check IPv6 mask and update input set */
1477                                 if (ipv6_mask->hdr.payload_len) {
1478                                         rte_flow_error_set(error, EINVAL,
1479                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1480                                                    item,
1481                                                    "Invalid IPv6 mask");
1482                                         return -rte_errno;
1483                                 }
1484
1485                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1486                                             ipv6_addr_mask,
1487                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1488                                         input_set |= ICE_INSET_IPV6_SRC;
1489                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1490                                             ipv6_addr_mask,
1491                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1492                                         input_set |= ICE_INSET_IPV6_DST;
1493
1494                                 if ((ipv6_mask->hdr.vtc_flow &
1495                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1496                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1497                                         input_set |= ICE_INSET_IPV6_TC;
1498                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1499                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1500                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1501                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1502
1503                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1504                                            ipv6_spec->hdr.src_addr, 16);
1505                                 rte_memcpy(filter->input.ip.v6.src_ip,
1506                                            ipv6_spec->hdr.dst_addr, 16);
1507
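                                /*
                                 * vtc_flow holds version(4) | TC(8) |
                                 * flow label(20); shifting right by the
                                 * 20-bit flow-label width and truncating
                                 * to 8 bits extracts the TC field.
                                 */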
1508                                 vtc_flow_cpu =
1509                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1510                                 filter->input.ip.v6.tc =
1511                                         (uint8_t)(vtc_flow_cpu >>
1512                                                   ICE_FDIR_IPV6_TC_OFFSET);
1513                                 filter->input.ip.v6.proto =
1514                                         ipv6_spec->hdr.proto;
1515                                 filter->input.ip.v6.hlim =
1516                                         ipv6_spec->hdr.hop_limits;
1517                         }
1518
1519                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1520                         break;
1521                 case RTE_FLOW_ITEM_TYPE_TCP:
1522                         tcp_spec = item->spec;
1523                         tcp_mask = item->mask;
1524
1525                         if (tcp_spec && tcp_mask) {
1526                                 /* Check TCP mask and update input set */
1527                                 if (tcp_mask->hdr.sent_seq ||
1528                                     tcp_mask->hdr.recv_ack ||
1529                                     tcp_mask->hdr.data_off ||
1530                                     tcp_mask->hdr.tcp_flags ||
1531                                     tcp_mask->hdr.rx_win ||
1532                                     tcp_mask->hdr.cksum ||
1533                                     tcp_mask->hdr.tcp_urp) {
1534                                         rte_flow_error_set(error, EINVAL,
1535                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1536                                                    item,
1537                                                    "Invalid TCP mask");
1538                                         return -rte_errno;
1539                                 }
1540
1541                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1542                                         input_set |= tunnel_type ?
1543                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1544                                                      ICE_INSET_TCP_SRC_PORT;
1545                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1546                                         input_set |= tunnel_type ?
1547                                                      ICE_INSET_TUN_TCP_DST_PORT :
1548                                                      ICE_INSET_TCP_DST_PORT;
1549
1550                                 /* Get filter info */
1551                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1552                                         filter->input.ip.v4.dst_port =
1553                                                 tcp_spec->hdr.src_port;
1554                                         filter->input.ip.v4.src_port =
1555                                                 tcp_spec->hdr.dst_port;
1556                                         flow_type =
1557                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1558                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1559                                         filter->input.ip.v6.dst_port =
1560                                                 tcp_spec->hdr.src_port;
1561                                         filter->input.ip.v6.src_port =
1562                                                 tcp_spec->hdr.dst_port;
1563                                         flow_type =
1564                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1565                                 }
1566                         }
1567                         break;
1568                 case RTE_FLOW_ITEM_TYPE_UDP:
1569                         udp_spec = item->spec;
1570                         udp_mask = item->mask;
1571
1572                         if (udp_spec && udp_mask) {
1573                                 /* Check UDP mask and update input set */
1574                                 if (udp_mask->hdr.dgram_len ||
1575                                     udp_mask->hdr.dgram_cksum) {
1576                                         rte_flow_error_set(error, EINVAL,
1577                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1578                                                    item,
1579                                                    "Invalid UDP mask");
1580                                         return -rte_errno;
1581                                 }
1582
1583                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1584                                         input_set |= tunnel_type ?
1585                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1586                                                      ICE_INSET_UDP_SRC_PORT;
1587                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1588                                         input_set |= tunnel_type ?
1589                                                      ICE_INSET_TUN_UDP_DST_PORT :
1590                                                      ICE_INSET_UDP_DST_PORT;
1591
1592                                 /* Get filter info */
1593                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1594                                         filter->input.ip.v4.dst_port =
1595                                                 udp_spec->hdr.src_port;
1596                                         filter->input.ip.v4.src_port =
1597                                                 udp_spec->hdr.dst_port;
1598                                         flow_type =
1599                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1600                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1601                                         filter->input.ip.v6.src_port =
1602                                                 udp_spec->hdr.dst_port;
1603                                         filter->input.ip.v6.dst_port =
1604                                                 udp_spec->hdr.src_port;
1605                                         flow_type =
1606                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1607                                 }
1608                         }
1609                         break;
1610                 case RTE_FLOW_ITEM_TYPE_SCTP:
1611                         sctp_spec = item->spec;
1612                         sctp_mask = item->mask;
1613
1614                         if (sctp_spec && sctp_mask) {
1615                                 /* Check SCTP mask and update input set */
1616                                 if (sctp_mask->hdr.cksum) {
1617                                         rte_flow_error_set(error, EINVAL,
1618                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1619                                                    item,
1620                                                    "Invalid SCTP mask");
1621                                         return -rte_errno;
1622                                 }
1623
1624                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1625                                         input_set |= tunnel_type ?
1626                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1627                                                      ICE_INSET_SCTP_SRC_PORT;
1628                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1629                                         input_set |= tunnel_type ?
1630                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1631                                                      ICE_INSET_SCTP_DST_PORT;
1632
1633                                 /* Get filter info */
1634                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1635                                         filter->input.ip.v4.dst_port =
1636                                                 sctp_spec->hdr.src_port;
1637                                         filter->input.ip.v4.src_port =
1638                                                 sctp_spec->hdr.dst_port;
1639                                         flow_type =
1640                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1641                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1642                                         filter->input.ip.v6.dst_port =
1643                                                 sctp_spec->hdr.src_port;
1644                                         filter->input.ip.v6.src_port =
1645                                                 sctp_spec->hdr.dst_port;
1646                                         flow_type =
1647                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1648                                 }
1649                         }
1650                         break;
1651                 case RTE_FLOW_ITEM_TYPE_VOID:
1652                         break;
1653                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1654                         l3 = RTE_FLOW_ITEM_TYPE_END;
1655                         vxlan_spec = item->spec;
1656                         vxlan_mask = item->mask;
1657
1658                         if (vxlan_spec || vxlan_mask) {
1659                                 rte_flow_error_set(error, EINVAL,
1660                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1661                                                    item,
1662                                                    "VXLAN spec/mask not supported");
1663                                 return -rte_errno;
1664                         }
1665
1666                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1667                         break;
1668                 case RTE_FLOW_ITEM_TYPE_GTPU:
1669                         l3 = RTE_FLOW_ITEM_TYPE_END;
1670                         gtp_spec = item->spec;
1671                         gtp_mask = item->mask;
1672
1673                         if (gtp_spec && gtp_mask) {
1674                                 if (gtp_mask->v_pt_rsv_flags ||
1675                                     gtp_mask->msg_type ||
1676                                     gtp_mask->msg_len) {
1677                                         rte_flow_error_set(error, EINVAL,
1678                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1679                                                    item,
1680                                                    "Invalid GTP mask");
1681                                         return -rte_errno;
1682                                 }
1683
1684                                 if (gtp_mask->teid == UINT32_MAX)
1685                                         input_set |= ICE_INSET_GTPU_TEID;
1686
1687                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1688                         }
1689                         break;
1690                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1691                         gtp_psc_spec = item->spec;
1692                         gtp_psc_mask = item->mask;
1693
1694                         if (gtp_psc_spec && gtp_psc_mask) {
1695                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1696                                         input_set |= ICE_INSET_GTPU_QFI;
1697
1698                                 filter->input.gtpu_data.qfi =
1699                                         gtp_psc_spec->qfi;
1700                         }
1701
1702                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1703                         break;
1704                 default:
1705                         rte_flow_error_set(error, EINVAL,
1706                                    RTE_FLOW_ERROR_TYPE_ITEM,
1707                                    item,
1708                                    "Invalid pattern item.");
1709                         return -rte_errno;
1710                 }
1711         }
1712
1713         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1714                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1715
1716         filter->tunnel_type = tunnel_type;
1717         filter->input.flow_type = flow_type;
1718         filter->input_set = input_set;
1719
1720         return 0;
1721 }
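
/*
 * Illustrative sketch (not part of the driver): a pattern the parser
 * above accepts.  Fully-masked spec fields select the input set; here an
 * IPv4/UDP flow is keyed on source address and destination port (the
 * address and port values are hypothetical):
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(1234),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */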
1722
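/*
 * Validate a flow rule against the supported pattern array, parse the
 * pattern and the actions into pf->fdir.conf, and hand the resulting
 * filter configuration back through *meta on success.
 */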
1723 static int
1724 ice_fdir_parse(struct ice_adapter *ad,
1725                struct ice_pattern_match_item *array,
1726                uint32_t array_len,
1727                const struct rte_flow_item pattern[],
1728                const struct rte_flow_action actions[],
1729                void **meta,
1730                struct rte_flow_error *error)
1731 {
1732         struct ice_pf *pf = &ad->pf;
1733         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1734         struct ice_pattern_match_item *item = NULL;
1735         uint64_t input_set;
1736         int ret;
1737
1738         memset(filter, 0, sizeof(*filter));
1739         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1740         if (!item)
1741                 return -rte_errno;
1742
1743         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1744         if (ret)
1745                 return ret;
1746         input_set = filter->input_set;
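        /*
         * Reject an empty input set, and any input set selecting fields
         * outside what this pattern supports (item->input_set_mask).
         */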
1747         if (!input_set || input_set & ~item->input_set_mask) {
1748                 rte_flow_error_set(error, EINVAL,
1749                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1750                                    pattern,
1751                                    "Invalid input set");
1752                 return -rte_errno;
1753         }
1754
1755         ret = ice_fdir_parse_action(ad, actions, error, filter);
1756         if (ret)
1757                 return ret;
1758
1759         *meta = filter;
1760
1761         return 0;
1762 }
1763
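/*
 * Two parser instances share the same parse routine but differ in their
 * supported pattern tables: one for the OS default DDP package and one
 * for the comms package, which extends the list (e.g. with GTPU
 * patterns).  Which one gets registered depends on the package loaded
 * on the device.
 */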
1764 static struct ice_flow_parser ice_fdir_parser_os = {
1765         .engine = &ice_fdir_engine,
1766         .array = ice_fdir_pattern_os,
1767         .array_len = RTE_DIM(ice_fdir_pattern_os),
1768         .parse_pattern_action = ice_fdir_parse,
1769         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1770 };
1771
1772 static struct ice_flow_parser ice_fdir_parser_comms = {
1773         .engine = &ice_fdir_engine,
1774         .array = ice_fdir_pattern_comms,
1775         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1776         .parse_pattern_action = ice_fdir_parse,
1777         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1778 };
1779
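/*
 * RTE_INIT marks this function as a constructor, so the FDIR engine is
 * registered with the generic flow framework at load time, before any
 * device is probed.
 */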
1780 RTE_INIT(ice_fdir_engine_register)
1781 {
1782         ice_register_flow_engine(&ice_fdir_engine);
1783 }