net/ice: fix flow director counter resource release
drivers/net/ice/ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

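/* Look up the named memzone and reuse it if it already exists; otherwise
 * reserve a new IOVA-contiguous zone. This makes the call idempotent
 * across repeated FDIR setup/teardown cycles.
 */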
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

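/* Allocate the per-filter-type FDIR profile table and one profile entry
 * per flow type. On failure, everything allocated so far is rolled back.
 */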
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        /* Clear the pointers after freeing so a retried allocation does not
         * dereference stale memory.
         */
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }
        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
        return -ENOMEM;
}

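/* Carve a block of hardware counters [index_start, index_start + len) into
 * a pool and attach it to the container's pool list and pool array.
 */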
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;

        /* Check capacity before linking the pool anywhere, so the error
         * path never frees memory that is still on the pool list.
         */
        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                return -EINVAL;
        }

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        container->pools[container->index_free++] = pool;
        return 0;
}

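/* Set up the counter container with one pool covering the block of
 * hardware counters that starts at this PF's fd_ctr_base.
 */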
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

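/* Free all counter pools and reset the container to its initial state, so
 * that a later ice_fdir_counter_init() starts from a clean pool list
 * instead of stale pointers (the counter resource release this commit
 * fixes).
 */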
static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

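/* Find an in-use shared counter with the given ID so that multiple rules
 * can reference the same hardware counter.
 */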
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

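/* Allocate a counter: reuse a shared counter by ID when possible,
 * otherwise take the first free counter from a non-empty pool and zero
 * its hardware statistics registers.
 */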
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                /* Move the drained pool to the tail so non-empty pools are
                 * scanned first on the next allocation.
                 */
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

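/* Drop one reference; when the last reference goes away, return the
 * counter to its pool's free list.
 */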
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

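/* Create the per-port hash table plus the index-to-entry map that back
 * the software list of FDIR filters.
 */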
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);
        fdir_info->hash_table = NULL;

        return ret;
}

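/* Release the software filter list created by ice_fdir_init_filter_list(). */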
static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        /* NULL the pointers after freeing so a second call is harmless. */
        if (fdir_info->hash_map) {
                rte_free(fdir_info->hash_map);
                fdir_info->hash_map = NULL;
        }
        if (fdir_info->hash_table) {
                rte_hash_free(fdir_info->hash_table);
                fdir_info->hash_table = NULL;
        }
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                err = -EINVAL;
                goto fail_init_filter_list;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                err = -EINVAL;
                goto fail_counter_init;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director programming packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_fdir_counter_release(pf);
fail_counter_init:
        ice_fdir_release_filter_list(pf);
fail_init_filter_list:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        /* Remove by prof_id, not ptype: tunnel profiles are
                         * offset by ICE_FLTR_PTYPE_MAX.
                         */
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

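/* Program one FDIR HW profile for the given flow type: add the profile and
 * one flow entry each for the main VSI and the FDIR control VSI. If an
 * identical segment is already programmed, -EAGAIN tells the caller to
 * reuse it.
 */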
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

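/* Translate an input-set bitmap into the list of flow fields to extract;
 * the caller's field array is pre-filled with ICE_FLOW_FIELD_IDX_MAX,
 * which acts as the terminator.
 */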
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

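/* Build the flow segment(s) for a flow type and input set and program the
 * HW profile. Tunnel flows use a two-segment array: an empty outer segment
 * plus the inner segment.
 */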
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                /* The inner segment now lives in seg_tun[1]; free the
                 * standalone copy so it is not leaked when programming
                 * succeeds.
                 */
                rte_free(seg);
                seg = NULL;
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                if (is_tunnel)
                        rte_free(seg_tun);
                else
                        rte_free(seg);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

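/* Track the number of active FDIR filters, globally and per flow type. */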
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

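/* Program or remove one FDIR filter in hardware by building a programming
 * descriptor plus a dummy packet and submitting them on the FDIR queue.
 */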
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

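/* Create an FDIR filter: reject duplicates via the hash table, configure
 * the input-set profile, optionally allocate a counter, program the
 * hardware, and finally insert the rule into the software list.
 */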
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                /* release the counter too, not just the entry */
                goto free_counter;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

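/* Report the packet hit count of a rule's FDIR counter; byte counts are
 * not maintained for these counters, so bytes_set stays 0.
 */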
static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits returns the value in the
         * shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

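/* Validate an RSS action used as an FDIR queue region: the queues must be
 * contiguous, in range, and a power-of-two count no larger than
 * ICE_FDIR_MAX_QREGION_SIZE.
 */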
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if the queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

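/* Walk the action list and fill in the filter's destination (queue, drop,
 * passthru or queue region), optional MARK ID and optional COUNT action.
 * Exactly one destination action is required.
 */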
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

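/* Parse the pattern items into an FDIR filter: extract the input-set
 * bitmap and the matched header fields for each supported protocol layer.
 */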
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Src MAC not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid MAC addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

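				/* The src/dst swap below looks odd but
				 * appears deliberate: the base code builds
				 * the FDIR programming packet with these
				 * fields reversed, so swapping here keeps
				 * the installed rule matching the pattern
				 * as written.
				 */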
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

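				/* Traffic class occupies bits 20..27 of the
				 * 32-bit vtc_flow word once converted to CPU
				 * order, hence the shift by
				 * ICE_FDIR_IPV6_TC_OFFSET.
				 */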
				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

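			/* VXLAN is accepted only as a transparent tunnel
			 * marker; matching on VXLAN header fields (e.g. the
			 * VNI) is not supported, so any spec/mask is
			 * rejected.
			 */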
			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Invalid pattern item.");
			return -rte_errno;
		}
	}

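	/* Any GTP-U rule is programmed against the GTPU inner-IPv4
	 * profile, regardless of which L4 flow type was derived above.
	 */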
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

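/* Framework entry point: match the pattern against the engine's template
 * table, parse pattern and actions into the per-PF filter conf, and hand
 * the result back through *meta for the subsequent create step.
 */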
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
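	/* Reject rules that match on nothing, or on any field outside the
	 * input-set mask of the matched pattern template.
	 */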
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	*meta = filter;

	return 0;
}

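/* Both parsers share the same parse routine and distributor stage; they
 * differ only in the pattern table, matching the OS-default and comms
 * DDP packages respectively.
 */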
static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

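/* Constructor: register the FDIR engine with the generic flow framework
 * when the driver library is loaded.
 */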
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}