/* dpdk.git commit a89c506c0d49cff2773c4c62c9e44fd05a061d6d:
 * drivers/net/ice/ice_fdir_filter.c
 */
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

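/*
 * The IPv6 Traffic Class occupies bits 20-27 of the 32-bit
 * version/TC/flow-label word (vtc_flow), hence the 20-bit offset used to
 * build the TC mask below.
 */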
#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

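/*
 * Each entry below maps a supported rte_flow pattern to the bitmap of
 * input-set fields (ICE_INSET_*) that FDIR can match for that pattern;
 * the generic flow layer uses this mask when validating a rule's fields.
 */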
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

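/*
 * The "comms" DDP package understands more protocols than the OS-default
 * package; here that shows up as the extra GTP-U pattern at the end of
 * this table. ice_fdir_init() picks the table matching the active package.
 */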
static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

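/*
 * Look the memzone up by name before reserving it, so that a device
 * restart reuses the zone reserved earlier instead of failing because
 * the name is already taken.
 */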
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

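/*
 * Allocate one ice_fd_hw_prof slot per filter ptype; the array is
 * indexed by enum ice_fltr_ptype and released in ice_fdir_prof_free().
 */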
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     fltr_ptype < ptype;
	     fltr_ptype++)
		rte_free(hw->fdir_prof[fltr_ptype]);
	rte_free(hw->fdir_prof);
	/* clear the pointer so a retry does not reuse the freed array */
	hw->fdir_prof = NULL;
	return -ENOMEM;
}

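/*
 * Counter pool management: each pool owns a contiguous block of hardware
 * counter indexes and keeps its unused counters on a free list; the
 * container tracks every pool created for this PF.
 */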
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	/* Check for container space before linking the pool into the pool
	 * list, so the error path only has to free the pool itself.
	 */
	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++)
		rte_free(container->pools[i]);

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
					*container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->ref_cnt &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

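/*
 * Allocate a hardware counter. A shared counter with the same ID is
 * reused (reference counted); otherwise the first free counter of any
 * pool is taken, its statistics registers are cleared, and an emptied
 * pool is rotated to the tail of the pool list.
 */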
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}

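/*
 * The SW filter list is a cuckoo hash keyed by ice_fdir_fltr_pattern
 * (with extendable buckets enabled), plus a flat hash_map array that
 * translates the slot index returned by rte_hash into the filter object.
 */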
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		/* release the FDIR VSI created above instead of leaking it */
		goto fail_setup_tx;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		goto fail_setup_tx;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director programming packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}


static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++)
		rte_free(hw->fdir_prof[ptype]);

	rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			/* remove the flow by its profile ID, not the raw
			 * ptype, so tunnel profiles resolve correctly
			 */
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

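/*
 * Program one HW flow profile for the given ptype and hook two entries to
 * it: one for the main VSI (receives matched traffic) and one for the
 * FDIR control VSI (carries the programming packets). When an identical
 * segment is already installed, -EAGAIN tells the caller nothing needs
 * doing.
 */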
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

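/*
 * Translate an ICE_INSET_* bitmask into the list of ice_flow_field enums
 * understood by the flow engine. The caller pre-fills the output array
 * with ICE_FLOW_FIELD_IDX_MAX, which therefore acts as the terminator.
 */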
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint32_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

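/*
 * Build the flow segment(s) for one filter type and install them. Tunnel
 * flows take two segments: seg_tun[0] describes the outer headers (left
 * zeroed here) and seg_tun[1] receives a copy of the inner segment built
 * below.
 */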
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		/* -EAGAIN means an identical profile already exists */
		return (ret == -EAGAIN) ? 0 : ret;
	}

	return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		parser = &ice_fdir_parser_os;
	else
		return -EINVAL;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

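/*
 * Program (or remove) a filter in hardware: build the programming
 * descriptor, generate the matching dummy packet in the pre-reserved
 * memzone, and hand both to the FDIR programming queue.
 */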
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
			const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
			filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		/* also release the counter allocated above */
		goto free_counter;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		      struct rte_flow *flow,
		      struct rte_flow_query_count *flow_stats,
		      struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ACTION,
				  NULL,
				  "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32-bits latches the high 32-bits into a shadow
	 * register. Reading the high 32-bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
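
/*
 * Applications never call this engine directly; it is reached through the
 * generic rte_flow API once ice_fdir_init() has registered the parser.
 * A minimal sketch of a rule this engine would accept (illustrative only,
 * the queue index and IP address are made-up values):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,	 full mask, as required below
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */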
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue indexes for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	     (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total "
				   "number of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}

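/*
 * Walk the action list: exactly one fate action (queue, drop, passthru or
 * RSS queue region) is required, and at most one mark and one count
 * action may accompany it.
 */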
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
						error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
						sizeof(filter->act_count));

			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}

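/*
 * Walk the pattern items, derive the filter's flow type and collect the
 * input-set bits. Only full masks are honoured for most fields (e.g. an
 * IPv4 address mask must be all ones to contribute to the input set).
 */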
1373 static int
1374 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1375                        const struct rte_flow_item pattern[],
1376                        struct rte_flow_error *error,
1377                        struct ice_fdir_filter_conf *filter)
1378 {
1379         const struct rte_flow_item *item = pattern;
1380         enum rte_flow_item_type item_type;
1381         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1382         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1383         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1384         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1385         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1386         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1387         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1388         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1389         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1390         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1391         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1392         uint64_t input_set = ICE_INSET_NONE;
1393         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1394         uint8_t  ipv6_addr_mask[16] = {
1395                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1396                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1397         };
1398         uint32_t vtc_flow_cpu;
1399
1400
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Src MAC not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid MAC address mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

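				/*
				 * Note the deliberate src/dst swap below:
				 * the spec's addresses are stored reversed
				 * because the FDIR programming-packet
				 * builder in base/ice_fdir.c consumes them
				 * in that order; the same swap is applied
				 * for IPv6/TCP/UDP/SCTP further down.
				 */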
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

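				/*
				 * vtc_flow is version(4b) | TC(8b) |
				 * flow label(20b); shifting right by
				 * ICE_FDIR_IPV6_TC_OFFSET (20) and
				 * truncating to 8 bits extracts the
				 * Traffic Class field.
				 */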
				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

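			/*
			 * No VXLAN header fields (e.g. the VNI) can be
			 * matched; the item is only allowed to select the
			 * tunnel type, so any spec/mask is rejected.
			 */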
			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

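			/*
			 * Only the GTP_PSC item selects the GTPU tunnel
			 * type; a GTPU item alone leaves tunnel_type
			 * untouched.
			 */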
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}
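
/*
 * Illustrative sketch (not part of the original file): a pattern this
 * parser turns into an IPv4/UDP filter with fully-masked addresses and
 * ports:
 *
 *	struct rte_flow_item_ipv4 ip4_spec = { 0 }, ip4_mask = { 0 };
 *	struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *
 *	ip4_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1));
 *	ip4_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2));
 *	ip4_mask.hdr.src_addr = UINT32_MAX;
 *	ip4_mask.hdr.dst_addr = UINT32_MAX;
 *	udp_spec.hdr.src_port = rte_cpu_to_be_16(1024);
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(4096);
 *	udp_mask.hdr.src_port = UINT16_MAX;
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL, NULL },
 *		{ RTE_FLOW_ITEM_TYPE_IPV4, &ip4_spec, NULL, &ip4_mask },
 *		{ RTE_FLOW_ITEM_TYPE_UDP, &udp_spec, NULL, &udp_mask },
 *		{ RTE_FLOW_ITEM_TYPE_END, NULL, NULL, NULL },
 *	};
 */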

/*
 * Validate the pattern against the supported pattern/input-set table,
 * then fill pf->fdir.conf from the pattern and actions; on success
 * *meta points at that filter configuration.
 */
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
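	/* The collected input set must be non-empty and a subset of what
	 * the matched pattern template supports.
	 */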
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
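
/*
 * Both parser instances share the same parse routine and differ only in
 * their supported-pattern tables: one for the OS default DDP package and
 * one for the comms package; which of the two gets registered is
 * presumably decided elsewhere, based on the package loaded on the
 * device.
 */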

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}