net/ice: fix flow director
[dpdk.git] drivers/net/ice/ice_fdir_filter.c
1 #include <stdio.h>
2 #include <rte_flow.h>
3 #include <rte_hash.h>
4 #include <rte_hash_crc.h>
5 #include "base/ice_fdir.h"
6 #include "base/ice_flow.h"
7 #include "base/ice_type.h"
8 #include "ice_ethdev.h"
9 #include "ice_rxtx.h"
10 #include "ice_generic_flow.h"
11
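/*
 * The IPv6 Traffic Class field occupies bits 20..27 of the 32-bit
 * version/TC/flow-label word (vtc_flow), hence the shift used to build
 * the TC mask below.
 */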
12 #define ICE_FDIR_IPV6_TC_OFFSET         20
13 #define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
14
15 #define ICE_FDIR_MAX_QREGION_SIZE       128
16
17 #define ICE_FDIR_INSET_ETH_IPV4 (\
18         ICE_INSET_DMAC | \
19         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
20         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
21
22 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
23         ICE_FDIR_INSET_ETH_IPV4 | \
24         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
25
26 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
27         ICE_FDIR_INSET_ETH_IPV4 | \
28         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
29
30 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
31         ICE_FDIR_INSET_ETH_IPV4 | \
32         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
33
34 #define ICE_FDIR_INSET_ETH_IPV6 (\
35         ICE_INSET_DMAC | \
36         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
37         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
38
39 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
40         ICE_FDIR_INSET_ETH_IPV6 | \
41         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
42
43 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
44         ICE_FDIR_INSET_ETH_IPV6 | \
45         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
46
47 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
48         ICE_FDIR_INSET_ETH_IPV6 | \
49         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
50
51 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
52         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
53
54 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
55         ICE_FDIR_INSET_VXLAN_IPV4 | \
56         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
57
58 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
59         ICE_FDIR_INSET_VXLAN_IPV4 | \
60         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
61
62 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
63         ICE_FDIR_INSET_VXLAN_IPV4 | \
64         ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
65
66 #define ICE_FDIR_INSET_GTPU_IPV4 (\
67         ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
68
69 #define ICE_FDIR_INSET_GTPU_IPV4_TCP (\
70         ICE_FDIR_INSET_GTPU_IPV4)
71
72 #define ICE_FDIR_INSET_GTPU_IPV4_UDP (\
73         ICE_FDIR_INSET_GTPU_IPV4)
74
78 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
79         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
80         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
81         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
82         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
83         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
84         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
85         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
86         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
87         {pattern_eth_ipv4_udp_vxlan_ipv4,
88                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
89         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
90                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
91         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
92                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
93         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
94                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
95         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
96                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
97         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
98                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
99         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
100                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
101         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
102                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
103 };
104
105 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
106         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
107         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
108         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
109         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
110         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
111         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
112         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
113         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
114         {pattern_eth_ipv4_udp_vxlan_ipv4,
115                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
116         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
117                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
118         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
119                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
120         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
121                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
122         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
123                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
124         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
125                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
126         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
127                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
128         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
129                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
130         {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
131         {pattern_eth_ipv4_gtpu_ipv4_tcp,
132                                        ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
133         {pattern_eth_ipv4_gtpu_ipv4_udp,
134                                        ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
135 };
136
137 static struct ice_flow_parser ice_fdir_parser_os;
138 static struct ice_flow_parser ice_fdir_parser_comms;
139
140 static const struct rte_memzone *
141 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
142 {
143         return rte_memzone_reserve_aligned(name, len, socket_id,
144                                            RTE_MEMZONE_IOVA_CONTIG,
145                                            ICE_RING_BASE_ALIGN);
146 }
147
148 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
149
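/*
 * ice_fdir_prof_alloc - allocate FDIR HW profile storage
 * @hw: pointer to the hardware structure
 *
 * Allocates the table of per-ptype profile pointers plus one profile
 * structure for every supported filter ptype; on failure, everything
 * allocated so far is released again.
 */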
150 static int
151 ice_fdir_prof_alloc(struct ice_hw *hw)
152 {
153         enum ice_fltr_ptype ptype, fltr_ptype;
154
155         if (!hw->fdir_prof) {
156                 hw->fdir_prof = (struct ice_fd_hw_prof **)
157                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
158                                    sizeof(*hw->fdir_prof));
159                 if (!hw->fdir_prof)
160                         return -ENOMEM;
161         }
162         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
163              ptype < ICE_FLTR_PTYPE_MAX;
164              ptype++) {
165                 if (!hw->fdir_prof[ptype]) {
166                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
167                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
168                         if (!hw->fdir_prof[ptype])
169                                 goto fail_mem;
170                 }
171         }
172         return 0;
173
174 fail_mem:
175         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
176              fltr_ptype < ptype;
177              fltr_ptype++)
178                 rte_free(hw->fdir_prof[fltr_ptype]);
179         rte_free(hw->fdir_prof);
180         return -ENOMEM;
181 }
182
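/*
 * ice_fdir_counter_pool_add - add a pool of FDIR counters
 * @pf: board private structure
 * @container: per-PF counter pool container
 * @index_start: first HW counter index backing this pool
 * @len: number of counters in the pool
 *
 * The capacity check runs before the pool is linked into the container,
 * so the error path never leaves a dangling list entry behind.
 */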
183 static int
184 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
185                           struct ice_fdir_counter_pool_container *container,
186                           uint32_t index_start,
187                           uint32_t len)
188 {
189         struct ice_fdir_counter_pool *pool;
190         uint32_t i;
191         int ret = 0;
192
193         pool = rte_zmalloc("ice_fdir_counter_pool",
194                            sizeof(*pool) +
195                            sizeof(struct ice_fdir_counter) * len,
196                            0);
197         if (!pool) {
198                 PMD_INIT_LOG(ERR,
199                              "Failed to allocate memory for fdir counter pool");
200                 return -ENOMEM;
201         }
202
203         if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
204                 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
205                 ret = -EINVAL;
206                 goto free_pool;
207         }
208
209         TAILQ_INIT(&pool->counter_list);
210         TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
211
212         for (i = 0; i < len; i++) {
213                 struct ice_fdir_counter *counter = &pool->counters[i];
214
215                 counter->hw_index = index_start + i;
216                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
217         }
218
219         container->pools[container->index_free++] = pool;
220         return 0;
221
222 free_pool:
223         rte_free(pool);
224         return ret;
225 }
226
227 static int
228 ice_fdir_counter_init(struct ice_pf *pf)
229 {
230         struct ice_hw *hw = ICE_PF_TO_HW(pf);
231         struct ice_fdir_info *fdir_info = &pf->fdir;
232         struct ice_fdir_counter_pool_container *container =
233                                 &fdir_info->counter;
234         uint32_t cnt_index, len;
235         int ret;
236
237         TAILQ_INIT(&container->pool_list);
238
239         cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
240         len = ICE_FDIR_COUNTERS_PER_BLOCK;
241
242         ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
243         if (ret) {
244                 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
245                 return ret;
246         }
247
248         return 0;
249 }
250
251 static int
252 ice_fdir_counter_release(struct ice_pf *pf)
253 {
254         struct ice_fdir_info *fdir_info = &pf->fdir;
255         struct ice_fdir_counter_pool_container *container =
256                                 &fdir_info->counter;
257         uint8_t i;
258
259         for (i = 0; i < container->index_free; i++)
260                 rte_free(container->pools[i]);
261
262         return 0;
263 }
264
265 static struct ice_fdir_counter *
266 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
267                                         *container,
268                                uint32_t id)
269 {
270         struct ice_fdir_counter_pool *pool;
271         struct ice_fdir_counter *counter;
272         int i;
273
274         TAILQ_FOREACH(pool, &container->pool_list, next) {
275                 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
276                         counter = &pool->counters[i];
277
278                         if (counter->shared &&
279                             counter->ref_cnt &&
280                             counter->id == id)
281                                 return counter;
282                 }
283         }
284
285         return NULL;
286 }
287
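/*
 * ice_fdir_counter_alloc - allocate a HW FDIR counter
 * @pf: board private structure
 * @shared: nonzero if the counter may be shared between rules
 * @id: counter ID used for the shared lookup
 *
 * A shared counter is first looked up by ID and reference-counted;
 * otherwise the first free counter of the first non-empty pool is
 * taken and its statistics registers are cleared.
 */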
288 static struct ice_fdir_counter *
289 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
290 {
291         struct ice_hw *hw = ICE_PF_TO_HW(pf);
292         struct ice_fdir_info *fdir_info = &pf->fdir;
293         struct ice_fdir_counter_pool_container *container =
294                                 &fdir_info->counter;
295         struct ice_fdir_counter_pool *pool = NULL;
296         struct ice_fdir_counter *counter_free = NULL;
297
298         if (shared) {
299                 counter_free = ice_fdir_counter_shared_search(container, id);
300                 if (counter_free) {
301                         if (counter_free->ref_cnt + 1 == 0) {
302                                 rte_errno = E2BIG;
303                                 return NULL;
304                         }
305                         counter_free->ref_cnt++;
306                         return counter_free;
307                 }
308         }
309
310         TAILQ_FOREACH(pool, &container->pool_list, next) {
311                 counter_free = TAILQ_FIRST(&pool->counter_list);
312                 if (counter_free)
313                         break;
314                 counter_free = NULL;
315         }
316
317         if (!counter_free) {
318                 PMD_DRV_LOG(ERR, "No free counter found");
319                 return NULL;
320         }
321
322         counter_free->shared = shared;
323         counter_free->id = id;
324         counter_free->ref_cnt = 1;
325         counter_free->pool = pool;
326
327         /* reset the statistics counter value */
328         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
329         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
330
331         TAILQ_REMOVE(&pool->counter_list, counter_free, next);
332         if (TAILQ_EMPTY(&pool->counter_list)) {
333                 TAILQ_REMOVE(&container->pool_list, pool, next);
334                 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
335         }
336
337         return counter_free;
338 }
339
340 static void
341 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
342                       struct ice_fdir_counter *counter)
343 {
344         if (!counter)
345                 return;
346
347         if (--counter->ref_cnt == 0) {
348                 struct ice_fdir_counter_pool *pool = counter->pool;
349
350                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
351         }
352 }
353
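/*
 * ice_fdir_init_filter_list - set up SW bookkeeping for FDIR rules
 * @pf: board private structure
 *
 * Creates a hash table keyed by the filter pattern (for duplicate
 * detection and lookup) and a map from hash index to the stored entry.
 */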
354 static int
355 ice_fdir_init_filter_list(struct ice_pf *pf)
356 {
357         struct rte_eth_dev *dev = pf->adapter->eth_dev;
358         struct ice_fdir_info *fdir_info = &pf->fdir;
359         char fdir_hash_name[RTE_HASH_NAMESIZE];
360         int ret;
361
362         struct rte_hash_parameters fdir_hash_params = {
363                 .name = fdir_hash_name,
364                 .entries = ICE_MAX_FDIR_FILTER_NUM,
365                 .key_len = sizeof(struct ice_fdir_fltr_pattern),
366                 .hash_func = rte_hash_crc,
367                 .hash_func_init_val = 0,
368                 .socket_id = rte_socket_id(),
369                 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
370         };
371
372         /* Initialize hash */
373         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
374                  "fdir_%s", dev->device->name);
375         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
376         if (!fdir_info->hash_table) {
377                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
378                 return -EINVAL;
379         }
380         fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
381                                           sizeof(*fdir_info->hash_map) *
382                                           ICE_MAX_FDIR_FILTER_NUM,
383                                           0);
384         if (!fdir_info->hash_map) {
385                 PMD_INIT_LOG(ERR,
386                              "Failed to allocate memory for fdir hash map!");
387                 ret = -ENOMEM;
388                 goto err_fdir_hash_map_alloc;
389         }
390         return 0;
391
392 err_fdir_hash_map_alloc:
393         rte_hash_free(fdir_info->hash_table);
394
395         return ret;
396 }
397
398 static void
399 ice_fdir_release_filter_list(struct ice_pf *pf)
400 {
401         struct ice_fdir_info *fdir_info = &pf->fdir;
402
403         if (fdir_info->hash_map)
404                 rte_free(fdir_info->hash_map);
405         if (fdir_info->hash_table)
406                 rte_hash_free(fdir_info->hash_table);
407 }
408
409 /*
410  * ice_fdir_setup - reserve and initialize the Flow Director resources
411  * @pf: board private structure
412  */
413 static int
414 ice_fdir_setup(struct ice_pf *pf)
415 {
416         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
417         struct ice_hw *hw = ICE_PF_TO_HW(pf);
418         const struct rte_memzone *mz = NULL;
419         char z_name[RTE_MEMZONE_NAMESIZE];
420         struct ice_vsi *vsi;
421         int err = ICE_SUCCESS;
422
423         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
424                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
425                 return -ENOTSUP;
426         }
427
428         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
429                     " fd_fltr_best_effort = %u.",
430                     hw->func_caps.fd_fltr_guar,
431                     hw->func_caps.fd_fltr_best_effort);
432
433         if (pf->fdir.fdir_vsi) {
434                 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
435                 return ICE_SUCCESS;
436         }
437
438         /* make new FDIR VSI */
439         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
440         if (!vsi) {
441                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
442                 return -EINVAL;
443         }
444         pf->fdir.fdir_vsi = vsi;
445
446         err = ice_fdir_init_filter_list(pf);
447         if (err) {
448                 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
449                 return -EINVAL;
450         }
451
452         err = ice_fdir_counter_init(pf);
453         if (err) {
454                 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
455                 return -EINVAL;
456         }
457
458         /* FDIR TX queue setup */
459         err = ice_fdir_setup_tx_resources(pf);
460         if (err) {
461                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
462                 goto fail_setup_tx;
463         }
464
465         /* FDIR RX queue setup */
466         err = ice_fdir_setup_rx_resources(pf);
467         if (err) {
468                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
469                 goto fail_setup_rx;
470         }
471
472         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
473         if (err) {
474                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
475                 goto fail_mem;
476         }
477
478         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
479         if (err) {
480                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
481                 goto fail_mem;
482         }
483
484         /* reserve memory for the fdir programming packet */
485         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
486                  ICE_FDIR_MZ_NAME,
487                  eth_dev->data->port_id);
488         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
489         if (!mz) {
490                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
491                             "flow director programming packet.");
492                 err = -ENOMEM;
493                 goto fail_mem;
494         }
495         pf->fdir.prg_pkt = mz->addr;
496         pf->fdir.dma_addr = mz->iova;
497
498         err = ice_fdir_prof_alloc(hw);
499         if (err) {
500                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
501                             "flow director profile.");
502                 err = -ENOMEM;
503                 goto fail_mem;
504         }
505
506         PMD_DRV_LOG(INFO, "FDIR set up successfully, with programming queue %u.",
507                     vsi->base_queue);
508         return ICE_SUCCESS;
509
510 fail_mem:
511         ice_rx_queue_release(pf->fdir.rxq);
512         pf->fdir.rxq = NULL;
513 fail_setup_rx:
514         ice_tx_queue_release(pf->fdir.txq);
515         pf->fdir.txq = NULL;
516 fail_setup_tx:
517         ice_release_vsi(vsi);
518         pf->fdir.fdir_vsi = NULL;
519         return err;
520 }
521
522 static void
523 ice_fdir_prof_free(struct ice_hw *hw)
524 {
525         enum ice_fltr_ptype ptype;
526
527         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
528              ptype < ICE_FLTR_PTYPE_MAX;
529              ptype++)
530                 rte_free(hw->fdir_prof[ptype]);
531
532         rte_free(hw->fdir_prof);
533 }
534
535 /* Remove the profile for a given filter type */
536 static void
537 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
538 {
539         struct ice_hw *hw = ICE_PF_TO_HW(pf);
540         struct ice_fd_hw_prof *hw_prof;
541         uint64_t prof_id;
542         uint16_t vsi_num;
543         int i;
544
545         if (!hw->fdir_prof || !hw->fdir_prof[ptype])
546                 return;
547
548         hw_prof = hw->fdir_prof[ptype];
549
550         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
551         for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
552                 if (hw_prof->entry_h[i][is_tunnel]) {
553                         vsi_num = ice_get_hw_vsi_num(hw,
554                                                      hw_prof->vsi_h[i]);
555                         ice_rem_prof_id_flow(hw, ICE_BLK_FD,
556                                              vsi_num, prof_id);
557                         ice_flow_rem_entry(hw,
558                                            hw_prof->entry_h[i][is_tunnel]);
559                         hw_prof->entry_h[i][is_tunnel] = 0;
560                 }
561         }
562         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
563         rte_free(hw_prof->fdir_seg[is_tunnel]);
564         hw_prof->fdir_seg[is_tunnel] = NULL;
565
566         for (i = 0; i < hw_prof->cnt; i++)
567                 hw_prof->vsi_h[i] = 0;
568         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
569 }
570
571 /* Remove all created profiles */
572 static void
573 ice_fdir_prof_rm_all(struct ice_pf *pf)
574 {
575         enum ice_fltr_ptype ptype;
576
577         for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
578              ptype < ICE_FLTR_PTYPE_MAX;
579              ptype++) {
580                 ice_fdir_prof_rm(pf, ptype, false);
581                 ice_fdir_prof_rm(pf, ptype, true);
582         }
583 }
584
585 /*
586  * ice_fdir_teardown - release the Flow Director resources
587  * @pf: board private structure
588  */
589 static void
590 ice_fdir_teardown(struct ice_pf *pf)
591 {
592         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
593         struct ice_hw *hw = ICE_PF_TO_HW(pf);
594         struct ice_vsi *vsi;
595         int err;
596
597         vsi = pf->fdir.fdir_vsi;
598         if (!vsi)
599                 return;
600
601         err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
602         if (err)
603                 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
604
605         err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
606         if (err)
607                 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
608
609         err = ice_fdir_counter_release(pf);
610         if (err)
611                 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
612
613         ice_fdir_release_filter_list(pf);
614
615         ice_tx_queue_release(pf->fdir.txq);
616         pf->fdir.txq = NULL;
617         ice_rx_queue_release(pf->fdir.rxq);
618         pf->fdir.rxq = NULL;
619         ice_fdir_prof_rm_all(pf);
620         ice_fdir_prof_free(hw);
621         ice_release_vsi(vsi);
622         pf->fdir.fdir_vsi = NULL;
623 }
624
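/*
 * ice_fdir_hw_tbl_conf - program a FDIR HW profile
 * @pf: board private structure
 *
 * Adds one flow profile for the given ptype and two flow entries for
 * it: one for the main VSI and one for the FDIR control VSI. Returns
 * -EAGAIN when an identical segment is already installed.
 */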
625 static int
626 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
627                      struct ice_vsi *ctrl_vsi,
628                      struct ice_flow_seg_info *seg,
629                      enum ice_fltr_ptype ptype,
630                      bool is_tunnel)
631 {
632         struct ice_hw *hw = ICE_PF_TO_HW(pf);
633         enum ice_flow_dir dir = ICE_FLOW_RX;
634         struct ice_flow_seg_info *ori_seg;
635         struct ice_fd_hw_prof *hw_prof;
636         struct ice_flow_prof *prof;
637         uint64_t entry_1 = 0;
638         uint64_t entry_2 = 0;
639         uint16_t vsi_num;
640         int ret;
641         uint64_t prof_id;
642
643         hw_prof = hw->fdir_prof[ptype];
644         ori_seg = hw_prof->fdir_seg[is_tunnel];
645         if (ori_seg) {
646                 if (!is_tunnel) {
647                         if (!memcmp(ori_seg, seg, sizeof(*seg)))
648                                 return -EAGAIN;
649                 } else {
650                         if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
651                                 return -EAGAIN;
652                 }
653
654                 if (pf->fdir_fltr_cnt[ptype][is_tunnel])
655                         return -EINVAL;
656
657                 ice_fdir_prof_rm(pf, ptype, is_tunnel);
658         }
659
660         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
661         ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
662                                 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
663         if (ret)
664                 return ret;
665         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
666                                  vsi->idx, ICE_FLOW_PRIO_NORMAL,
667                                  seg, NULL, 0, &entry_1);
668         if (ret) {
669                 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
670                             ptype);
671                 goto err_add_prof;
672         }
673         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
674                                  ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
675                                  seg, NULL, 0, &entry_2);
676         if (ret) {
677                 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
678                             ptype);
679                 goto err_add_entry;
680         }
681
682         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
683         hw_prof->cnt = 0;
684         hw_prof->fdir_seg[is_tunnel] = seg;
685         hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
686         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
687         pf->hw_prof_cnt[ptype][is_tunnel]++;
688         hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
689         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
690         pf->hw_prof_cnt[ptype][is_tunnel]++;
691
692         return ret;
693
694 err_add_entry:
695         vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
696         ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
697         ice_flow_rem_entry(hw, entry_1);
698 err_add_prof:
699         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
700
701         return ret;
702 }
703
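/*
 * ice_fdir_input_set_parse - map an input set to flow fields
 *
 * Translates an ICE_INSET_* bitmap into the ordered list of
 * ICE_FLOW_FIELD_IDX_* values used by the base flow code; e.g.
 * ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST yields
 * ICE_FLOW_FIELD_IDX_IPV4_SA followed by ICE_FLOW_FIELD_IDX_IPV4_DA.
 */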
704 static void
705 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
706 {
707         uint32_t i, j;
708
709         struct ice_inset_map {
710                 uint64_t inset;
711                 enum ice_flow_field fld;
712         };
713         static const struct ice_inset_map ice_inset_map[] = {
714                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
715                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
716                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
717                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
718                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
719                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
720                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
721                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
722                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
723                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
724                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
725                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
726                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
727                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
728                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
729                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
730                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
731                 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
732                 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
733                 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
734                 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
735                 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
736                 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
737                 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
738                 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
739                 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
740                 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
741         };
742
743         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
744                 if ((inset & ice_inset_map[i].inset) ==
745                     ice_inset_map[i].inset)
746                         field[j++] = ice_inset_map[i].fld;
747         }
748 }
749
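/*
 * ice_fdir_input_set_conf - build and install flow segments
 *
 * Builds the flow segment(s) for the given filter ptype and input set
 * and installs them in HW. A tunnel profile uses two segments (outer
 * and inner); only the inner segment carries the parsed match fields.
 */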
750 static int
751 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
752                         uint64_t input_set, bool is_tunnel)
753 {
754         struct ice_flow_seg_info *seg;
755         struct ice_flow_seg_info *seg_tun = NULL;
756         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
757         int i, ret;
758
759         if (!input_set)
760                 return -EINVAL;
761
762         seg = (struct ice_flow_seg_info *)
763                 ice_malloc(hw, sizeof(*seg));
764         if (!seg) {
765                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
766                 return -ENOMEM;
767         }
768
769         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
770                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
771         ice_fdir_input_set_parse(input_set, field);
772
773         switch (flow) {
774         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
775                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
776                                   ICE_FLOW_SEG_HDR_IPV4);
777                 break;
778         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
779                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
780                                   ICE_FLOW_SEG_HDR_IPV4);
781                 break;
782         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
783                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
784                                   ICE_FLOW_SEG_HDR_IPV4);
785                 break;
786         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
787                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
788                 break;
789         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
790                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
791                                   ICE_FLOW_SEG_HDR_IPV6);
792                 break;
793         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
794                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
795                                   ICE_FLOW_SEG_HDR_IPV6);
796                 break;
797         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
798                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
799                                   ICE_FLOW_SEG_HDR_IPV6);
800                 break;
801         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
802                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
803                 break;
804         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
805         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
806         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
807         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
808                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
809                                   ICE_FLOW_SEG_HDR_IPV4);
810                 break;
811         default:
812                 PMD_DRV_LOG(ERR, "not supported filter type.");
813                 break;
814         }
815
816         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
817                 ice_flow_set_fld(seg, field[i],
818                                  ICE_FLOW_FLD_OFF_INVAL,
819                                  ICE_FLOW_FLD_OFF_INVAL,
820                                  ICE_FLOW_FLD_OFF_INVAL, false);
821         }
822
823         if (!is_tunnel) {
824                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
825                                            seg, flow, false);
826         } else {
827                 seg_tun = (struct ice_flow_seg_info *)
828                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
829                 if (!seg_tun) {
830                         PMD_DRV_LOG(ERR, "Failed to allocate memory");
831                         rte_free(seg);
832                         return -ENOMEM;
833                 }
834                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
835                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
836                                            seg_tun, flow, true);
837         }
838
839         if (!ret) {
840                 return ret;
841         } else if (ret < 0) {
842                 rte_free(seg);
843                 if (is_tunnel)
844                         rte_free(seg_tun);
845                 return (ret == -EAGAIN) ? 0 : ret;
846         } else {
847                 return ret;
848         }
849 }
850
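/* Track the number of active FDIR rules, globally and per ptype. */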
851 static void
852 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
853                     bool is_tunnel, bool add)
854 {
855         struct ice_hw *hw = ICE_PF_TO_HW(pf);
856         int cnt;
857
858         cnt = (add) ? 1 : -1;
859         hw->fdir_active_fltr += cnt;
860         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
861                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
862         else
863                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
864 }
865
866 static int
867 ice_fdir_init(struct ice_adapter *ad)
868 {
869         struct ice_pf *pf = &ad->pf;
870         struct ice_flow_parser *parser;
871         int ret;
872
873         ret = ice_fdir_setup(pf);
874         if (ret)
875                 return ret;
876
877         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
878                 parser = &ice_fdir_parser_comms;
879         else
880                 parser = &ice_fdir_parser_os;
881
882         return ice_register_parser(parser, ad);
883 }
884
885 static void
886 ice_fdir_uninit(struct ice_adapter *ad)
887 {
888         struct ice_pf *pf = &ad->pf;
889         struct ice_flow_parser *parser;
890
891         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
892                 parser = &ice_fdir_parser_comms;
893         else
894                 parser = &ice_fdir_parser_os;
895
896         ice_unregister_parser(parser, ad);
897
898         ice_fdir_teardown(pf);
899 }
900
901 static int
902 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
903 {
904         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
905                 return 1;
906         else
907                 return 0;
908 }
909
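/*
 * ice_fdir_add_del_filter - program or remove one FDIR rule in HW
 *
 * Builds the programming descriptor and the matching dummy packet for
 * the rule, then pushes both through the FDIR programming queue.
 */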
910 static int
911 ice_fdir_add_del_filter(struct ice_pf *pf,
912                         struct ice_fdir_filter_conf *filter,
913                         bool add)
914 {
915         struct ice_fltr_desc desc;
916         struct ice_hw *hw = ICE_PF_TO_HW(pf);
917         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
918         bool is_tun;
919         int ret;
920
921         filter->input.dest_vsi = pf->main_vsi->idx;
922
923         memset(&desc, 0, sizeof(desc));
924         ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
925
926         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
927
928         memset(pkt, 0, ICE_FDIR_PKT_LEN);
929         ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
930         if (ret) {
931                 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
932                 return -EINVAL;
933         }
934
935         return ice_fdir_programming(pf, &desc);
936 }
937
938 static void
939 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
940                           struct ice_fdir_filter_conf *filter)
941 {
942         struct ice_fdir_fltr *input = &filter->input;
943         memset(key, 0, sizeof(*key));
944
945         key->flow_type = input->flow_type;
946         rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
947         rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
948         rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
949         rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
950
951         rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
952         rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
953
954         key->tunnel_type = filter->tunnel_type;
955 }
956
957 /* Check whether the flow director filter already exists */
958 static struct ice_fdir_filter_conf *
959 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
960                         const struct ice_fdir_fltr_pattern *key)
961 {
962         int ret;
963
964         ret = rte_hash_lookup(fdir_info->hash_table, key);
965         if (ret < 0)
966                 return NULL;
967
968         return fdir_info->hash_map[ret];
969 }
970
971 /* Add a flow director entry into the SW list */
972 static int
973 ice_fdir_entry_insert(struct ice_pf *pf,
974                       struct ice_fdir_filter_conf *entry,
975                       struct ice_fdir_fltr_pattern *key)
976 {
977         struct ice_fdir_info *fdir_info = &pf->fdir;
978         int ret;
979
980         ret = rte_hash_add_key(fdir_info->hash_table, key);
981         if (ret < 0) {
982                 PMD_DRV_LOG(ERR,
983                             "Failed to insert fdir entry into hash table: %d!",
984                             ret);
985                 return ret;
986         }
987         fdir_info->hash_map[ret] = entry;
988
989         return 0;
990 }
991
992 /* Delete a flow director entry from the SW list */
993 static int
994 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
995 {
996         struct ice_fdir_info *fdir_info = &pf->fdir;
997         int ret;
998
999         ret = rte_hash_del_key(fdir_info->hash_table, key);
1000         if (ret < 0) {
1001                 PMD_DRV_LOG(ERR,
1002                             "Failed to delete fdir filter from hash table: %d!",
1003                             ret);
1004                 return ret;
1005         }
1006         fdir_info->hash_map[ret] = NULL;
1007
1008         return 0;
1009 }
1010
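/*
 * ice_fdir_create_filter - create an FDIR rule from a parsed rte_flow
 *
 * An illustrative testpmd command that would land here (port ID and
 * match values are made up for the example):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        udp src is 32 / end actions queue index 2 / end
 */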
1011 static int
1012 ice_fdir_create_filter(struct ice_adapter *ad,
1013                        struct rte_flow *flow,
1014                        void *meta,
1015                        struct rte_flow_error *error)
1016 {
1017         struct ice_pf *pf = &ad->pf;
1018         struct ice_fdir_filter_conf *filter = meta;
1019         struct ice_fdir_info *fdir_info = &pf->fdir;
1020         struct ice_fdir_filter_conf *entry, *node;
1021         struct ice_fdir_fltr_pattern key;
1022         bool is_tun;
1023         int ret;
1024
1025         ice_fdir_extract_fltr_key(&key, filter);
1026         node = ice_fdir_entry_lookup(fdir_info, &key);
1027         if (node) {
1028                 rte_flow_error_set(error, EEXIST,
1029                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1030                                    "Rule already exists!");
1031                 return -rte_errno;
1032         }
1033
1034         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1035         if (!entry) {
1036                 rte_flow_error_set(error, ENOMEM,
1037                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1038                                    "Failed to allocate memory");
1039                 return -rte_errno;
1040         }
1041
1042         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1043
1044         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1045                         filter->input_set, is_tun);
1046         if (ret) {
1047                 rte_flow_error_set(error, -ret,
1048                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1049                                    "Profile configure failed.");
1050                 goto free_entry;
1051         }
1052
1053         /* alloc counter for FDIR */
1054         if (filter->input.cnt_ena) {
1055                 struct rte_flow_action_count *act_count = &filter->act_count;
1056
1057                 filter->counter = ice_fdir_counter_alloc(pf,
1058                                                          act_count->shared,
1059                                                          act_count->id);
1060                 if (!filter->counter) {
1061                         rte_flow_error_set(error, EINVAL,
1062                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1063                                         "Failed to alloc FDIR counter.");
1064                         goto free_entry;
1065                 }
1066                 filter->input.cnt_index = filter->counter->hw_index;
1067         }
1068
1069         ret = ice_fdir_add_del_filter(pf, filter, true);
1070         if (ret) {
1071                 rte_flow_error_set(error, -ret,
1072                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1073                                    "Add filter rule failed.");
1074                 goto free_counter;
1075         }
1076
1077         rte_memcpy(entry, filter, sizeof(*entry));
1078         ret = ice_fdir_entry_insert(pf, entry, &key);
1079         if (ret) {
1080                 rte_flow_error_set(error, -ret,
1081                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1082                                    "Insert entry to table failed.");
1083                 goto free_entry;
1084         }
1085
1086         flow->rule = entry;
1087         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1088
1089         return 0;
1090
1091 free_counter:
1092         if (filter->counter) {
1093                 ice_fdir_counter_free(pf, filter->counter);
1094                 filter->counter = NULL;
1095         }
1096
1097 free_entry:
1098         rte_free(entry);
1099         return -rte_errno;
1100 }
1101
1102 static int
1103 ice_fdir_destroy_filter(struct ice_adapter *ad,
1104                         struct rte_flow *flow,
1105                         struct rte_flow_error *error)
1106 {
1107         struct ice_pf *pf = &ad->pf;
1108         struct ice_fdir_info *fdir_info = &pf->fdir;
1109         struct ice_fdir_filter_conf *filter, *entry;
1110         struct ice_fdir_fltr_pattern key;
1111         bool is_tun;
1112         int ret;
1113
1114         filter = (struct ice_fdir_filter_conf *)flow->rule;
1115
1116         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1117
1118         if (filter->counter) {
1119                 ice_fdir_counter_free(pf, filter->counter);
1120                 filter->counter = NULL;
1121         }
1122
1123         ice_fdir_extract_fltr_key(&key, filter);
1124         entry = ice_fdir_entry_lookup(fdir_info, &key);
1125         if (!entry) {
1126                 rte_flow_error_set(error, ENOENT,
1127                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1128                                    "Can't find entry.");
1129                 return -rte_errno;
1130         }
1131
1132         ret = ice_fdir_add_del_filter(pf, filter, false);
1133         if (ret) {
1134                 rte_flow_error_set(error, -ret,
1135                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1136                                    "Del filter rule failed.");
1137                 return -rte_errno;
1138         }
1139
1140         ret = ice_fdir_entry_del(pf, &key);
1141         if (ret) {
1142                 rte_flow_error_set(error, -ret,
1143                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1144                                    "Remove entry from table failed.");
1145                 return -rte_errno;
1146         }
1147
1148         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1149         flow->rule = NULL;
1150
1151         rte_free(filter);
1152
1153         return 0;
1154 }
1155
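/* Read, and optionally reset, the hit counter attached to a rule. */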
1156 static int
1157 ice_fdir_query_count(struct ice_adapter *ad,
1158                       struct rte_flow *flow,
1159                       struct rte_flow_query_count *flow_stats,
1160                       struct rte_flow_error *error)
1161 {
1162         struct ice_pf *pf = &ad->pf;
1163         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1164         struct ice_fdir_filter_conf *filter = flow->rule;
1165         struct ice_fdir_counter *counter = filter->counter;
1166         uint64_t hits_lo, hits_hi;
1167
1168         if (!counter) {
1169                 rte_flow_error_set(error, EINVAL,
1170                                   RTE_FLOW_ERROR_TYPE_ACTION,
1171                                   NULL,
1172                                   "FDIR counters not available");
1173                 return -rte_errno;
1174         }
1175
1176         /*
1177          * Reading the low 32 bits latches the high 32 bits into a shadow
1178          * register. Reading the high 32 bits then returns the value held
1179          * in the shadow register.
1180          */
1181         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1182         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1183
1184         flow_stats->hits_set = 1;
1185         flow_stats->hits = hits_lo | (hits_hi << 32);
1186         flow_stats->bytes_set = 0;
1187         flow_stats->bytes = 0;
1188
1189         if (flow_stats->reset) {
1190                 /* reset the statistics counter value */
1191                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1192                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1193         }
1194
1195         return 0;
1196 }
1197
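/* Engine ops registered with the ice generic flow framework. */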
1198 static struct ice_flow_engine ice_fdir_engine = {
1199         .init = ice_fdir_init,
1200         .uninit = ice_fdir_uninit,
1201         .create = ice_fdir_create_filter,
1202         .destroy = ice_fdir_destroy_filter,
1203         .query_count = ice_fdir_query_count,
1204         .type = ICE_FLOW_ENGINE_FDIR,
1205 };
1206
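/*
 * The RSS action is reused here to describe a queue region: the queues
 * must be contiguous and their count a power of two no larger than
 * ICE_FDIR_MAX_QREGION_SIZE, e.g. a region spanning queues 4..7.
 */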
1207 static int
1208 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1209                               struct rte_flow_error *error,
1210                               const struct rte_flow_action *act,
1211                               struct ice_fdir_filter_conf *filter)
1212 {
1213         const struct rte_flow_action_rss *rss = act->conf;
1214         uint32_t i;
1215
1216         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1217                 rte_flow_error_set(error, EINVAL,
1218                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1219                                    "Invalid action.");
1220                 return -rte_errno;
1221         }
1222
1223         if (rss->queue_num <= 1) {
1224                 rte_flow_error_set(error, EINVAL,
1225                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1226                                    "Queue region size can't be 0 or 1.");
1227                 return -rte_errno;
1228         }
1229
1230         /* check that the queue indexes of the region are continuous */
1231         for (i = 0; i < rss->queue_num - 1; i++) {
1232                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1233                         rte_flow_error_set(error, EINVAL,
1234                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1235                                            "Discontinuous queue region");
1236                         return -rte_errno;
1237                 }
1238         }
1239
1240         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1241                 rte_flow_error_set(error, EINVAL,
1242                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1243                                    "Invalid queue region indexes.");
1244                 return -rte_errno;
1245         }
1246
1247         if (!(rte_is_power_of_2(rss->queue_num) &&
1248              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1249                 rte_flow_error_set(error, EINVAL,
1250                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1251                                    "The region size must be one of the following values: "
1252                                    "1, 2, 4, 8, 16, 32, 64, 128, provided the total number "
1253                                    "of queues does not exceed the VSI allocation.");
1254                 return -rte_errno;
1255         }
1256
1257         filter->input.q_index = rss->queue[0];
1258         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1259         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1260
1261         return 0;
1262 }
1263
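/*
 * ice_fdir_parse_action - parse the action list of an FDIR rule
 *
 * Fills the filter's destination (queue, drop, passthru or queue
 * region) plus the optional MARK id and COUNT configuration; exactly
 * one destination action is accepted per rule.
 */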
1264 static int
1265 ice_fdir_parse_action(struct ice_adapter *ad,
1266                       const struct rte_flow_action actions[],
1267                       struct rte_flow_error *error,
1268                       struct ice_fdir_filter_conf *filter)
1269 {
1270         struct ice_pf *pf = &ad->pf;
1271         const struct rte_flow_action_queue *act_q;
1272         const struct rte_flow_action_mark *mark_spec = NULL;
1273         const struct rte_flow_action_count *act_count;
1274         uint32_t dest_num = 0;
1275         uint32_t mark_num = 0;
1276         uint32_t counter_num = 0;
1277         int ret;
1278
1279         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1280                 switch (actions->type) {
1281                 case RTE_FLOW_ACTION_TYPE_VOID:
1282                         break;
1283                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1284                         dest_num++;
1285
1286                         act_q = actions->conf;
1287                         filter->input.q_index = act_q->index;
1288                         if (filter->input.q_index >=
1289                                         pf->dev_data->nb_rx_queues) {
1290                                 rte_flow_error_set(error, EINVAL,
1291                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1292                                                    actions,
1293                                                    "Invalid queue for FDIR.");
1294                                 return -rte_errno;
1295                         }
1296                         filter->input.dest_ctl =
1297                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1298                         break;
1299                 case RTE_FLOW_ACTION_TYPE_DROP:
1300                         dest_num++;
1301
1302                         filter->input.dest_ctl =
1303                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1304                         break;
1305                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1306                         dest_num++;
1307
1308                         filter->input.dest_ctl =
1309                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1310                         filter->input.q_index = 0;
1311                         break;
1312                 case RTE_FLOW_ACTION_TYPE_RSS:
1313                         dest_num++;
1314
1315                         ret = ice_fdir_parse_action_qregion(pf,
1316                                                 error, actions, filter);
1317                         if (ret)
1318                                 return ret;
1319                         break;
1320                 case RTE_FLOW_ACTION_TYPE_MARK:
1321                         mark_num++;
1322
1323                         mark_spec = actions->conf;
1324                         filter->input.fltr_id = mark_spec->id;
1325                         break;
1326                 case RTE_FLOW_ACTION_TYPE_COUNT:
1327                         counter_num++;
1328
1329                         act_count = actions->conf;
1330                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1331                         rte_memcpy(&filter->act_count, act_count,
1332                                                 sizeof(filter->act_count));
1333
1334                         break;
1335                 default:
1336                         rte_flow_error_set(error, EINVAL,
1337                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1338                                    "Invalid action.");
1339                         return -rte_errno;
1340                 }
1341         }
1342
1343         if (dest_num == 0 || dest_num >= 2) {
1344                 rte_flow_error_set(error, EINVAL,
1345                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1346                            "Unsupported action combination");
1347                 return -rte_errno;
1348         }
1349
1350         if (mark_num >= 2) {
1351                 rte_flow_error_set(error, EINVAL,
1352                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1353                            "Too many mark actions");
1354                 return -rte_errno;
1355         }
1356
1357         if (counter_num >= 2) {
1358                 rte_flow_error_set(error, EINVAL,
1359                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1360                            "Too many count actions");
1361                 return -rte_errno;
1362         }
1363
1364         return 0;
1365 }
1366
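/*
 * ice_fdir_parse_pattern - parse the pattern items of an FDIR rule
 *
 * Derives the filter ptype and the input-set bitmap later consumed by
 * ice_fdir_input_set_conf(); for most headers only fully-masked fields
 * are accepted.
 */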
1367 static int
1368 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1369                        const struct rte_flow_item pattern[],
1370                        struct rte_flow_error *error,
1371                        struct ice_fdir_filter_conf *filter)
1372 {
1373         const struct rte_flow_item *item = pattern;
1374         enum rte_flow_item_type item_type;
1375         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1376         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1377         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1378         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1379         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1380         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1381         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1382         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1383         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1384         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1385         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1386         uint64_t input_set = ICE_INSET_NONE;
1387         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1388         uint8_t  ipv6_addr_mask[16] = {
1389                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1390                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1391         };
1392         uint32_t vtc_flow_cpu;
1393
1395         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1396                 if (item->last) {
1397                         rte_flow_error_set(error, EINVAL,
1398                                         RTE_FLOW_ERROR_TYPE_ITEM,
1399                                         item,
1400                                         "Range not supported");
1401                         return -rte_errno;
1402                 }
1403                 item_type = item->type;
1404
1405                 switch (item_type) {
1406                 case RTE_FLOW_ITEM_TYPE_ETH:
1407                         eth_spec = item->spec;
1408                         eth_mask = item->mask;
1409
1410                         if (eth_spec && eth_mask) {
1411                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1412                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1413                                         rte_flow_error_set(error, EINVAL,
1414                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1415                                                 item,
1416                                                 "Src MAC not supported");
1417                                         return -rte_errno;
1418                                 }
1419
1420                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1421                                         rte_flow_error_set(error, EINVAL,
1422                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1423                                                 item,
1424                                                 "Invalid MAC address mask");
1425                                         return -rte_errno;
1426                                 }
1427
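                                     /* Only a fully masked destination MAC is
                                      * accepted into the input set.
                                      */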
1428                                 input_set |= ICE_INSET_DMAC;
1429                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1430                                            &eth_spec->dst,
1431                                            RTE_ETHER_ADDR_LEN);
1432                         }
1433                         break;
1434                 case RTE_FLOW_ITEM_TYPE_IPV4:
1435                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1436                         ipv4_spec = item->spec;
1437                         ipv4_mask = item->mask;
1438
1439                         if (ipv4_spec && ipv4_mask) {
1440                                 /* Check IPv4 mask and update input set */
1441                                 if (ipv4_mask->hdr.version_ihl ||
1442                                     ipv4_mask->hdr.total_length ||
1443                                     ipv4_mask->hdr.packet_id ||
1444                                     ipv4_mask->hdr.fragment_offset ||
1445                                     ipv4_mask->hdr.hdr_checksum) {
1446                                         rte_flow_error_set(error, EINVAL,
1447                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1448                                                    item,
1449                                                    "Invalid IPv4 mask.");
1450                                         return -rte_errno;
1451                                 }
1452                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1453                                         input_set |= tunnel_type ?
1454                                                      ICE_INSET_TUN_IPV4_SRC :
1455                                                      ICE_INSET_IPV4_SRC;
1456                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1457                                         input_set |= tunnel_type ?
1458                                                      ICE_INSET_TUN_IPV4_DST :
1459                                                      ICE_INSET_IPV4_DST;
1460                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1461                                         input_set |= ICE_INSET_IPV4_TOS;
1462                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1463                                         input_set |= ICE_INSET_IPV4_TTL;
1464                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1465                                         input_set |= ICE_INSET_IPV4_PROTO;
1466
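                             /* The src/dst swap is deliberate: the base code
                              * (ice_fdir_get_prgm_pkt()) inserts the input src
                              * fields at the dst offsets when it builds the
                              * FDIR programming packet.
                              */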
1467                                 filter->input.ip.v4.dst_ip =
1468                                         ipv4_spec->hdr.src_addr;
1469                                 filter->input.ip.v4.src_ip =
1470                                         ipv4_spec->hdr.dst_addr;
1471                                 filter->input.ip.v4.tos =
1472                                         ipv4_spec->hdr.type_of_service;
1473                                 filter->input.ip.v4.ttl =
1474                                         ipv4_spec->hdr.time_to_live;
1475                                 filter->input.ip.v4.proto =
1476                                         ipv4_spec->hdr.next_proto_id;
1477                         }
1478
1479                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1480                         break;
1481                 case RTE_FLOW_ITEM_TYPE_IPV6:
1482                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1483                         ipv6_spec = item->spec;
1484                         ipv6_mask = item->mask;
1485
1486                         if (ipv6_spec && ipv6_mask) {
1487                                 /* Check IPv6 mask and update input set */
1488                                 if (ipv6_mask->hdr.payload_len) {
1489                                         rte_flow_error_set(error, EINVAL,
1490                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1491                                                    item,
1492                                                    "Invalid IPv6 mask");
1493                                         return -rte_errno;
1494                                 }
1495
1496                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1497                                             ipv6_addr_mask,
1498                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1499                                         input_set |= ICE_INSET_IPV6_SRC;
1500                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1501                                             ipv6_addr_mask,
1502                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1503                                         input_set |= ICE_INSET_IPV6_DST;
1504
1505                                 if ((ipv6_mask->hdr.vtc_flow &
1506                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1507                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1508                                         input_set |= ICE_INSET_IPV6_TC;
1509                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1510                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1511                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1512                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1513
1514                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1515                                            ipv6_spec->hdr.src_addr, 16);
1516                                 rte_memcpy(filter->input.ip.v6.src_ip,
1517                                            ipv6_spec->hdr.dst_addr, 16);
1518
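                             /* vtc_flow is version(4) | traffic class(8) |
                              * flow label(20); shift the CPU-order word right
                              * by ICE_FDIR_IPV6_TC_OFFSET to extract TC.
                              */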
1519                                 vtc_flow_cpu =
1520                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1521                                 filter->input.ip.v6.tc =
1522                                         (uint8_t)(vtc_flow_cpu >>
1523                                                   ICE_FDIR_IPV6_TC_OFFSET);
1524                                 filter->input.ip.v6.proto =
1525                                         ipv6_spec->hdr.proto;
1526                                 filter->input.ip.v6.hlim =
1527                                         ipv6_spec->hdr.hop_limits;
1528                         }
1529
1530                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1531                         break;
1532                 case RTE_FLOW_ITEM_TYPE_TCP:
1533                         tcp_spec = item->spec;
1534                         tcp_mask = item->mask;
1535
1536                         if (tcp_spec && tcp_mask) {
1537                                 /* Check TCP mask and update input set */
1538                                 if (tcp_mask->hdr.sent_seq ||
1539                                     tcp_mask->hdr.recv_ack ||
1540                                     tcp_mask->hdr.data_off ||
1541                                     tcp_mask->hdr.tcp_flags ||
1542                                     tcp_mask->hdr.rx_win ||
1543                                     tcp_mask->hdr.cksum ||
1544                                     tcp_mask->hdr.tcp_urp) {
1545                                         rte_flow_error_set(error, EINVAL,
1546                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1547                                                    item,
1548                                                    "Invalid TCP mask");
1549                                         return -rte_errno;
1550                                 }
1551
1552                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1553                                         input_set |= tunnel_type ?
1554                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1555                                                      ICE_INSET_TCP_SRC_PORT;
1556                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1557                                         input_set |= tunnel_type ?
1558                                                      ICE_INSET_TUN_TCP_DST_PORT :
1559                                                      ICE_INSET_TCP_DST_PORT;
1560
1561                                 /* Get filter info */
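                                     /* Ports are stored swapped for the same
                                      * reason as the IP addresses above.
                                      */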
1562                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1563                                         filter->input.ip.v4.dst_port =
1564                                                 tcp_spec->hdr.src_port;
1565                                         filter->input.ip.v4.src_port =
1566                                                 tcp_spec->hdr.dst_port;
1567                                         flow_type =
1568                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1569                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1570                                         filter->input.ip.v6.dst_port =
1571                                                 tcp_spec->hdr.src_port;
1572                                         filter->input.ip.v6.src_port =
1573                                                 tcp_spec->hdr.dst_port;
1574                                         flow_type =
1575                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1576                                 }
1577                         }
1578                         break;
1579                 case RTE_FLOW_ITEM_TYPE_UDP:
1580                         udp_spec = item->spec;
1581                         udp_mask = item->mask;
1582
1583                         if (udp_spec && udp_mask) {
1584                                 /* Check UDP mask and update input set */
1585                                 if (udp_mask->hdr.dgram_len ||
1586                                     udp_mask->hdr.dgram_cksum) {
1587                                         rte_flow_error_set(error, EINVAL,
1588                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1589                                                    item,
1590                                                    "Invalid UDP mask");
1591                                         return -rte_errno;
1592                                 }
1593
1594                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1595                                         input_set |= tunnel_type ?
1596                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1597                                                      ICE_INSET_UDP_SRC_PORT;
1598                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1599                                         input_set |= tunnel_type ?
1600                                                      ICE_INSET_TUN_UDP_DST_PORT :
1601                                                      ICE_INSET_UDP_DST_PORT;
1602
1603                                 /* Get filter info */
1604                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1605                                         filter->input.ip.v4.dst_port =
1606                                                 udp_spec->hdr.src_port;
1607                                         filter->input.ip.v4.src_port =
1608                                                 udp_spec->hdr.dst_port;
1609                                         flow_type =
1610                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1611                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1612                                         filter->input.ip.v6.src_port =
1613                                                 udp_spec->hdr.dst_port;
1614                                         filter->input.ip.v6.dst_port =
1615                                                 udp_spec->hdr.src_port;
1616                                         flow_type =
1617                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1618                                 }
1619                         }
1620                         break;
1621                 case RTE_FLOW_ITEM_TYPE_SCTP:
1622                         sctp_spec = item->spec;
1623                         sctp_mask = item->mask;
1624
1625                         if (sctp_spec && sctp_mask) {
1626                                 /* Check SCTP mask and update input set */
1627                                 if (sctp_mask->hdr.cksum) {
1628                                         rte_flow_error_set(error, EINVAL,
1629                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1630                                                    item,
1631                                                    "Invalid SCTP mask");
1632                                         return -rte_errno;
1633                                 }
1634
1635                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1636                                         input_set |= tunnel_type ?
1637                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1638                                                      ICE_INSET_SCTP_SRC_PORT;
1639                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1640                                         input_set |= tunnel_type ?
1641                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1642                                                      ICE_INSET_SCTP_DST_PORT;
1643
1644                                 /* Get filter info */
1645                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1646                                         filter->input.ip.v4.dst_port =
1647                                                 sctp_spec->hdr.src_port;
1648                                         filter->input.ip.v4.src_port =
1649                                                 sctp_spec->hdr.dst_port;
1650                                         flow_type =
1651                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1652                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1653                                         filter->input.ip.v6.dst_port =
1654                                                 sctp_spec->hdr.src_port;
1655                                         filter->input.ip.v6.src_port =
1656                                                 sctp_spec->hdr.dst_port;
1657                                         flow_type =
1658                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1659                                 }
1660                         }
1661                         break;
1662                 case RTE_FLOW_ITEM_TYPE_VOID:
1663                         break;
1664                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1665                         l3 = RTE_FLOW_ITEM_TYPE_END;
1666                         vxlan_spec = item->spec;
1667                         vxlan_mask = item->mask;
1668
1669                         if (vxlan_spec || vxlan_mask) {
1670                                 rte_flow_error_set(error, EINVAL,
1671                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1672                                                    item,
1673                                                    "VXLAN spec/mask not supported");
1674                                 return -rte_errno;
1675                         }
1676
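                             /* The VXLAN item carries no spec/mask here; it
                              * only marks the flow as tunneled so that the
                              * following inner headers use the TUN_* input-set
                              * bits.
                              */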
1677                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1678                         break;
1679                 case RTE_FLOW_ITEM_TYPE_GTPU:
1680                         l3 = RTE_FLOW_ITEM_TYPE_END;
1681                         gtp_spec = item->spec;
1682                         gtp_mask = item->mask;
1683
1684                         if (gtp_spec && gtp_mask) {
1685                                 if (gtp_mask->v_pt_rsv_flags ||
1686                                     gtp_mask->msg_type ||
1687                                     gtp_mask->msg_len) {
1688                                         rte_flow_error_set(error, EINVAL,
1689                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1690                                                    item,
1691                                                    "Invalid GTP mask");
1692                                         return -rte_errno;
1693                                 }
1694
1695                                 if (gtp_mask->teid == UINT32_MAX)
1696                                         input_set |= ICE_INSET_GTPU_TEID;
1697
1698                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1699                         }
1700                         break;
1701                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1702                         gtp_psc_spec = item->spec;
1703                         gtp_psc_mask = item->mask;
1704
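                             /* QFI is carried in the GTP-U PSC extension
                              * header and needs a full mask to join the
                              * input set.
                              */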
1705                         if (gtp_psc_spec && gtp_psc_mask) {
1706                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1707                                         input_set |= ICE_INSET_GTPU_QFI;
1708
1709                                 filter->input.gtpu_data.qfi =
1710                                         gtp_psc_spec->qfi;
1711                         }
1712
1713                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1714                         break;
1715                 default:
1716                         rte_flow_error_set(error, EINVAL,
1717                                    RTE_FLOW_ERROR_TYPE_ITEM,
1718                                    item,
1719                                    "Invalid pattern item.");
1720                         return -rte_errno;
1721                 }
1722         }
1723
1724         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1725                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1726
1727         filter->tunnel_type = tunnel_type;
1728         filter->input.flow_type = flow_type;
1729         filter->input_set = input_set;
1730
1731         return 0;
1732 }
1733
1734 static int
1735 ice_fdir_parse(struct ice_adapter *ad,
1736                struct ice_pattern_match_item *array,
1737                uint32_t array_len,
1738                const struct rte_flow_item pattern[],
1739                const struct rte_flow_action actions[],
1740                void **meta,
1741                struct rte_flow_error *error)
1742 {
1743         struct ice_pf *pf = &ad->pf;
1744         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1745         struct ice_pattern_match_item *item = NULL;
1746         uint64_t input_set;
1747         int ret;
1748
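             /* Match the pattern against the supported templates first, then
              * verify that the parsed input set is a non-empty subset of the
              * template's mask before parsing the actions.
              */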
1749         memset(filter, 0, sizeof(*filter));
1750         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1751         if (!item)
1752                 return -rte_errno;
1753
1754         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1755         if (ret)
1756                 return ret;
1757         input_set = filter->input_set;
1758         if (!input_set || input_set & ~item->input_set_mask) {
1759                 rte_flow_error_set(error, EINVAL,
1760                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1761                                    pattern,
1762                                    "Invalid input set");
1763                 return -rte_errno;
1764         }
1765
1766         ret = ice_fdir_parse_action(ad, actions, error, filter);
1767         if (ret)
1768                 return ret;
1769
1770         *meta = filter;
1771
1772         return 0;
1773 }
1774
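     /* Two parser instances share one parse routine: the OS-default DDP
      * package supports the basic non-tunnel patterns, while the COMMS
      * package additionally covers tunnel patterns such as VXLAN and GTP-U.
      */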
1775 static struct ice_flow_parser ice_fdir_parser_os = {
1776         .engine = &ice_fdir_engine,
1777         .array = ice_fdir_pattern_os,
1778         .array_len = RTE_DIM(ice_fdir_pattern_os),
1779         .parse_pattern_action = ice_fdir_parse,
1780         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1781 };
1782
1783 static struct ice_flow_parser ice_fdir_parser_comms = {
1784         .engine = &ice_fdir_engine,
1785         .array = ice_fdir_pattern_comms,
1786         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1787         .parse_pattern_action = ice_fdir_parse,
1788         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1789 };
1790
1791 RTE_INIT(ice_fdir_engine_register)
1792 {
1793         ice_register_flow_engine(&ice_fdir_engine);
1794 }