net/ice: fix wild pointer
[dpdk.git] / drivers/net/ice/ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

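/*
 * Patterns supported by the OS default DDP package; each entry pairs a
 * flow pattern with the input-set bits that may be matched for it.
 */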
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

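/*
 * Patterns supported by the comms DDP package: everything the OS default
 * table has, plus GTP-U matching (the pattern_eth_ipv4_gtpu_ipv4 entry).
 */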
static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

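/*
 * Reserve a memzone, reusing an existing zone of the same name if one is
 * found; the FDIR programming packet buffer must be IOVA-contiguous.
 */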
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

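/*
 * Allocate a flow counter. A shared counter is first looked up by ID and
 * reference-counted if found; otherwise the first free counter of the
 * first non-empty pool is taken. A pool whose free list drains is moved
 * to the tail so later scans hit non-empty pools first.
 */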
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

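/*
 * Software tracking of FDIR entries: a CRC32-hashed table keyed by
 * struct ice_fdir_fltr_pattern, plus a flat map from hash slot to the
 * filter configuration.
 */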
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR Tx queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR Rx queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

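        /*
         * Profile IDs for tunnel rules are offset by ICE_FLTR_PTYPE_MAX so
         * tunnel and non-tunnel profiles of the same ptype never clash.
         */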
        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, ptype);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

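/*
 * Bind a flow profile for (ptype, is_tunnel) into the HW FDIR table:
 * one profile with two flow entries, one for the main VSI and one for
 * the FDIR control VSI used for programming.
 */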
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

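/*
 * Translate the driver's input-set bitmap into the base code's flow
 * field indexes; tunnel and non-tunnel bits map to the same inner fields.
 */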
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

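/*
 * Build the flow segment(s) for a filter type from its input set and
 * install them. Tunnel rules need ICE_FD_HW_SEG_MAX segments, with the
 * inner segment copied into index 1.
 */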
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

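/*
 * Program or remove one filter in HW: build a programming descriptor and
 * a dummy packet describing the match (written into the pre-reserved
 * programming memzone), then submit the descriptor on the FDIR queue.
 */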
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to generate dummy packet.");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

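/*
 * Create path: reject duplicates via the SW hash lookup, configure the
 * input-set profile, optionally attach a counter, program the rule in
 * HW, then record the SW entry. An illustrative testpmd command that
 * would land here (hypothetical, assuming port 0):
 *   flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 / end
 *        actions queue index 4 / end
 */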
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value from
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

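/*
 * An RSS action on a FDIR rule requests a queue region: the queues must
 * be contiguous, the region size a power of two, and the region is
 * encoded as a start index plus log2(size).
 */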
1229 static int
1230 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1231                               struct rte_flow_error *error,
1232                               const struct rte_flow_action *act,
1233                               struct ice_fdir_filter_conf *filter)
1234 {
1235         const struct rte_flow_action_rss *rss = act->conf;
1236         uint32_t i;
1237
1238         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1239                 rte_flow_error_set(error, EINVAL,
1240                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1241                                    "Invalid action.");
1242                 return -rte_errno;
1243         }
1244
1245         if (rss->queue_num <= 1) {
1246                 rte_flow_error_set(error, EINVAL,
1247                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1248                                    "Queue region size can't be 0 or 1.");
1249                 return -rte_errno;
1250         }
1251
1252         /* check if queue index for queue region is continuous */
1253         for (i = 0; i < rss->queue_num - 1; i++) {
1254                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1255                         rte_flow_error_set(error, EINVAL,
1256                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1257                                            "Discontinuous queue region");
1258                         return -rte_errno;
1259                 }
1260         }
1261
1262         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1263                 rte_flow_error_set(error, EINVAL,
1264                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1265                                    "Invalid queue region indexes.");
1266                 return -rte_errno;
1267         }
1268
1269         if (!(rte_is_power_of_2(rss->queue_num) &&
1270              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1271                 rte_flow_error_set(error, EINVAL,
1272                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1273                                    "The region size should be any of the following values:"
1274                                    "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1275                                    "of queues do not exceed the VSI allocation.");
1276                 return -rte_errno;
1277         }
1278
1279         filter->input.q_index = rss->queue[0];
1280         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1281         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1282
1283         return 0;
1284 }
1285
1286 static int
1287 ice_fdir_parse_action(struct ice_adapter *ad,
1288                       const struct rte_flow_action actions[],
1289                       struct rte_flow_error *error,
1290                       struct ice_fdir_filter_conf *filter)
1291 {
1292         struct ice_pf *pf = &ad->pf;
1293         const struct rte_flow_action_queue *act_q;
1294         const struct rte_flow_action_mark *mark_spec = NULL;
1295         const struct rte_flow_action_count *act_count;
1296         uint32_t dest_num = 0;
1297         uint32_t mark_num = 0;
1298         uint32_t counter_num = 0;
1299         int ret;
1300
1301         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1302                 switch (actions->type) {
1303                 case RTE_FLOW_ACTION_TYPE_VOID:
1304                         break;
1305                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1306                         dest_num++;
1307
1308                         act_q = actions->conf;
1309                         filter->input.q_index = act_q->index;
1310                         if (filter->input.q_index >=
1311                                         pf->dev_data->nb_rx_queues) {
1312                                 rte_flow_error_set(error, EINVAL,
1313                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1314                                                    actions,
1315                                                    "Invalid queue for FDIR.");
1316                                 return -rte_errno;
1317                         }
1318                         filter->input.dest_ctl =
1319                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1320                         break;
1321                 case RTE_FLOW_ACTION_TYPE_DROP:
1322                         dest_num++;
1323
1324                         filter->input.dest_ctl =
1325                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1326                         break;
1327                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1328                         dest_num++;
1329
1330                         filter->input.dest_ctl =
1331                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1332                         filter->input.q_index = 0;
1333                         break;
1334                 case RTE_FLOW_ACTION_TYPE_RSS:
1335                         dest_num++;
1336
1337                         ret = ice_fdir_parse_action_qregion(pf,
1338                                                 error, actions, filter);
1339                         if (ret)
1340                                 return ret;
1341                         break;
1342                 case RTE_FLOW_ACTION_TYPE_MARK:
1343                         mark_num++;
1344
1345                         mark_spec = actions->conf;
1346                         filter->input.fltr_id = mark_spec->id;
1347                         break;
1348                 case RTE_FLOW_ACTION_TYPE_COUNT:
1349                         counter_num++;
1350
1351                         act_count = actions->conf;
1352                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1353                         rte_memcpy(&filter->act_count, act_count,
1354                                                 sizeof(filter->act_count));
1355
1356                         break;
1357                 default:
1358                         rte_flow_error_set(error, EINVAL,
1359                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1360                                    "Invalid action.");
1361                         return -rte_errno;
1362                 }
1363         }
1364
1365         if (dest_num == 0 || dest_num >= 2) {
1366                 rte_flow_error_set(error, EINVAL,
1367                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1368                            "Unsupported action combination");
1369                 return -rte_errno;
1370         }
1371
1372         if (mark_num >= 2) {
1373                 rte_flow_error_set(error, EINVAL,
1374                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1375                            "Too many mark actions");
1376                 return -rte_errno;
1377         }
1378
1379         if (counter_num >= 2) {
1380                 rte_flow_error_set(error, EINVAL,
1381                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1382                            "Too many count actions");
1383                 return -rte_errno;
1384         }
1385
1386         return 0;
1387 }
1388
1389 static int
1390 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1391                        const struct rte_flow_item pattern[],
1392                        struct rte_flow_error *error,
1393                        struct ice_fdir_filter_conf *filter)
1394 {
1395         const struct rte_flow_item *item = pattern;
1396         enum rte_flow_item_type item_type;
1397         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1398         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1399         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1400         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1401         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1402         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1403         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1404         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1405         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1406         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1407         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1408         uint64_t input_set = ICE_INSET_NONE;
1409         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1410         uint8_t  ipv6_addr_mask[16] = {
1411                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1412                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1413         };
1414         uint32_t vtc_flow_cpu;
1415
1417         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1418                 if (item->last) {
1419                         rte_flow_error_set(error, EINVAL,
1420                                         RTE_FLOW_ERROR_TYPE_ITEM,
1421                                         item,
1422                                         "Range (item last) is not supported");
1423                         return -rte_errno;
1424                 }
1425                 item_type = item->type;
1426
1427                 switch (item_type) {
1428                 case RTE_FLOW_ITEM_TYPE_ETH:
1429                         eth_spec = item->spec;
1430                         eth_mask = item->mask;
1431
1432                         if (eth_spec && eth_mask) {
1433                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1434                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1435                                         rte_flow_error_set(error, EINVAL,
1436                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1437                                                 item,
1438                                                 "Source MAC matching is not supported");
1439                                         return -rte_errno;
1440                                 }
1441
1442                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1443                                         rte_flow_error_set(error, EINVAL,
1444                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1445                                                 item,
1446                                                 "Destination MAC mask must be all ones");
1447                                         return -rte_errno;
1448                                 }
1449
1450                                 input_set |= ICE_INSET_DMAC;
1451                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1452                                            &eth_spec->dst,
1453                                            RTE_ETHER_ADDR_LEN);
1454                         }
1455                         break;
1456                 case RTE_FLOW_ITEM_TYPE_IPV4:
1457                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1458                         ipv4_spec = item->spec;
1459                         ipv4_mask = item->mask;
1460
1461                         if (ipv4_spec && ipv4_mask) {
1462                                 /* Check IPv4 mask and update input set */
1463                                 if (ipv4_mask->hdr.version_ihl ||
1464                                     ipv4_mask->hdr.total_length ||
1465                                     ipv4_mask->hdr.packet_id ||
1466                                     ipv4_mask->hdr.fragment_offset ||
1467                                     ipv4_mask->hdr.hdr_checksum) {
1468                                         rte_flow_error_set(error, EINVAL,
1469                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1470                                                    item,
1471                                                    "Invalid IPv4 mask.");
1472                                         return -rte_errno;
1473                                 }
1474                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1475                                         input_set |= tunnel_type ?
1476                                                      ICE_INSET_TUN_IPV4_SRC :
1477                                                      ICE_INSET_IPV4_SRC;
1478                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1479                                         input_set |= tunnel_type ?
1480                                                      ICE_INSET_TUN_IPV4_DST :
1481                                                      ICE_INSET_IPV4_DST;
1482                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1483                                         input_set |= ICE_INSET_IPV4_TOS;
1484                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1485                                         input_set |= ICE_INSET_IPV4_TTL;
1486                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1487                                         input_set |= ICE_INSET_IPV4_PROTO;
1488
                                 /* The src/dst swap is intentional: the base
                                  * code builds the FDIR programming packet in
                                  * the reverse direction, so the pattern's
                                  * source maps to the filter input's
                                  * destination and vice versa.  The same
                                  * applies to IPv6 and the L4 ports below.
                                  */
1489                                 filter->input.ip.v4.dst_ip =
1490                                         ipv4_spec->hdr.src_addr;
1491                                 filter->input.ip.v4.src_ip =
1492                                         ipv4_spec->hdr.dst_addr;
1493                                 filter->input.ip.v4.tos =
1494                                         ipv4_spec->hdr.type_of_service;
1495                                 filter->input.ip.v4.ttl =
1496                                         ipv4_spec->hdr.time_to_live;
1497                                 filter->input.ip.v4.proto =
1498                                         ipv4_spec->hdr.next_proto_id;
1499                         }
1500
1501                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1502                         break;
1503                 case RTE_FLOW_ITEM_TYPE_IPV6:
1504                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1505                         ipv6_spec = item->spec;
1506                         ipv6_mask = item->mask;
1507
1508                         if (ipv6_spec && ipv6_mask) {
1509                                 /* Check IPv6 mask and update input set */
1510                                 if (ipv6_mask->hdr.payload_len) {
1511                                         rte_flow_error_set(error, EINVAL,
1512                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1513                                                    item,
1514                                                    "Invalid IPv6 mask");
1515                                         return -rte_errno;
1516                                 }
1517
1518                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1519                                             ipv6_addr_mask,
1520                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1521                                         input_set |= ICE_INSET_IPV6_SRC;
1522                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1523                                             ipv6_addr_mask,
1524                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1525                                         input_set |= ICE_INSET_IPV6_DST;
1526
1527                                 if ((ipv6_mask->hdr.vtc_flow &
1528                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1529                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1530                                         input_set |= ICE_INSET_IPV6_TC;
1531                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1532                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1533                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1534                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1535
1536                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1537                                            ipv6_spec->hdr.src_addr, 16);
1538                                 rte_memcpy(filter->input.ip.v6.src_ip,
1539                                            ipv6_spec->hdr.dst_addr, 16);
1540
1541                                 vtc_flow_cpu =
1542                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1543                                 filter->input.ip.v6.tc =
1544                                         (uint8_t)(vtc_flow_cpu >>
1545                                                   ICE_FDIR_IPV6_TC_OFFSET);
1546                                 filter->input.ip.v6.proto =
1547                                         ipv6_spec->hdr.proto;
1548                                 filter->input.ip.v6.hlim =
1549                                         ipv6_spec->hdr.hop_limits;
1550                         }
1551
1552                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1553                         break;
1554                 case RTE_FLOW_ITEM_TYPE_TCP:
1555                         tcp_spec = item->spec;
1556                         tcp_mask = item->mask;
1557
1558                         if (tcp_spec && tcp_mask) {
1559                                 /* Check TCP mask and update input set */
1560                                 if (tcp_mask->hdr.sent_seq ||
1561                                     tcp_mask->hdr.recv_ack ||
1562                                     tcp_mask->hdr.data_off ||
1563                                     tcp_mask->hdr.tcp_flags ||
1564                                     tcp_mask->hdr.rx_win ||
1565                                     tcp_mask->hdr.cksum ||
1566                                     tcp_mask->hdr.tcp_urp) {
1567                                         rte_flow_error_set(error, EINVAL,
1568                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1569                                                    item,
1570                                                    "Invalid TCP mask");
1571                                         return -rte_errno;
1572                                 }
1573
1574                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1575                                         input_set |= tunnel_type ?
1576                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1577                                                      ICE_INSET_TCP_SRC_PORT;
1578                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1579                                         input_set |= tunnel_type ?
1580                                                      ICE_INSET_TUN_TCP_DST_PORT :
1581                                                      ICE_INSET_TCP_DST_PORT;
1582
1583                                 /* Get filter info */
1584                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1585                                         filter->input.ip.v4.dst_port =
1586                                                 tcp_spec->hdr.src_port;
1587                                         filter->input.ip.v4.src_port =
1588                                                 tcp_spec->hdr.dst_port;
1589                                         flow_type =
1590                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1591                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1592                                         filter->input.ip.v6.dst_port =
1593                                                 tcp_spec->hdr.src_port;
1594                                         filter->input.ip.v6.src_port =
1595                                                 tcp_spec->hdr.dst_port;
1596                                         flow_type =
1597                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1598                                 }
1599                         }
1600                         break;
1601                 case RTE_FLOW_ITEM_TYPE_UDP:
1602                         udp_spec = item->spec;
1603                         udp_mask = item->mask;
1604
1605                         if (udp_spec && udp_mask) {
1606                                 /* Check UDP mask and update input set*/
1607                                 if (udp_mask->hdr.dgram_len ||
1608                                     udp_mask->hdr.dgram_cksum) {
1609                                         rte_flow_error_set(error, EINVAL,
1610                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1611                                                    item,
1612                                                    "Invalid UDP mask");
1613                                         return -rte_errno;
1614                                 }
1615
1616                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1617                                         input_set |= tunnel_type ?
1618                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1619                                                      ICE_INSET_UDP_SRC_PORT;
1620                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1621                                         input_set |= tunnel_type ?
1622                                                      ICE_INSET_TUN_UDP_DST_PORT :
1623                                                      ICE_INSET_UDP_DST_PORT;
1624
1625                                 /* Get filter info */
1626                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1627                                         filter->input.ip.v4.dst_port =
1628                                                 udp_spec->hdr.src_port;
1629                                         filter->input.ip.v4.src_port =
1630                                                 udp_spec->hdr.dst_port;
1631                                         flow_type =
1632                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1633                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1634                                         filter->input.ip.v6.src_port =
1635                                                 udp_spec->hdr.dst_port;
1636                                         filter->input.ip.v6.dst_port =
1637                                                 udp_spec->hdr.src_port;
1638                                         flow_type =
1639                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1640                                 }
1641                         }
1642                         break;
1643                 case RTE_FLOW_ITEM_TYPE_SCTP:
1644                         sctp_spec = item->spec;
1645                         sctp_mask = item->mask;
1646
1647                         if (sctp_spec && sctp_mask) {
1648                                 /* Check SCTP mask and update input set */
1649                                 if (sctp_mask->hdr.cksum) {
1650                                         rte_flow_error_set(error, EINVAL,
1651                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1652                                                    item,
1653                                                    "Invalid SCTP mask");
1654                                         return -rte_errno;
1655                                 }
1656
1657                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1658                                         input_set |= tunnel_type ?
1659                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1660                                                      ICE_INSET_SCTP_SRC_PORT;
1661                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1662                                         input_set |= tunnel_type ?
1663                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1664                                                      ICE_INSET_SCTP_DST_PORT;
1665
1666                                 /* Get filter info */
1667                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1668                                         filter->input.ip.v4.dst_port =
1669                                                 sctp_spec->hdr.src_port;
1670                                         filter->input.ip.v4.src_port =
1671                                                 sctp_spec->hdr.dst_port;
1672                                         flow_type =
1673                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1674                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1675                                         filter->input.ip.v6.dst_port =
1676                                                 sctp_spec->hdr.src_port;
1677                                         filter->input.ip.v6.src_port =
1678                                                 sctp_spec->hdr.dst_port;
1679                                         flow_type =
1680                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1681                                 }
1682                         }
1683                         break;
1684                 case RTE_FLOW_ITEM_TYPE_VOID:
1685                         break;
1686                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1687                         l3 = RTE_FLOW_ITEM_TYPE_END;
1688                         vxlan_spec = item->spec;
1689                         vxlan_mask = item->mask;
1690
                         /* Matching on VXLAN header fields (e.g. VNI) is not
                          * supported; the item may only mark the tunnel
                          * boundary.
                          */
1691                         if (vxlan_spec || vxlan_mask) {
1692                                 rte_flow_error_set(error, EINVAL,
1693                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1694                                                    item,
1695                                                    "VXLAN field matching is not supported");
1696                                 return -rte_errno;
1697                         }
1698
1699                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1700                         break;
1701                 case RTE_FLOW_ITEM_TYPE_GTPU:
1702                         l3 = RTE_FLOW_ITEM_TYPE_END;
1703                         gtp_spec = item->spec;
1704                         gtp_mask = item->mask;
1705
1706                         if (gtp_spec && gtp_mask) {
1707                                 if (gtp_mask->v_pt_rsv_flags ||
1708                                     gtp_mask->msg_type ||
1709                                     gtp_mask->msg_len) {
1710                                         rte_flow_error_set(error, EINVAL,
1711                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1712                                                    item,
1713                                                    "Invalid GTP mask");
1714                                         return -rte_errno;
1715                                 }
1716
1717                                 if (gtp_mask->teid == UINT32_MAX)
1718                                         input_set |= ICE_INSET_GTPU_TEID;
1719
1720                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1721                         }
1722                         break;
1723                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1724                         gtp_psc_spec = item->spec;
1725                         gtp_psc_mask = item->mask;
1726
1727                         if (gtp_psc_spec && gtp_psc_mask) {
1728                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1729                                         input_set |= ICE_INSET_GTPU_QFI;
1730
1731                                 filter->input.gtpu_data.qfi =
1732                                         gtp_psc_spec->qfi;
1733                         }
1734
1735                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1736                         break;
1737                 default:
1738                         rte_flow_error_set(error, EINVAL,
1739                                    RTE_FLOW_ERROR_TYPE_ITEM,
1740                                    item,
1741                                    "Invalid pattern item.");
1742                         return -rte_errno;
1743                 }
1744         }
1745
         /* GTP-U filters are programmed with the inner IPv4 "other" type */
1746         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1747                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1748
1749         filter->tunnel_type = tunnel_type;
1750         filter->input.flow_type = flow_type;
1751         filter->input_set = input_set;
1752
1753         return 0;
1754 }
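
/*
 * Illustrative sketch only (not part of the driver): a pattern that
 * ice_fdir_parse_pattern() above accepts -- ETH / IPV4 / UDP with fully
 * masked IPv4 addresses and UDP ports, yielding flow type
 * ICE_FLTR_PTYPE_NONF_IPV4_UDP.  Only all-ones masks select a field for the
 * input set; addresses, ports and the helper name are hypothetical examples.
 */
static __rte_unused void
ice_fdir_example_pattern(void)
{
	static const struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
		},
	};
	static const struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(UINT32_MAX),
			.dst_addr = RTE_BE32(UINT32_MAX),
		},
	};
	static const struct rte_flow_item_udp udp_spec = {
		.hdr = { .src_port = RTE_BE16(32), .dst_port = RTE_BE16(5000) },
	};
	static const struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = RTE_BE16(UINT16_MAX),
			 .dst_port = RTE_BE16(UINT16_MAX) },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	RTE_SET_USED(pattern);
}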
1755
1756 static int
1757 ice_fdir_parse(struct ice_adapter *ad,
1758                struct ice_pattern_match_item *array,
1759                uint32_t array_len,
1760                const struct rte_flow_item pattern[],
1761                const struct rte_flow_action actions[],
1762                void **meta,
1763                struct rte_flow_error *error)
1764 {
1765         struct ice_pf *pf = &ad->pf;
1766         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1767         struct ice_pattern_match_item *item = NULL;
1768         uint64_t input_set;
1769         int ret;
1770
1771         memset(filter, 0, sizeof(*filter));
1772         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1773         if (!item)
1774                 return -rte_errno;
1775
1776         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1777         if (ret)
1778                 return ret;
1779         input_set = filter->input_set;
1780         if (!input_set || input_set & ~item->input_set_mask) {
1781                 rte_flow_error_set(error, EINVAL,
1782                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1783                                    pattern,
1784                                    "Invalid input set");
1785                 return -rte_errno;
1786         }
1787
1788         ret = ice_fdir_parse_action(ad, actions, error, filter);
1789         if (ret)
1790                 return ret;
1791
1792         *meta = filter;
1793
1794         return 0;
1795 }
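
/*
 * Note: on success *meta points at pf->fdir.conf, storage that is shared by
 * every parse on this port and memset at the top of ice_fdir_parse().  The
 * caller must consume or copy the parsed filter before the next parse and
 * must not cache the pointer across calls.
 */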
1796
1797 static struct ice_flow_parser ice_fdir_parser_os = {
1798         .engine = &ice_fdir_engine,
1799         .array = ice_fdir_pattern_os,
1800         .array_len = RTE_DIM(ice_fdir_pattern_os),
1801         .parse_pattern_action = ice_fdir_parse,
1802         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1803 };
1804
1805 static struct ice_flow_parser ice_fdir_parser_comms = {
1806         .engine = &ice_fdir_engine,
1807         .array = ice_fdir_pattern_comms,
1808         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1809         .parse_pattern_action = ice_fdir_parse,
1810         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1811 };
1812
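/*
 * RTE_INIT runs this constructor at shared-object load time, before main():
 * only the flow engine itself is registered here.  The OS-default and comms
 * parser variants above differ only in their pattern tables; which of them
 * is registered is decided later, during engine initialization, based on the
 * DDP package active on the device.
 */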
1813 RTE_INIT(ice_fdir_engine_register)
1814 {
1815         ice_register_flow_engine(&ice_fdir_engine);
1816 }