/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU (\
        ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_GTPU_EH (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

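/*
 * Editor's note: each entry below pairs a flow pattern with the widest
 * input set the FDIR engine accepts for it; rules requesting fields
 * outside that mask are rejected at parse time. The third field is unused
 * here (ICE_INSET_NONE). The two tables differ only in that the comms DDP
 * package additionally exposes the GTP-U patterns.
 */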
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu,        ICE_FDIR_INSET_GTPU,                  ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,     ICE_FDIR_INSET_GTPU_EH,               ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

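/*
 * Editor's note: rte_memzone names are global to the process, so the
 * helper below looks a zone up before reserving it. This makes it
 * idempotent: a second call with the same name returns the existing zone
 * instead of failing, which matters if FDIR setup runs more than once
 * for a port.
 */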
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

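/*
 * Editor's note: hw->fdir_prof is a lazily allocated table indexed by
 * enum ice_fltr_ptype. The allocator below is safe to call repeatedly
 * (both the table and the per-ptype entries are only allocated while
 * still NULL) and unwinds everything it allocated on partial failure.
 */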
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

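/*
 * Editor's note: FDIR hit counters are managed in pools of
 * ICE_FDIR_COUNTERS_PER_BLOCK hardware counters each. The container
 * tracks every pool twice: in pool_list for allocation scans and in the
 * pools[] array (cursor: index_free) for release.
 */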
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink before freeing so pool_list never holds a dangling node */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

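/*
 * Editor's note: a "shared" counter is keyed by a caller-supplied id and
 * reference counted, letting several rules aggregate hits into one
 * hardware counter; a non-shared request always takes the first free
 * counter found. Rotating a just-emptied pool to the tail of pool_list
 * keeps pools that still have free counters at the front of the scan.
 */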
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

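/*
 * Editor's note: installed rules are deduplicated in software via a
 * CRC-keyed rte_hash whose key is the packed ice_fdir_fltr_pattern; the
 * hash_map array maps the hash's returned key slot back to the filter
 * object, giving O(1) lookup in create/destroy.
 */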
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has already been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, ptype);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

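/*
 * Editor's note: the conflict helpers below distinguish three outcomes:
 * 0 when no profile exists yet (or an unused conflicting one could be
 * removed), -EEXIST when an identical input set is already programmed
 * (callers treat this as success), and -EINVAL when a different input
 * set is pinned by live rules.
 */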
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* the input set is identical, so the profile already exists:
         * return -EEXIST and let the caller treat it as success
         */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile and rules both exist, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                /* mirror the IPv4/IPv6 OTHER cases: check the specific
                 * GTPU protocol types rather than OTHER against itself
                 */
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

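/*
 * Editor's note: tunnel and non-tunnel flavors of a filter type share
 * one profile ID space by offsetting the tunnel variant, i.e.
 *
 *     prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
 *
 * Each profile then receives two flow entries: one on the main VSI for
 * regular traffic and one on the FDIR control VSI that carries the
 * programming packets.
 */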
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether this profile conflicts with other profiles */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

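/*
 * Editor's note: the parser below walks a static inset-bit to flow-field
 * table. The tunnel inset bits (ICE_INSET_TUN_*) deliberately map to the
 * same field indexes as their non-tunnel counterparts, since the field
 * enum appears to describe the innermost header being matched.
 */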
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        /* note: ice_malloc() discards its first argument in the DPDK
         * osdep layer, so the "hw" token here is never expanded
         */
        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                /* an identical profile already exists, which is fine */
                return (ret == -EEXIST) ? 0 : ret;
        }

        return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

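/*
 * Editor's note: a rule is programmed by pairing a filter descriptor
 * with a generated "dummy" packet that models the headers the rule
 * matches; the packet body is built into the reserved memzone at
 * pf->fdir.prg_pkt, and ice_fdir_programming() (not shown in this
 * excerpt) is assumed to hand the descriptor to the control queue.
 */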
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

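/*
 * Editor's note: rule creation below orders the cheap, reversible steps
 * first (software dedup lookup, profile/input-set setup, optional
 * counter allocation) before the hardware programming and the software
 * table insert; the free_counter/free_entry labels unwind the counter
 * and the entry allocation on failure.
 */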
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                     struct rte_flow *flow,
                     struct rte_flow_query_count *flow_stats,
                     struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   NULL,
                                   "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value in
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

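/*
 * Editor's note: a queue region is encoded as a base queue plus a
 * power-of-two size stored as log2 in q_region; e.g. an RSS action over
 * queues 8..15 yields q_index = 8 and
 * q_region = rte_fls_u32(8) - 1 = 3.
 */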
1388 static int
1389 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1390                               struct rte_flow_error *error,
1391                               const struct rte_flow_action *act,
1392                               struct ice_fdir_filter_conf *filter)
1393 {
1394         const struct rte_flow_action_rss *rss = act->conf;
1395         uint32_t i;
1396
1397         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1398                 rte_flow_error_set(error, EINVAL,
1399                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1400                                    "Invalid action.");
1401                 return -rte_errno;
1402         }
1403
1404         if (rss->queue_num <= 1) {
1405                 rte_flow_error_set(error, EINVAL,
1406                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1407                                    "Queue region size can't be 0 or 1.");
1408                 return -rte_errno;
1409         }
1410
1411         /* check if queue index for queue region is continuous */
1412         for (i = 0; i < rss->queue_num - 1; i++) {
1413                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1414                         rte_flow_error_set(error, EINVAL,
1415                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1416                                            "Discontinuous queue region");
1417                         return -rte_errno;
1418                 }
1419         }
1420
1421         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1422                 rte_flow_error_set(error, EINVAL,
1423                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1424                                    "Invalid queue region indexes.");
1425                 return -rte_errno;
1426         }
1427
1428         if (!(rte_is_power_of_2(rss->queue_num) &&
1429              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1430                 rte_flow_error_set(error, EINVAL,
1431                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1432                                    "The region size must be one of the following values: "
1433                                    "2, 4, 8, 16, 32, 64 or 128, as long as the total "
1434                                    "number of queues does not exceed the VSI allocation.");
1435                 return -rte_errno;
1436         }
1437
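             /*
              * q_region encodes log2(queue_num): queue_num is known to be
              * a power of two here, and rte_fls_u32() returns the 1-based
              * index of its most significant set bit.
              */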
1438         filter->input.q_index = rss->queue[0];
1439         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1440         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1441
1442         return 0;
1443 }
1444
1445 static int
1446 ice_fdir_parse_action(struct ice_adapter *ad,
1447                       const struct rte_flow_action actions[],
1448                       struct rte_flow_error *error,
1449                       struct ice_fdir_filter_conf *filter)
1450 {
1451         struct ice_pf *pf = &ad->pf;
1452         const struct rte_flow_action_queue *act_q;
1453         const struct rte_flow_action_mark *mark_spec = NULL;
1454         const struct rte_flow_action_count *act_count;
1455         uint32_t dest_num = 0;
1456         uint32_t mark_num = 0;
1457         uint32_t counter_num = 0;
1458         int ret;
1459
1460         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1461                 switch (actions->type) {
1462                 case RTE_FLOW_ACTION_TYPE_VOID:
1463                         break;
1464                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1465                         dest_num++;
1466
1467                         act_q = actions->conf;
1468                         filter->input.q_index = act_q->index;
1469                         if (filter->input.q_index >=
1470                                         pf->dev_data->nb_rx_queues) {
1471                                 rte_flow_error_set(error, EINVAL,
1472                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1473                                                    actions,
1474                                                    "Invalid queue for FDIR.");
1475                                 return -rte_errno;
1476                         }
1477                         filter->input.dest_ctl =
1478                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1479                         break;
1480                 case RTE_FLOW_ACTION_TYPE_DROP:
1481                         dest_num++;
1482
1483                         filter->input.dest_ctl =
1484                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1485                         break;
1486                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1487                         dest_num++;
1488
1489                         filter->input.dest_ctl =
1490                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1491                         filter->input.q_index = 0;
1492                         break;
1493                 case RTE_FLOW_ACTION_TYPE_RSS:
1494                         dest_num++;
1495
1496                         ret = ice_fdir_parse_action_qregion(pf,
1497                                                 error, actions, filter);
1498                         if (ret)
1499                                 return ret;
1500                         break;
1501                 case RTE_FLOW_ACTION_TYPE_MARK:
1502                         mark_num++;
1503
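                             /*
                              * The mark id is programmed as the Flow Director
                              * filter id (FDID) so that it can be reported
                              * back with the received packet.
                              */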
1504                         mark_spec = actions->conf;
1505                         filter->input.fltr_id = mark_spec->id;
1506                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1507                         break;
1508                 case RTE_FLOW_ACTION_TYPE_COUNT:
1509                         counter_num++;
1510
1511                         act_count = actions->conf;
1512                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1513                         rte_memcpy(&filter->act_count, act_count,
1514                                                 sizeof(filter->act_count));
1515
1516                         break;
1517                 default:
1518                         rte_flow_error_set(error, EINVAL,
1519                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1520                                    "Invalid action.");
1521                         return -rte_errno;
1522                 }
1523         }
1524
1525         if (dest_num == 0 || dest_num >= 2) {
1526                 rte_flow_error_set(error, EINVAL,
1527                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1528                            "Exactly one destination action (queue, drop, passthru or rss) is required");
1529                 return -rte_errno;
1530         }
1531
1532         if (mark_num >= 2) {
1533                 rte_flow_error_set(error, EINVAL,
1534                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1535                            "Too many mark actions");
1536                 return -rte_errno;
1537         }
1538
1539         if (counter_num >= 2) {
1540                 rte_flow_error_set(error, EINVAL,
1541                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1542                            "Too many count actions");
1543                 return -rte_errno;
1544         }
1545
1546         return 0;
1547 }
1548
1549 static int
1550 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1551                        const struct rte_flow_item pattern[],
1552                        struct rte_flow_error *error,
1553                        struct ice_fdir_filter_conf *filter)
1554 {
1555         const struct rte_flow_item *item = pattern;
1556         enum rte_flow_item_type item_type;
1557         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1558         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1559         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1560         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1561         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1562         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1563         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1564         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1565         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1566         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1567         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1568         uint64_t input_set = ICE_INSET_NONE;
1569         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1570         uint8_t ipv6_addr_mask[16] = {
1571                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1572                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1573         };
1574         uint32_t vtc_flow_cpu;
1575
1576
1577         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1578                 if (item->last) {
1579                         rte_flow_error_set(error, EINVAL,
1580                                         RTE_FLOW_ERROR_TYPE_ITEM,
1581                                         item,
1582                                         "Range not supported");
1583                         return -rte_errno;
1584                 }
1585                 item_type = item->type;
1586
1587                 switch (item_type) {
1588                 case RTE_FLOW_ITEM_TYPE_ETH:
1589                         eth_spec = item->spec;
1590                         eth_mask = item->mask;
1591
1592                         if (eth_spec && eth_mask) {
1593                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1594                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1595                                         rte_flow_error_set(error, EINVAL,
1596                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1597                                                 item,
1598                                                 "Source MAC not supported");
1599                                         return -rte_errno;
1600                                 }
1601
1602                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1603                                         rte_flow_error_set(error, EINVAL,
1604                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1605                                                 item,
1606                                                 "Invalid MAC address mask");
1607                                         return -rte_errno;
1608                                 }
1609
1610                                 input_set |= ICE_INSET_DMAC;
1611                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1612                                            &eth_spec->dst,
1613                                            RTE_ETHER_ADDR_LEN);
1614                         }
1615                         break;
1616                 case RTE_FLOW_ITEM_TYPE_IPV4:
1617                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1618                         ipv4_spec = item->spec;
1619                         ipv4_mask = item->mask;
1620
1621                         if (ipv4_spec && ipv4_mask) {
1622                                 /* Check IPv4 mask and update input set */
1623                                 if (ipv4_mask->hdr.version_ihl ||
1624                                     ipv4_mask->hdr.total_length ||
1625                                     ipv4_mask->hdr.packet_id ||
1626                                     ipv4_mask->hdr.fragment_offset ||
1627                                     ipv4_mask->hdr.hdr_checksum) {
1628                                         rte_flow_error_set(error, EINVAL,
1629                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1630                                                    item,
1631                                                    "Invalid IPv4 mask.");
1632                                         return -rte_errno;
1633                                 }
1634                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1635                                         input_set |= tunnel_type ?
1636                                                      ICE_INSET_TUN_IPV4_SRC :
1637                                                      ICE_INSET_IPV4_SRC;
1638                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1639                                         input_set |= tunnel_type ?
1640                                                      ICE_INSET_TUN_IPV4_DST :
1641                                                      ICE_INSET_IPV4_DST;
1642                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1643                                         input_set |= ICE_INSET_IPV4_TOS;
1644                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1645                                         input_set |= ICE_INSET_IPV4_TTL;
1646                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1647                                         input_set |= ICE_INSET_IPV4_PROTO;
1648
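                                     /*
                                      * Note: dst/src are stored swapped on
                                      * purpose; the base FDIR code inserts
                                      * ip.v4.src_ip at the destination-address
                                      * offset (and vice versa) when it builds
                                      * the training packet for this filter.
                                      */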
1649                                 filter->input.ip.v4.dst_ip =
1650                                         ipv4_spec->hdr.src_addr;
1651                                 filter->input.ip.v4.src_ip =
1652                                         ipv4_spec->hdr.dst_addr;
1653                                 filter->input.ip.v4.tos =
1654                                         ipv4_spec->hdr.type_of_service;
1655                                 filter->input.ip.v4.ttl =
1656                                         ipv4_spec->hdr.time_to_live;
1657                                 filter->input.ip.v4.proto =
1658                                         ipv4_spec->hdr.next_proto_id;
1659                         }
1660
1661                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1662                         break;
1663                 case RTE_FLOW_ITEM_TYPE_IPV6:
1664                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1665                         ipv6_spec = item->spec;
1666                         ipv6_mask = item->mask;
1667
1668                         if (ipv6_spec && ipv6_mask) {
1669                                 /* Check IPv6 mask and update input set */
1670                                 if (ipv6_mask->hdr.payload_len) {
1671                                         rte_flow_error_set(error, EINVAL,
1672                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1673                                                    item,
1674                                                    "Invalid IPv6 mask");
1675                                         return -rte_errno;
1676                                 }
1677
1678                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1679                                             ipv6_addr_mask,
1680                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1681                                         input_set |= ICE_INSET_IPV6_SRC;
1682                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1683                                             ipv6_addr_mask,
1684                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1685                                         input_set |= ICE_INSET_IPV6_DST;
1686
1687                                 if ((ipv6_mask->hdr.vtc_flow &
1688                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1689                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1690                                         input_set |= ICE_INSET_IPV6_TC;
1691                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1692                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1693                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1694                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1695
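                                     /* dst/src swapped deliberately; see the
                                      * IPv4 note above.
                                      */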
1696                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1697                                            ipv6_spec->hdr.src_addr, 16);
1698                                 rte_memcpy(filter->input.ip.v6.src_ip,
1699                                            ipv6_spec->hdr.dst_addr, 16);
1700
1701                                 vtc_flow_cpu =
1702                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1703                                 filter->input.ip.v6.tc =
1704                                         (uint8_t)(vtc_flow_cpu >>
1705                                                   ICE_FDIR_IPV6_TC_OFFSET);
1706                                 filter->input.ip.v6.proto =
1707                                         ipv6_spec->hdr.proto;
1708                                 filter->input.ip.v6.hlim =
1709                                         ipv6_spec->hdr.hop_limits;
1710                         }
1711
1712                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1713                         break;
1714                 case RTE_FLOW_ITEM_TYPE_TCP:
1715                         tcp_spec = item->spec;
1716                         tcp_mask = item->mask;
1717
1718                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1719                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1720                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1721                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1722
1723                         if (tcp_spec && tcp_mask) {
1724                                 /* Check TCP mask and update input set */
1725                                 if (tcp_mask->hdr.sent_seq ||
1726                                     tcp_mask->hdr.recv_ack ||
1727                                     tcp_mask->hdr.data_off ||
1728                                     tcp_mask->hdr.tcp_flags ||
1729                                     tcp_mask->hdr.rx_win ||
1730                                     tcp_mask->hdr.cksum ||
1731                                     tcp_mask->hdr.tcp_urp) {
1732                                         rte_flow_error_set(error, EINVAL,
1733                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1734                                                    item,
1735                                                    "Invalid TCP mask");
1736                                         return -rte_errno;
1737                                 }
1738
1739                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1740                                         input_set |= tunnel_type ?
1741                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1742                                                      ICE_INSET_TCP_SRC_PORT;
1743                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1744                                         input_set |= tunnel_type ?
1745                                                      ICE_INSET_TUN_TCP_DST_PORT :
1746                                                      ICE_INSET_TCP_DST_PORT;
1747
1748                                 /* Get filter info: ports are stored swapped, like the addresses */
1749                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1750                                         filter->input.ip.v4.dst_port =
1751                                                 tcp_spec->hdr.src_port;
1752                                         filter->input.ip.v4.src_port =
1753                                                 tcp_spec->hdr.dst_port;
1754                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1755                                         filter->input.ip.v6.dst_port =
1756                                                 tcp_spec->hdr.src_port;
1757                                         filter->input.ip.v6.src_port =
1758                                                 tcp_spec->hdr.dst_port;
1759                                 }
1760                         }
1761                         break;
1762                 case RTE_FLOW_ITEM_TYPE_UDP:
1763                         udp_spec = item->spec;
1764                         udp_mask = item->mask;
1765
1766                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1767                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1768                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1769                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1770
1771                         if (udp_spec && udp_mask) {
1772                                 /* Check UDP mask and update input set */
1773                                 if (udp_mask->hdr.dgram_len ||
1774                                     udp_mask->hdr.dgram_cksum) {
1775                                         rte_flow_error_set(error, EINVAL,
1776                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1777                                                    item,
1778                                                    "Invalid UDP mask");
1779                                         return -rte_errno;
1780                                 }
1781
1782                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1783                                         input_set |= tunnel_type ?
1784                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1785                                                      ICE_INSET_UDP_SRC_PORT;
1786                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1787                                         input_set |= tunnel_type ?
1788                                                      ICE_INSET_TUN_UDP_DST_PORT :
1789                                                      ICE_INSET_UDP_DST_PORT;
1790
1791                                 /* Get filter info */
1792                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1793                                         filter->input.ip.v4.dst_port =
1794                                                 udp_spec->hdr.src_port;
1795                                         filter->input.ip.v4.src_port =
1796                                                 udp_spec->hdr.dst_port;
1797                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1798                                         filter->input.ip.v6.src_port =
1799                                                 udp_spec->hdr.dst_port;
1800                                         filter->input.ip.v6.dst_port =
1801                                                 udp_spec->hdr.src_port;
1802                                 }
1803                         }
1804                         break;
1805                 case RTE_FLOW_ITEM_TYPE_SCTP:
1806                         sctp_spec = item->spec;
1807                         sctp_mask = item->mask;
1808
1809                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1810                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1811                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1812                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1813
1814                         if (sctp_spec && sctp_mask) {
1815                                 /* Check SCTP mask and update input set */
1816                                 if (sctp_mask->hdr.cksum) {
1817                                         rte_flow_error_set(error, EINVAL,
1818                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1819                                                    item,
1820                                                    "Invalid SCTP mask");
1821                                         return -rte_errno;
1822                                 }
1823
1824                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1825                                         input_set |= tunnel_type ?
1826                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1827                                                      ICE_INSET_SCTP_SRC_PORT;
1828                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1829                                         input_set |= tunnel_type ?
1830                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1831                                                      ICE_INSET_SCTP_DST_PORT;
1832
1833                                 /* Get filter info */
1834                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1835                                         filter->input.ip.v4.dst_port =
1836                                                 sctp_spec->hdr.src_port;
1837                                         filter->input.ip.v4.src_port =
1838                                                 sctp_spec->hdr.dst_port;
1839                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1840                                         filter->input.ip.v6.dst_port =
1841                                                 sctp_spec->hdr.src_port;
1842                                         filter->input.ip.v6.src_port =
1843                                                 sctp_spec->hdr.dst_port;
1844                                 }
1845                         }
1846                         break;
1847                 case RTE_FLOW_ITEM_TYPE_VOID:
1848                         break;
1849                 case RTE_FLOW_ITEM_TYPE_VXLAN:
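                             /* reset l3: the inner IPv4/IPv6 item sets it again */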
1850                         l3 = RTE_FLOW_ITEM_TYPE_END;
1851                         vxlan_spec = item->spec;
1852                         vxlan_mask = item->mask;
1853
1854                         if (vxlan_spec || vxlan_mask) {
1855                                 rte_flow_error_set(error, EINVAL,
1856                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1857                                                    item,
1858                                                    "Matching on VXLAN fields is not supported");
1859                                 return -rte_errno;
1860                         }
1861
1862                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1863                         break;
1864                 case RTE_FLOW_ITEM_TYPE_GTPU:
1865                         l3 = RTE_FLOW_ITEM_TYPE_END;
1866                         gtp_spec = item->spec;
1867                         gtp_mask = item->mask;
1868
1869                         if (gtp_spec && gtp_mask) {
1870                                 if (gtp_mask->v_pt_rsv_flags ||
1871                                     gtp_mask->msg_type ||
1872                                     gtp_mask->msg_len) {
1873                                         rte_flow_error_set(error, EINVAL,
1874                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1875                                                    item,
1876                                                    "Invalid GTP mask");
1877                                         return -rte_errno;
1878                                 }
1879
1880                                 if (gtp_mask->teid == UINT32_MAX)
1881                                         input_set |= ICE_INSET_GTPU_TEID;
1882
1883                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1884                         }
1885
1886                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1887                         break;
1888                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1889                         gtp_psc_spec = item->spec;
1890                         gtp_psc_mask = item->mask;
1891
1892                         if (gtp_psc_spec && gtp_psc_mask) {
1893                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1894                                         input_set |= ICE_INSET_GTPU_QFI;
1895
1896                                 filter->input.gtpu_data.qfi =
1897                                         gtp_psc_spec->qfi;
1898                         }
1899                         break;
1900                 default:
1901                         rte_flow_error_set(error, EINVAL,
1902                                    RTE_FLOW_ERROR_TYPE_ITEM,
1903                                    item,
1904                                    "Invalid pattern item.");
1905                         return -rte_errno;
1906                 }
1907         }
1908
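             /* GTP-U is only supported over outer IPv4 here, hence the fixed flow type */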
1909         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
1910                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1911
1912         filter->tunnel_type = tunnel_type;
1913         filter->input.flow_type = flow_type;
1914         filter->input_set = input_set;
1915
1916         return 0;
1917 }
1918
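     /*
      * Top-level FDIR parse callback: match the pattern against the
      * supported pattern/input-set table, then parse the pattern and
      * actions into pf->fdir.conf.  An (illustrative) testpmd rule such as
      *   flow create 0 ingress pattern eth / ipv4 src is 1.2.3.4 / end
      *        actions queue index 3 / mark id 1 / end
      * goes through this path.
      */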
1919 static int
1920 ice_fdir_parse(struct ice_adapter *ad,
1921                struct ice_pattern_match_item *array,
1922                uint32_t array_len,
1923                const struct rte_flow_item pattern[],
1924                const struct rte_flow_action actions[],
1925                void **meta,
1926                struct rte_flow_error *error)
1927 {
1928         struct ice_pf *pf = &ad->pf;
1929         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1930         struct ice_pattern_match_item *item = NULL;
1931         uint64_t input_set;
1932         int ret;
1933
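             /*
              * Note: "filter" (and thus *meta) points at the single per-PF
              * conf structure; the create callback is expected to copy it
              * before this storage is reused by the next parse.
              */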
1934         memset(filter, 0, sizeof(*filter));
1935         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1936         if (!item)
1937                 return -rte_errno;
1938
1939         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1940         if (ret)
1941                 goto error;
1942         input_set = filter->input_set;
1943         if (!input_set || input_set & ~item->input_set_mask) {
1944                 rte_flow_error_set(error, EINVAL,
1945                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1946                                    pattern,
1947                                    "Invalid input set");
1948                 ret = -rte_errno;
1949                 goto error;
1950         }
1951
1952         ret = ice_fdir_parse_action(ad, actions, error, filter);
1953         if (ret)
1954                 goto error;
1955
1956         *meta = filter;
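             /* fall through: the matched pattern item is freed on success too */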
1957 error:
1958         rte_free(item);
1959         return ret;
1960 }
1961
1962 static struct ice_flow_parser ice_fdir_parser_os = {
1963         .engine = &ice_fdir_engine,
1964         .array = ice_fdir_pattern_os,
1965         .array_len = RTE_DIM(ice_fdir_pattern_os),
1966         .parse_pattern_action = ice_fdir_parse,
1967         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1968 };
1969
1970 static struct ice_flow_parser ice_fdir_parser_comms = {
1971         .engine = &ice_fdir_engine,
1972         .array = ice_fdir_pattern_comms,
1973         .array_len = RTE_DIM(ice_fdir_pattern_comms),
1974         .parse_pattern_action = ice_fdir_parse,
1975         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1976 };
1977
1978 RTE_INIT(ice_fdir_engine_register)
1979 {
1980         ice_register_flow_engine(&ice_fdir_engine);
1981 }