drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)
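
/*
 * Illustrative example (not used below, an assumption for this sketch): the
 * Traffic Class sits in bits 20..27 of the IPv6 vtc_flow word, so after
 * conversion to CPU byte order it can be recovered with:
 *
 *	uint8_t tc = (rte_be_to_cpu_32(ipv6->vtc_flow) & ICE_IPV6_TC_MASK) >>
 *		     ICE_FDIR_IPV6_TC_OFFSET;
 *
 * where "ipv6" stands for a struct rte_ipv6_hdr pointer.
 */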

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH (\
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)

#define ICE_FDIR_INSET_IPV4_TCP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
	ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
	{pattern_ethertype,				ICE_FDIR_INSET_ETH,		ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,				ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,				ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE,			ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_VXLAN,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	/* The GTPU input set is duplicated in the 3rd column to align with shared code behavior; ideally the GTPU fields would only go in the 2nd column. */
	{pattern_eth_ipv4_gtpu,				ICE_FDIR_INSET_IPV4_GTPU,	ICE_FDIR_INSET_IPV4_GTPU,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh,			ICE_FDIR_INSET_IPV4_GTPU_EH,	ICE_FDIR_INSET_IPV4_GTPU_EH,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,				ICE_FDIR_INSET_IPV6_GTPU,	ICE_FDIR_INSET_IPV6_GTPU,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh,			ICE_FDIR_INSET_IPV6_GTPU_EH,	ICE_FDIR_INSET_IPV6_GTPU_EH,	ICE_INSET_NONE},
};
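
/*
 * Illustrative mapping (assumption, not part of the driver): a testpmd rule
 * such as
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / udp
 *	  src is 1024 / end actions queue index 4 / end
 *
 * selects the pattern_eth_ipv4_udp entry above; every field the rule matches
 * on must be covered by ICE_FDIR_INSET_ETH_IPV4_UDP, otherwise parsing
 * rejects the rule.
 */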

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	/* check for space before linking the pool into the container, so a
	 * full container does not free a pool that is already on pool_list
	 */
	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
					*container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->ref_cnt &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}
	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
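
/*
 * Illustrative note (assumption, not part of the driver): two rules created
 * with a shared count action, e.g.
 *
 *	struct rte_flow_action_count cnt = { .shared = 1, .id = 7 };
 *
 * land on one hardware counter: ice_fdir_counter_shared_search() finds the
 * first allocation by id, and ice_fdir_counter_alloc() then only increments
 * ref_cnt instead of taking a new counter from the pool.
 */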

static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		goto fail_filter_list;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		goto fail_counter;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_fdir_counter_release(pf);
fail_counter:
	ice_fdir_release_filter_list(pf);
fail_filter_list:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}

/* Remove a profile for a given filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			/* use prof_id here, as at add time; ptype alone would
			 * miss tunnel profiles
			 */
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* the input set matches the existing profile: return -EEXIST so the
	 * caller reuses it
	 */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}
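
/*
 * Illustrative conflict (assumption, not from the code): if an IPv4-other
 * rule matching only the source address exists, a second IPv4-other rule
 * matching only the destination address needs a different field vector for
 * the same flow type and is rejected by the -EINVAL branch above until the
 * first rule is flushed.
 */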

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rule exists, fail to resolve the conflict */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}

static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}
	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	/* check if the input set conflicts with the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check if the profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint32_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
		{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				ICE_FLOW_SEG_HDR_IPV4 |
				ICE_FLOW_SEG_HDR_VXLAN |
				ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t inner_input_set, uint64_t outer_input_set,
			enum ice_fdir_tunnel_type ttype)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	uint64_t input_set;
	bool is_tunnel;
	int k, i, ret = 0;

	if (!(inner_input_set | outer_input_set))
		return -EINVAL;

	seg_tun = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
	if (!seg_tun) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for flow segments");
		return -ENOMEM;
	}

	/* use seg_tun[1] to record the tunnel inner part */
	for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
		seg = &seg_tun[k];
		input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
		if (input_set == 0)
			continue;

		for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
			field[i] = ICE_FLOW_FIELD_IDX_MAX;

		ice_fdir_input_set_parse(input_set, field);

		ice_fdir_input_set_hdrs(flow, seg);

		for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
			ice_flow_set_fld(seg, field[i],
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);
		}
	}

	is_tunnel = ice_fdir_is_tunnel_profile(ttype);

	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
				   seg_tun, flow, is_tunnel);
	if (ret < 0) {
		/* seg_tun was not taken over by the hardware table */
		rte_free(seg_tun);
		if (ret == -EEXIST)
			ret = 0;
	}

	return ret;
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	if (ad->hw.dcf_enabled)
		return 0;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	parser = &ice_fdir_parser;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *parser;
	struct ice_pf *pf = &ad->pf;

	if (ad->hw.dcf_enabled)
		return;

	parser = &ice_fdir_parser;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check whether a flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
			const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set_i, filter->input_set_o,
				      filter->tunnel_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 1);

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_counter;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	/* release the counter only after the rule is gone from HW and SW */
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 0);

	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		      struct rte_flow *flow,
		      struct rte_flow_query_count *flow_stats,
		      struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ACTION,
				  NULL,
				  "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits then returns the value from
	 * the shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}
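
/*
 * Minimal usage sketch (assumption, not part of this file): an application
 * reaches ice_fdir_query_count() through the generic rte_flow API. The
 * example_* names and the #ifdef guard are illustrative only.
 */
#ifdef ICE_FDIR_USAGE_EXAMPLE
static int
example_read_fdir_hits(uint16_t port_id, struct rte_flow *flow)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count stats = { .reset = 0 };
	struct rte_flow_error error;

	/* dispatches to the .query_count callback of the owning engine */
	if (rte_flow_query(port_id, flow, &count_action, &stats, &error))
		return -1;

	if (stats.hits_set)
		printf("fdir rule hits: %llu\n",
		       (unsigned long long)stats.hits);
	return 0;
}
#endif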

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
1418
1419 static int
1420 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1421                               struct rte_flow_error *error,
1422                               const struct rte_flow_action *act,
1423                               struct ice_fdir_filter_conf *filter)
1424 {
1425         const struct rte_flow_action_rss *rss = act->conf;
1426         uint32_t i;
1427
1428         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1429                 rte_flow_error_set(error, EINVAL,
1430                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1431                                    "Invalid action.");
1432                 return -rte_errno;
1433         }
1434
1435         if (rss->queue_num <= 1) {
1436                 rte_flow_error_set(error, EINVAL,
1437                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1438                                    "Queue region size can't be 0 or 1.");
1439                 return -rte_errno;
1440         }
1441
1442         /* check that the queue indexes of the queue region are contiguous */
1443         for (i = 0; i < rss->queue_num - 1; i++) {
1444                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1445                         rte_flow_error_set(error, EINVAL,
1446                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1447                                            "Discontinuous queue region");
1448                         return -rte_errno;
1449                 }
1450         }
1451
1452         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1453                 rte_flow_error_set(error, EINVAL,
1454                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1455                                    "Invalid queue region indexes.");
1456                 return -rte_errno;
1457         }
1458
1459         if (!(rte_is_power_of_2(rss->queue_num) &&
1460              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1461                 rte_flow_error_set(error, EINVAL,
1462                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1463                                    "The region size must be one of the following values: "
1464                                    "2, 4, 8, 16, 32, 64 or 128, as long as the total "
1465                                    "number of queues does not exceed the VSI allocation.");
1466                 return -rte_errno;
1467         }
1468
1469         filter->input.q_index = rss->queue[0];
1470         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1471         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1472
1473         return 0;
1474 }
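/*
 * The checks above admit only a contiguous queue span whose size is a
 * power of two (2..128) and whose last index is a valid Rx queue. A
 * minimal conforming action configuration, illustrative only (queue
 * indexes are arbitrary):
 */
static const uint16_t example_qregion_queues[] = { 8, 9, 10, 11 };

static const struct rte_flow_action_rss example_qregion_conf = {
        .queue_num = RTE_DIM(example_qregion_queues), /* 4: power of two */
        .queue = example_qregion_queues, /* 8..11: contiguous indexes */
};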
1475
1476 static int
1477 ice_fdir_parse_action(struct ice_adapter *ad,
1478                       const struct rte_flow_action actions[],
1479                       struct rte_flow_error *error,
1480                       struct ice_fdir_filter_conf *filter)
1481 {
1482         struct ice_pf *pf = &ad->pf;
1483         const struct rte_flow_action_queue *act_q;
1484         const struct rte_flow_action_mark *mark_spec = NULL;
1485         const struct rte_flow_action_count *act_count;
1486         uint32_t dest_num = 0;
1487         uint32_t mark_num = 0;
1488         uint32_t counter_num = 0;
1489         int ret;
1490
1491         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1492                 switch (actions->type) {
1493                 case RTE_FLOW_ACTION_TYPE_VOID:
1494                         break;
1495                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1496                         dest_num++;
1497
1498                         act_q = actions->conf;
1499                         filter->input.q_index = act_q->index;
1500                         if (filter->input.q_index >=
1501                                         pf->dev_data->nb_rx_queues) {
1502                                 rte_flow_error_set(error, EINVAL,
1503                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1504                                                    actions,
1505                                                    "Invalid queue for FDIR.");
1506                                 return -rte_errno;
1507                         }
1508                         filter->input.dest_ctl =
1509                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1510                         break;
1511                 case RTE_FLOW_ACTION_TYPE_DROP:
1512                         dest_num++;
1513
1514                         filter->input.dest_ctl =
1515                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1516                         break;
1517                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1518                         dest_num++;
1519
1520                         filter->input.dest_ctl =
1521                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1522                         break;
1523                 case RTE_FLOW_ACTION_TYPE_RSS:
1524                         dest_num++;
1525
1526                         ret = ice_fdir_parse_action_qregion(pf,
1527                                                 error, actions, filter);
1528                         if (ret)
1529                                 return ret;
1530                         break;
1531                 case RTE_FLOW_ACTION_TYPE_MARK:
1532                         mark_num++;
1533                         filter->mark_flag = 1;
1534                         mark_spec = actions->conf;
1535                         filter->input.fltr_id = mark_spec->id;
1536                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1537                         break;
1538                 case RTE_FLOW_ACTION_TYPE_COUNT:
1539                         counter_num++;
1540
1541                         act_count = actions->conf;
1542                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1543                         rte_memcpy(&filter->act_count, act_count,
1544                                                 sizeof(filter->act_count));
1545
1546                         break;
1547                 default:
1548                         rte_flow_error_set(error, EINVAL,
1549                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1550                                    "Invalid action.");
1551                         return -rte_errno;
1552                 }
1553         }
1554
1555         if (dest_num >= 2) {
1556                 rte_flow_error_set(error, EINVAL,
1557                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1558                            "Unsupported action combination");
1559                 return -rte_errno;
1560         }
1561
1562         if (mark_num >= 2) {
1563                 rte_flow_error_set(error, EINVAL,
1564                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1565                            "Too many mark actions");
1566                 return -rte_errno;
1567         }
1568
1569         if (counter_num >= 2) {
1570                 rte_flow_error_set(error, EINVAL,
1571                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1572                            "Too many count actions");
1573                 return -rte_errno;
1574         }
1575
1576         if (dest_num + mark_num + counter_num == 0) {
1577                 rte_flow_error_set(error, EINVAL,
1578                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1579                            "Empty action");
1580                 return -rte_errno;
1581         }
1582
1583         /* Default to PASSTHRU mode in the "mark/count only" case. */
1584         if (dest_num == 0)
1585                 filter->input.dest_ctl =
1586                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1587
1588         return 0;
1589 }
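/*
 * The parser above accepts at most one destination action plus at most
 * one MARK and one COUNT. A minimal conforming action list, illustrative
 * only (queue index and mark id are arbitrary):
 */
static const struct rte_flow_action_queue example_fdir_queue = { .index = 3 };
static const struct rte_flow_action_mark example_fdir_mark = { .id = 0x1234 };
static const struct rte_flow_action_count example_fdir_count;

static const struct rte_flow_action example_fdir_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_fdir_queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_fdir_mark },
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &example_fdir_count },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};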
1590
1591 static int
1592 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1593                        const struct rte_flow_item pattern[],
1594                        struct rte_flow_error *error,
1595                        struct ice_fdir_filter_conf *filter)
1596 {
1597         const struct rte_flow_item *item = pattern;
1598         enum rte_flow_item_type item_type;
1599         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1600         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1601         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1602         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1603         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1604         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1605         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1606         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1607         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1608         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1609         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1610         uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
1611         uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
1612         uint64_t *input_set;
1613         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1614         uint8_t  ipv6_addr_mask[16] = {
1615                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1616                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1617         };
1618         uint32_t vtc_flow_cpu;
1619         uint16_t ether_type;
1620         enum rte_flow_item_type next_type;
1621         bool is_outer = true;
1622         struct ice_fdir_extra *p_ext_data;
1623         struct ice_fdir_v4 *p_v4 = NULL;
1624         struct ice_fdir_v6 *p_v6 = NULL;
1625
1626         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1627                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1628                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1629                 /* To align with shared code behavior, save GTPU outer
1630                  * fields in the inner struct.
1631                  */
1632                 if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
1633                     item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
1634                         is_outer = false;
1635                 }
1636         }
1637
1638         /* This loop parses the flow pattern and distinguishes non-tunnel
1639          * from tunnel flows. input_set_i is used for the inner part.
1640          */
1641         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1642                 if (item->last) {
1643                         rte_flow_error_set(error, EINVAL,
1644                                            RTE_FLOW_ERROR_TYPE_ITEM,
1645                                            item,
1646                                            "Range is not supported");
1647                         return -rte_errno;
1648                 }
1649                 item_type = item->type;
1650
1651                 input_set = (tunnel_type && !is_outer) ?
1652                             &input_set_i : &input_set_o;
1653
1654                 switch (item_type) {
1655                 case RTE_FLOW_ITEM_TYPE_ETH:
1656                         flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1657                         eth_spec = item->spec;
1658                         eth_mask = item->mask;
1659
1660                         if (!(eth_spec && eth_mask))
1661                                 break;
1662
1663                         if (!rte_is_zero_ether_addr(&eth_mask->dst))
1664                                 *input_set |= ICE_INSET_DMAC;
1665                         if (!rte_is_zero_ether_addr(&eth_mask->src))
1666                                 *input_set |= ICE_INSET_SMAC;
1667
1668                         next_type = (item + 1)->type;
1669                         /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
1670                         if (eth_mask->type == RTE_BE16(0xffff) &&
1671                             next_type == RTE_FLOW_ITEM_TYPE_END) {
1672                                 *input_set |= ICE_INSET_ETHERTYPE;
1673                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
1674
1675                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1676                                     ether_type == RTE_ETHER_TYPE_IPV6) {
1677                                         rte_flow_error_set(error, EINVAL,
1678                                                            RTE_FLOW_ERROR_TYPE_ITEM,
1679                                                            item,
1680                                                            "Unsupported ether_type.");
1681                                         return -rte_errno;
1682                                 }
1683                         }
1684
1685                         p_ext_data = (tunnel_type && is_outer) ?
1686                                      &filter->input.ext_data_outer :
1687                                      &filter->input.ext_data;
1688                         rte_memcpy(&p_ext_data->src_mac,
1689                                    &eth_spec->src, RTE_ETHER_ADDR_LEN);
1690                         rte_memcpy(&p_ext_data->dst_mac,
1691                                    &eth_spec->dst, RTE_ETHER_ADDR_LEN);
1692                         rte_memcpy(&p_ext_data->ether_type,
1693                                    &eth_spec->type, sizeof(eth_spec->type));
1694                         break;
1695                 case RTE_FLOW_ITEM_TYPE_IPV4:
1696                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1697                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1698                         ipv4_spec = item->spec;
1699                         ipv4_mask = item->mask;
1700                         p_v4 = (tunnel_type && is_outer) ?
1701                                &filter->input.ip_outer.v4 :
1702                                &filter->input.ip.v4;
1703
1704                         if (!(ipv4_spec && ipv4_mask))
1705                                 break;
1706
1707                         /* Check IPv4 mask and update input set */
1708                         if (ipv4_mask->hdr.version_ihl ||
1709                             ipv4_mask->hdr.total_length ||
1710                             ipv4_mask->hdr.packet_id ||
1711                             ipv4_mask->hdr.fragment_offset ||
1712                             ipv4_mask->hdr.hdr_checksum) {
1713                                 rte_flow_error_set(error, EINVAL,
1714                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1715                                                    item,
1716                                                    "Invalid IPv4 mask.");
1717                                 return -rte_errno;
1718                         }
1719
1720                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1721                                 *input_set |= ICE_INSET_IPV4_DST;
1722                         if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1723                                 *input_set |= ICE_INSET_IPV4_SRC;
1724                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1725                                 *input_set |= ICE_INSET_IPV4_TTL;
1726                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1727                                 *input_set |= ICE_INSET_IPV4_PROTO;
1728                         if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1729                                 *input_set |= ICE_INSET_IPV4_TOS;
1730
1731                         p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
1732                         p_v4->src_ip = ipv4_spec->hdr.src_addr;
1733                         p_v4->ttl = ipv4_spec->hdr.time_to_live;
1734                         p_v4->proto = ipv4_spec->hdr.next_proto_id;
1735                         p_v4->tos = ipv4_spec->hdr.type_of_service;
1736                         break;
1737                 case RTE_FLOW_ITEM_TYPE_IPV6:
1738                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1739                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1740                         ipv6_spec = item->spec;
1741                         ipv6_mask = item->mask;
1742                         p_v6 = (tunnel_type && is_outer) ?
1743                                &filter->input.ip_outer.v6 :
1744                                &filter->input.ip.v6;
1745
1746                         if (!(ipv6_spec && ipv6_mask))
1747                                 break;
1748
1749                         /* Check IPv6 mask and update input set */
1750                         if (ipv6_mask->hdr.payload_len) {
1751                                 rte_flow_error_set(error, EINVAL,
1752                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1753                                                    item,
1754                                                    "Invalid IPv6 mask");
1755                                 return -rte_errno;
1756                         }
1757
1758                         if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
1759                                     RTE_DIM(ipv6_mask->hdr.src_addr)))
1760                                 *input_set |= ICE_INSET_IPV6_SRC;
1761                         if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
1762                                     RTE_DIM(ipv6_mask->hdr.dst_addr)))
1763                                 *input_set |= ICE_INSET_IPV6_DST;
1764
1765                         if ((ipv6_mask->hdr.vtc_flow &
1766                              rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1767                             == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1768                                 *input_set |= ICE_INSET_IPV6_TC;
1769                         if (ipv6_mask->hdr.proto == UINT8_MAX)
1770                                 *input_set |= ICE_INSET_IPV6_NEXT_HDR;
1771                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1772                                 *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1773
1774                         rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
1775                         rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
1776                         vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1777                         p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
1778                         p_v6->proto = ipv6_spec->hdr.proto;
1779                         p_v6->hlim = ipv6_spec->hdr.hop_limits;
1780                         break;
1781                 case RTE_FLOW_ITEM_TYPE_TCP:
1782                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1783                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1784                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1785                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1786
1787                         tcp_spec = item->spec;
1788                         tcp_mask = item->mask;
1789
1790                         if (!(tcp_spec && tcp_mask))
1791                                 break;
1792
1793                         /* Check TCP mask and update input set */
1794                         if (tcp_mask->hdr.sent_seq ||
1795                             tcp_mask->hdr.recv_ack ||
1796                             tcp_mask->hdr.data_off ||
1797                             tcp_mask->hdr.tcp_flags ||
1798                             tcp_mask->hdr.rx_win ||
1799                             tcp_mask->hdr.cksum ||
1800                             tcp_mask->hdr.tcp_urp) {
1801                                 rte_flow_error_set(error, EINVAL,
1802                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1803                                                    item,
1804                                                    "Invalid TCP mask");
1805                                 return -rte_errno;
1806                         }
1807
1808                         if (tcp_mask->hdr.src_port == UINT16_MAX)
1809                                 *input_set |= ICE_INSET_TCP_SRC_PORT;
1810                         if (tcp_mask->hdr.dst_port == UINT16_MAX)
1811                                 *input_set |= ICE_INSET_TCP_DST_PORT;
1812
1813                         /* Get filter info */
1814                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1815                                 assert(p_v4);
1816                                 p_v4->dst_port = tcp_spec->hdr.dst_port;
1817                                 p_v4->src_port = tcp_spec->hdr.src_port;
1818                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1819                                 assert(p_v6);
1820                                 p_v6->dst_port = tcp_spec->hdr.dst_port;
1821                                 p_v6->src_port = tcp_spec->hdr.src_port;
1822                         }
1823                         break;
1824                 case RTE_FLOW_ITEM_TYPE_UDP:
1825                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1826                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1827                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1828                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1829
1830                         udp_spec = item->spec;
1831                         udp_mask = item->mask;
1832
1833                         if (!(udp_spec && udp_mask))
1834                                 break;
1835
1836                         /* Check UDP mask and update input set */
1837                         if (udp_mask->hdr.dgram_len ||
1838                             udp_mask->hdr.dgram_cksum) {
1839                                 rte_flow_error_set(error, EINVAL,
1840                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1841                                                    item,
1842                                                    "Invalid UDP mask");
1843                                 return -rte_errno;
1844                         }
1845
1846                         if (udp_mask->hdr.src_port == UINT16_MAX)
1847                                 *input_set |= ICE_INSET_UDP_SRC_PORT;
1848                         if (udp_mask->hdr.dst_port == UINT16_MAX)
1849                                 *input_set |= ICE_INSET_UDP_DST_PORT;
1850
1851                         /* Get filter info */
1852                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1853                                 assert(p_v4);
1854                                 p_v4->dst_port = udp_spec->hdr.dst_port;
1855                                 p_v4->src_port = udp_spec->hdr.src_port;
1856                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1857                                 assert(p_v6);
1858                                 p_v6->src_port = udp_spec->hdr.src_port;
1859                                 p_v6->dst_port = udp_spec->hdr.dst_port;
1860                         }
1861                         break;
1862                 case RTE_FLOW_ITEM_TYPE_SCTP:
1863                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1864                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1865                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1866                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1867
1868                         sctp_spec = item->spec;
1869                         sctp_mask = item->mask;
1870
1871                         if (!(sctp_spec && sctp_mask))
1872                                 break;
1873
1874                         /* Check SCTP mask and update input set */
1875                         if (sctp_mask->hdr.cksum) {
1876                                 rte_flow_error_set(error, EINVAL,
1877                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1878                                                    item,
1879                                                    "Invalid SCTP mask");
1880                                 return -rte_errno;
1881                         }
1882
1883                         if (sctp_mask->hdr.src_port == UINT16_MAX)
1884                                 *input_set |= ICE_INSET_SCTP_SRC_PORT;
1885                         if (sctp_mask->hdr.dst_port == UINT16_MAX)
1886                                 *input_set |= ICE_INSET_SCTP_DST_PORT;
1887
1888                         /* Get filter info */
1889                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1890                                 assert(p_v4);
1891                                 p_v4->dst_port = sctp_spec->hdr.dst_port;
1892                                 p_v4->src_port = sctp_spec->hdr.src_port;
1893                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1894                                 assert(p_v6);
1895                                 p_v6->dst_port = sctp_spec->hdr.dst_port;
1896                                 p_v6->src_port = sctp_spec->hdr.src_port;
1897                         }
1898                         break;
1899                 case RTE_FLOW_ITEM_TYPE_VOID:
1900                         break;
1901                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1902                         l3 = RTE_FLOW_ITEM_TYPE_END;
1903                         vxlan_spec = item->spec;
1904                         vxlan_mask = item->mask;
1905                         is_outer = false;
1906
1907                         if (!(vxlan_spec && vxlan_mask))
1908                                 break;
1909
1910                         if (vxlan_mask->hdr.vx_flags) {
1911                                 rte_flow_error_set(error, EINVAL,
1912                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1913                                                    item,
1914                                                    "Invalid vxlan field");
1915                                 return -rte_errno;
1916                         }
1917
1918                         if (vxlan_mask->hdr.vx_vni)
1919                                 *input_set |= ICE_INSET_VXLAN_VNI;
1920
1921                         filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
1922
1923                         break;
1924                 case RTE_FLOW_ITEM_TYPE_GTPU:
1925                         l3 = RTE_FLOW_ITEM_TYPE_END;
1926                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1927                         gtp_spec = item->spec;
1928                         gtp_mask = item->mask;
1929
1930                         if (!(gtp_spec && gtp_mask))
1931                                 break;
1932
1933                         if (gtp_mask->v_pt_rsv_flags ||
1934                             gtp_mask->msg_type ||
1935                             gtp_mask->msg_len) {
1936                                 rte_flow_error_set(error, EINVAL,
1937                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1938                                                    item,
1939                                                    "Invalid GTP mask");
1940                                 return -rte_errno;
1941                         }
1942
1943                         if (gtp_mask->teid == UINT32_MAX)
1944                                 input_set_o |= ICE_INSET_GTPU_TEID;
1945
1946                         filter->input.gtpu_data.teid = gtp_spec->teid;
1947                         break;
1948                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1949                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1950                         gtp_psc_spec = item->spec;
1951                         gtp_psc_mask = item->mask;
1952
1953                         if (!(gtp_psc_spec && gtp_psc_mask))
1954                                 break;
1955
1956                         if (gtp_psc_mask->qfi == UINT8_MAX)
1957                                 input_set_o |= ICE_INSET_GTPU_QFI;
1958
1959                         filter->input.gtpu_data.qfi =
1960                                 gtp_psc_spec->qfi;
1961                         break;
1962                 default:
1963                         rte_flow_error_set(error, EINVAL,
1964                                            RTE_FLOW_ERROR_TYPE_ITEM,
1965                                            item,
1966                                            "Invalid pattern item.");
1967                         return -rte_errno;
1968                 }
1969         }
1970
1971         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
1972                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
1973                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
1974         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
1975                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
1976                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
1977         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
1978                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1979                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
1980         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
1981                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1982                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
1983         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1984                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN;
1985
1986         filter->tunnel_type = tunnel_type;
1987         filter->input.flow_type = flow_type;
1988         filter->input_set_o = input_set_o;
1989         filter->input_set_i = input_set_i;
1990
1991         return 0;
1992 }
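/*
 * A pattern the parser above maps to ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 * all-ones masks select the corresponding input-set bits. Illustrative
 * only; the address and port values are arbitrary:
 */
static const struct rte_flow_item_ipv4 example_ip4_spec = {
        .hdr = { .src_addr = RTE_BE32(0xC0A80001) }, /* 192.168.0.1 */
};
static const struct rte_flow_item_ipv4 example_ip4_mask = {
        .hdr = { .src_addr = RTE_BE32(0xFFFFFFFF) },
};
static const struct rte_flow_item_udp example_udp_spec = {
        .hdr = { .dst_port = RTE_BE16(53) },
};
static const struct rte_flow_item_udp example_udp_mask = {
        .hdr = { .dst_port = RTE_BE16(0xFFFF) },
};

static const struct rte_flow_item example_fdir_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &example_ip4_spec, .mask = &example_ip4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &example_udp_spec, .mask = &example_udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};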
1993
1994 static int
1995 ice_fdir_parse(struct ice_adapter *ad,
1996                struct ice_pattern_match_item *array,
1997                uint32_t array_len,
1998                const struct rte_flow_item pattern[],
1999                const struct rte_flow_action actions[],
2000                uint32_t priority __rte_unused,
2001                void **meta,
2002                struct rte_flow_error *error)
2003 {
2004         struct ice_pf *pf = &ad->pf;
2005         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2006         struct ice_pattern_match_item *item = NULL;
2007         uint64_t input_set;
2008         int ret;
2009
2010         memset(filter, 0, sizeof(*filter));
2011         item = ice_search_pattern_match_item(ad, pattern, array, array_len,
2012                                              error);
2013         if (!item)
2014                 return -rte_errno;
2015
2016         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2017         if (ret)
2018                 goto error;
2019         input_set = filter->input_set_o | filter->input_set_i;
2020         if (!input_set || filter->input_set_o & ~item->input_set_mask_o ||
2021             filter->input_set_i & ~item->input_set_mask_i) {
2022                 rte_flow_error_set(error, EINVAL,
2023                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2024                                    pattern,
2025                                    "Invalid input set");
2026                 ret = -rte_errno;
2027                 goto error;
2028         }
2029
2030         ret = ice_fdir_parse_action(ad, actions, error, filter);
2031         if (ret)
2032                 goto error;
2033
2034         if (meta)
2035                 *meta = filter;
2036 error:
2037         rte_free(item);
2038         return ret;
2039 }
2040
2041 static struct ice_flow_parser ice_fdir_parser = {
2042         .engine = &ice_fdir_engine,
2043         .array = ice_fdir_pattern_list,
2044         .array_len = RTE_DIM(ice_fdir_pattern_list),
2045         .parse_pattern_action = ice_fdir_parse,
2046         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2047 };
2048
2049 RTE_INIT(ice_fdir_engine_register)
2050 {
2051         ice_register_flow_engine(&ice_fdir_engine);
2052 }
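/*
 * End to end, the engine registered above is exercised through the
 * generic rte_flow API. A minimal sketch, illustrative only, reusing the
 * example pattern and action list defined earlier in this file:
 */
static struct rte_flow *
example_create_fdir_flow(uint16_t port_id)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        if (rte_flow_validate(port_id, &attr, example_fdir_pattern,
                              example_fdir_actions, &err) != 0)
                return NULL; /* pattern/actions rejected by the parser */
        return rte_flow_create(port_id, &attr, example_fdir_pattern,
                               example_fdir_actions, &err);
}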