net/ice: support flow director GTPU outer IPv4/IPv6
drivers/net/ice/ice_fdir_filter.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <rte_flow.h>
7 #include <rte_hash.h>
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
13 #include "ice_rxtx.h"
14 #include "ice_generic_flow.h"
15
16 #define ICE_FDIR_IPV6_TC_OFFSET         20
17 #define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
18
19 #define ICE_FDIR_MAX_QREGION_SIZE       128
20
21 #define ICE_FDIR_INSET_ETH (\
22         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
23
24 #define ICE_FDIR_INSET_ETH_IPV4 (\
25         ICE_FDIR_INSET_ETH | \
26         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
27         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
28
29 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
30         ICE_FDIR_INSET_ETH_IPV4 | \
31         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
32
33 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
34         ICE_FDIR_INSET_ETH_IPV4 | \
35         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
36
37 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
38         ICE_FDIR_INSET_ETH_IPV4 | \
39         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
40
41 #define ICE_FDIR_INSET_ETH_IPV6 (\
42         ICE_INSET_DMAC | \
43         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
44         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
45
46 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
47         ICE_FDIR_INSET_ETH_IPV6 | \
48         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
49
50 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
51         ICE_FDIR_INSET_ETH_IPV6 | \
52         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
53
54 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
55         ICE_FDIR_INSET_ETH_IPV6 | \
56         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
57
58 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
59         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
60
61 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
62         ICE_FDIR_INSET_VXLAN_IPV4 | \
63         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
64
65 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
66         ICE_FDIR_INSET_VXLAN_IPV4 | \
67         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
68
69 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
70         ICE_FDIR_INSET_VXLAN_IPV4 | \
71         ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
72
73 #define ICE_FDIR_INSET_IPV4_GTPU (\
74         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
75
76 #define ICE_FDIR_INSET_IPV4_GTPU_EH (\
77         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
78         ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
79
80 #define ICE_FDIR_INSET_IPV6_GTPU (\
81         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)
82
83 #define ICE_FDIR_INSET_IPV6_GTPU_EH (\
84         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
85         ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
86
87 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
88         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
89         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
90         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
91         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
92         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
93         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
94         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
95         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
96         {pattern_eth_ipv4_udp_vxlan_ipv4,
97                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
98         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
99                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
100         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
101                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
102         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
103                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
104         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
105                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
106         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
107                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
108         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
109                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
110         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
111                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
112 };
113
114 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
115         {pattern_ethertype,            ICE_FDIR_INSET_ETH,                   ICE_INSET_NONE},
116         {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
117         {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
118         {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
119         {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
120         {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
121         {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
122         {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
123         {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
124         {pattern_eth_ipv4_udp_vxlan_ipv4,
125                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
126         {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
127                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
128         {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
129                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
130         {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
131                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
132         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
133                                        ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
134         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
135                                        ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
136         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
137                                        ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
138         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
139                                        ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
140         {pattern_eth_ipv4_gtpu,        ICE_FDIR_INSET_IPV4_GTPU,             ICE_INSET_NONE},
141         {pattern_eth_ipv4_gtpu_eh,     ICE_FDIR_INSET_IPV4_GTPU_EH,          ICE_INSET_NONE},
142         {pattern_eth_ipv6_gtpu,        ICE_FDIR_INSET_IPV6_GTPU,             ICE_INSET_NONE},
143         {pattern_eth_ipv6_gtpu_eh,     ICE_FDIR_INSET_IPV6_GTPU_EH,          ICE_INSET_NONE},
144 };
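/* Example of a rule matched by the GTPU entries above (illustrative,
 * testpmd flow syntax):
 *   flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x1 / end
 *        actions queue index 2 / end
 */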
145
146 static struct ice_flow_parser ice_fdir_parser_os;
147 static struct ice_flow_parser ice_fdir_parser_comms;
148
149 static int
150 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
151
152 static const struct rte_memzone *
153 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
154 {
155         const struct rte_memzone *mz;
156
157         mz = rte_memzone_lookup(name);
158         if (mz)
159                 return mz;
160
161         return rte_memzone_reserve_aligned(name, len, socket_id,
162                                            RTE_MEMZONE_IOVA_CONTIG,
163                                            ICE_RING_BASE_ALIGN);
164 }
165
166 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
167
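/*
 * ice_fdir_prof_alloc - allocate the FDIR HW profile table
 * @hw: pointer to the hardware structure
 *
 * Allocate one ice_fd_hw_prof entry per filter ptype; on failure everything
 * allocated so far is freed again.
 */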
168 static int
169 ice_fdir_prof_alloc(struct ice_hw *hw)
170 {
171         enum ice_fltr_ptype ptype, fltr_ptype;
172
173         if (!hw->fdir_prof) {
174                 hw->fdir_prof = (struct ice_fd_hw_prof **)
175                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
176                                    sizeof(*hw->fdir_prof));
177                 if (!hw->fdir_prof)
178                         return -ENOMEM;
179         }
180         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
181              ptype < ICE_FLTR_PTYPE_MAX;
182              ptype++) {
183                 if (!hw->fdir_prof[ptype]) {
184                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
185                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
186                         if (!hw->fdir_prof[ptype])
187                                 goto fail_mem;
188                 }
189         }
190         return 0;
191
192 fail_mem:
193         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
194              fltr_ptype < ptype;
195              fltr_ptype++) {
196                 rte_free(hw->fdir_prof[fltr_ptype]);
197                 hw->fdir_prof[fltr_ptype] = NULL;
198         }
199
200         rte_free(hw->fdir_prof);
201         hw->fdir_prof = NULL;
202
203         return -ENOMEM;
204 }
205
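/*
 * ice_fdir_counter_pool_add - add a block of counters to the container
 * @pf: board private structure
 * @container: counter pool container
 * @index_start: first HW counter index covered by the new pool
 * @len: number of counters in the pool
 */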
206 static int
207 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
208                           struct ice_fdir_counter_pool_container *container,
209                           uint32_t index_start,
210                           uint32_t len)
211 {
212         struct ice_fdir_counter_pool *pool;
213         uint32_t i;
214         int ret = 0;
215
216         pool = rte_zmalloc("ice_fdir_counter_pool",
217                            sizeof(*pool) +
218                            sizeof(struct ice_fdir_counter) * len,
219                            0);
220         if (!pool) {
221                 PMD_INIT_LOG(ERR,
222                              "Failed to allocate memory for fdir counter pool");
223                 return -ENOMEM;
224         }
225
226         TAILQ_INIT(&pool->counter_list);
227         TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
228
229         for (i = 0; i < len; i++) {
230                 struct ice_fdir_counter *counter = &pool->counters[i];
231
232                 counter->hw_index = index_start + i;
233                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
234         }
235
236         if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
237                 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
238                 ret = -EINVAL;
239                 goto free_pool;
240         }
241
242         container->pools[container->index_free++] = pool;
243         return 0;
244
245 free_pool:
246         rte_free(pool);
247         return ret;
248 }
249
250 static int
251 ice_fdir_counter_init(struct ice_pf *pf)
252 {
253         struct ice_hw *hw = ICE_PF_TO_HW(pf);
254         struct ice_fdir_info *fdir_info = &pf->fdir;
255         struct ice_fdir_counter_pool_container *container =
256                                 &fdir_info->counter;
257         uint32_t cnt_index, len;
258         int ret;
259
260         TAILQ_INIT(&container->pool_list);
261
262         cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
263         len = ICE_FDIR_COUNTERS_PER_BLOCK;
264
265         ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
266         if (ret) {
267                 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
268                 return ret;
269         }
270
271         return 0;
272 }
273
274 static int
275 ice_fdir_counter_release(struct ice_pf *pf)
276 {
277         struct ice_fdir_info *fdir_info = &pf->fdir;
278         struct ice_fdir_counter_pool_container *container =
279                                 &fdir_info->counter;
280         uint8_t i;
281
282         for (i = 0; i < container->index_free; i++) {
283                 rte_free(container->pools[i]);
284                 container->pools[i] = NULL;
285         }
286
287         TAILQ_INIT(&container->pool_list);
288         container->index_free = 0;
289
290         return 0;
291 }
292
293 static struct ice_fdir_counter *
294 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
295                                         *container,
296                                uint32_t id)
297 {
298         struct ice_fdir_counter_pool *pool;
299         struct ice_fdir_counter *counter;
300         int i;
301
302         TAILQ_FOREACH(pool, &container->pool_list, next) {
303                 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
304                         counter = &pool->counters[i];
305
306                         if (counter->shared &&
307                             counter->ref_cnt &&
308                             counter->id == id)
309                                 return counter;
310                 }
311         }
312
313         return NULL;
314 }
315
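/* Allocate an FDIR counter. A shared counter with a matching ID is reused
 * with its reference count bumped; otherwise the first free counter is taken
 * from the pools and its statistics registers are cleared.
 */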
316 static struct ice_fdir_counter *
317 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
318 {
319         struct ice_hw *hw = ICE_PF_TO_HW(pf);
320         struct ice_fdir_info *fdir_info = &pf->fdir;
321         struct ice_fdir_counter_pool_container *container =
322                                 &fdir_info->counter;
323         struct ice_fdir_counter_pool *pool = NULL;
324         struct ice_fdir_counter *counter_free = NULL;
325
326         if (shared) {
327                 counter_free = ice_fdir_counter_shared_search(container, id);
328                 if (counter_free) {
329                         if (counter_free->ref_cnt + 1 == 0) {
330                                 rte_errno = E2BIG;
331                                 return NULL;
332                         }
333                         counter_free->ref_cnt++;
334                         return counter_free;
335                 }
336         }
337
338         TAILQ_FOREACH(pool, &container->pool_list, next) {
339                 counter_free = TAILQ_FIRST(&pool->counter_list);
340                 if (counter_free)
341                         break;
342                 counter_free = NULL;
343         }
344
345         if (!counter_free) {
346                 PMD_DRV_LOG(ERR, "No free counter found");
347                 return NULL;
348         }
349
350         counter_free->shared = shared;
351         counter_free->id = id;
352         counter_free->ref_cnt = 1;
353         counter_free->pool = pool;
354
355         /* reset statistic counter value */
356         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
357         ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
358
359         TAILQ_REMOVE(&pool->counter_list, counter_free, next);
360         if (TAILQ_EMPTY(&pool->counter_list)) {
361                 TAILQ_REMOVE(&container->pool_list, pool, next);
362                 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
363         }
364
365         return counter_free;
366 }
367
368 static void
369 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
370                       struct ice_fdir_counter *counter)
371 {
372         if (!counter)
373                 return;
374
375         if (--counter->ref_cnt == 0) {
376                 struct ice_fdir_counter_pool *pool = counter->pool;
377
378                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
379         }
380 }
381
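/* Create the hash table and map used to track FDIR rules in software,
 * keyed by struct ice_fdir_fltr_pattern.
 */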
382 static int
383 ice_fdir_init_filter_list(struct ice_pf *pf)
384 {
385         struct rte_eth_dev *dev = pf->adapter->eth_dev;
386         struct ice_fdir_info *fdir_info = &pf->fdir;
387         char fdir_hash_name[RTE_HASH_NAMESIZE];
388         int ret;
389
390         struct rte_hash_parameters fdir_hash_params = {
391                 .name = fdir_hash_name,
392                 .entries = ICE_MAX_FDIR_FILTER_NUM,
393                 .key_len = sizeof(struct ice_fdir_fltr_pattern),
394                 .hash_func = rte_hash_crc,
395                 .hash_func_init_val = 0,
396                 .socket_id = rte_socket_id(),
397                 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
398         };
399
400         /* Initialize hash */
401         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
402                  "fdir_%s", dev->device->name);
403         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
404         if (!fdir_info->hash_table) {
405                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
406                 return -EINVAL;
407         }
408         fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
409                                           sizeof(*fdir_info->hash_map) *
410                                           ICE_MAX_FDIR_FILTER_NUM,
411                                           0);
412         if (!fdir_info->hash_map) {
413                 PMD_INIT_LOG(ERR,
414                              "Failed to allocate memory for fdir hash map!");
415                 ret = -ENOMEM;
416                 goto err_fdir_hash_map_alloc;
417         }
418         return 0;
419
420 err_fdir_hash_map_alloc:
421         rte_hash_free(fdir_info->hash_table);
422
423         return ret;
424 }
425
426 static void
427 ice_fdir_release_filter_list(struct ice_pf *pf)
428 {
429         struct ice_fdir_info *fdir_info = &pf->fdir;
430
431         if (fdir_info->hash_map)
432                 rte_free(fdir_info->hash_map);
433         if (fdir_info->hash_table)
434                 rte_hash_free(fdir_info->hash_table);
435
436         fdir_info->hash_map = NULL;
437         fdir_info->hash_table = NULL;
438 }
439
440 /*
441  * ice_fdir_setup - reserve and initialize the Flow Director resources
442  * @pf: board private structure
443  */
444 static int
445 ice_fdir_setup(struct ice_pf *pf)
446 {
447         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
448         struct ice_hw *hw = ICE_PF_TO_HW(pf);
449         const struct rte_memzone *mz = NULL;
450         char z_name[RTE_MEMZONE_NAMESIZE];
451         struct ice_vsi *vsi;
452         int err = ICE_SUCCESS;
453
454         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
455                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
456                 return -ENOTSUP;
457         }
458
459         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
460                     " fd_fltr_best_effort = %u.",
461                     hw->func_caps.fd_fltr_guar,
462                     hw->func_caps.fd_fltr_best_effort);
463
464         if (pf->fdir.fdir_vsi) {
465                 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
466                 return ICE_SUCCESS;
467         }
468
469         /* make new FDIR VSI */
470         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
471         if (!vsi) {
472                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
473                 return -EINVAL;
474         }
475         pf->fdir.fdir_vsi = vsi;
476
477         err = ice_fdir_init_filter_list(pf);
478         if (err) {
479                 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
480                 return -EINVAL;
481         }
482
483         err = ice_fdir_counter_init(pf);
484         if (err) {
485                 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
486                 return -EINVAL;
487         }
488
489         /* FDIR TX queue setup */
490         err = ice_fdir_setup_tx_resources(pf);
491         if (err) {
492                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
493                 goto fail_setup_tx;
494         }
495
496         /* FDIR RX queue setup */
497         err = ice_fdir_setup_rx_resources(pf);
498         if (err) {
499                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
500                 goto fail_setup_rx;
501         }
502
503         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
504         if (err) {
505                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
506                 goto fail_mem;
507         }
508
509         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
510         if (err) {
511                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
512                 goto fail_mem;
513         }
514
515         /* Enable FDIR MSIX interrupt */
516         vsi->nb_used_qps = 1;
517         ice_vsi_queues_bind_intr(vsi);
518         ice_vsi_enable_queues_intr(vsi);
519
520         /* reserve memory for the fdir programming packet */
521         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
522                  ICE_FDIR_MZ_NAME,
523                  eth_dev->data->port_id);
524         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
525         if (!mz) {
526                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
527                             "flow director program packet.");
528                 err = -ENOMEM;
529                 goto fail_mem;
530         }
531         pf->fdir.prg_pkt = mz->addr;
532         pf->fdir.dma_addr = mz->iova;
533         pf->fdir.mz = mz;
534
535         err = ice_fdir_prof_alloc(hw);
536         if (err) {
537                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
538                             "flow director profile.");
539                 err = -ENOMEM;
540                 goto fail_prof;
541         }
542
543         PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
544                     vsi->base_queue);
545         return ICE_SUCCESS;
546
547 fail_prof:
548         rte_memzone_free(pf->fdir.mz);
549         pf->fdir.mz = NULL;
550 fail_mem:
551         ice_rx_queue_release(pf->fdir.rxq);
552         pf->fdir.rxq = NULL;
553 fail_setup_rx:
554         ice_tx_queue_release(pf->fdir.txq);
555         pf->fdir.txq = NULL;
556 fail_setup_tx:
557         ice_release_vsi(vsi);
558         pf->fdir.fdir_vsi = NULL;
559         return err;
560 }
561
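/* Free the per-ptype profile table allocated by ice_fdir_prof_alloc() */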
562 static void
563 ice_fdir_prof_free(struct ice_hw *hw)
564 {
565         enum ice_fltr_ptype ptype;
566
567         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
568              ptype < ICE_FLTR_PTYPE_MAX;
569              ptype++) {
570                 rte_free(hw->fdir_prof[ptype]);
571                 hw->fdir_prof[ptype] = NULL;
572         }
573
574         rte_free(hw->fdir_prof);
575         hw->fdir_prof = NULL;
576 }
577
578 /* Remove a profile for some filter type */
579 static void
580 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
581 {
582         struct ice_hw *hw = ICE_PF_TO_HW(pf);
583         struct ice_fd_hw_prof *hw_prof;
584         uint64_t prof_id;
585         uint16_t vsi_num;
586         int i;
587
588         if (!hw->fdir_prof || !hw->fdir_prof[ptype])
589                 return;
590
591         hw_prof = hw->fdir_prof[ptype];
592
593         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
594         for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
595                 if (hw_prof->entry_h[i][is_tunnel]) {
596                         vsi_num = ice_get_hw_vsi_num(hw,
597                                                      hw_prof->vsi_h[i]);
598                         ice_rem_prof_id_flow(hw, ICE_BLK_FD,
599                                              vsi_num, prof_id);
600                         ice_flow_rem_entry(hw, ICE_BLK_FD,
601                                            hw_prof->entry_h[i][is_tunnel]);
602                         hw_prof->entry_h[i][is_tunnel] = 0;
603                 }
604         }
605         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
606         rte_free(hw_prof->fdir_seg[is_tunnel]);
607         hw_prof->fdir_seg[is_tunnel] = NULL;
608
609         for (i = 0; i < hw_prof->cnt; i++)
610                 hw_prof->vsi_h[i] = 0;
611         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
612 }
613
614 /* Remove all created profiles */
615 static void
616 ice_fdir_prof_rm_all(struct ice_pf *pf)
617 {
618         enum ice_fltr_ptype ptype;
619
620         for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
621              ptype < ICE_FLTR_PTYPE_MAX;
622              ptype++) {
623                 ice_fdir_prof_rm(pf, ptype, false);
624                 ice_fdir_prof_rm(pf, ptype, true);
625         }
626 }
627
628 /*
629  * ice_fdir_teardown - release the Flow Director resources
630  * @pf: board private structure
631  */
632 static void
633 ice_fdir_teardown(struct ice_pf *pf)
634 {
635         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
636         struct ice_hw *hw = ICE_PF_TO_HW(pf);
637         struct ice_vsi *vsi;
638         int err;
639
640         vsi = pf->fdir.fdir_vsi;
641         if (!vsi)
642                 return;
643
644         ice_vsi_disable_queues_intr(vsi);
645
646         err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
647         if (err)
648                 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
649
650         err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
651         if (err)
652                 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
653
654         err = ice_fdir_counter_release(pf);
655         if (err)
656                 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
657
658         ice_fdir_release_filter_list(pf);
659
660         ice_tx_queue_release(pf->fdir.txq);
661         pf->fdir.txq = NULL;
662         ice_rx_queue_release(pf->fdir.rxq);
663         pf->fdir.rxq = NULL;
664         ice_fdir_prof_rm_all(pf);
665         ice_fdir_prof_free(hw);
666         ice_release_vsi(vsi);
667         pf->fdir.fdir_vsi = NULL;
668
669         if (pf->fdir.mz) {
670                 err = rte_memzone_free(pf->fdir.mz);
671                 pf->fdir.mz = NULL;
672                 if (err)
673                         PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
674         }
675 }
676
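/* Check a new input set against the profile already installed for this
 * ptype: an identical input set returns -EEXIST (the caller reuses the
 * profile), a different one fails with -EINVAL while rules still reference
 * it, otherwise the empty profile is removed so it can be re-created.
 */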
677 static int
678 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
679                            enum ice_fltr_ptype ptype,
680                            struct ice_flow_seg_info *seg,
681                            bool is_tunnel)
682 {
683         struct ice_hw *hw = ICE_PF_TO_HW(pf);
684         struct ice_flow_seg_info *ori_seg;
685         struct ice_fd_hw_prof *hw_prof;
686
687         hw_prof = hw->fdir_prof[ptype];
688         ori_seg = hw_prof->fdir_seg[is_tunnel];
689
690         /* profile does not exist */
691         if (!ori_seg)
692                 return 0;
693
694         /* if no input set conflict, return -EEXIST */
695         if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
696             (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
697                 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
698                             ptype);
699                 return -EEXIST;
700         }
701
702         /* a rule with input set conflict already exist, so give up */
703         if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
704                 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
705                             ptype);
706                 return -EINVAL;
707         }
708
709         /* it's safe to delete an empty profile */
710         ice_fdir_prof_rm(pf, ptype, is_tunnel);
711         return 0;
712 }
713
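/* Return true if the given ptype has no installed profile or its unused
 * profile could be removed, false if active rules still depend on it.
 */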
714 static bool
715 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
716                                enum ice_fltr_ptype ptype,
717                                bool is_tunnel)
718 {
719         struct ice_hw *hw = ICE_PF_TO_HW(pf);
720         struct ice_fd_hw_prof *hw_prof;
721         struct ice_flow_seg_info *seg;
722
723         hw_prof = hw->fdir_prof[ptype];
724         seg = hw_prof->fdir_seg[is_tunnel];
725
726         /* profile does not exist */
727         if (!seg)
728                 return true;
729
730         /* profile exists and rule exists, fail to resolve the conflict */
731         if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
732                 return false;
733
734         /* it's safe to delete an empty profile */
735         ice_fdir_prof_rm(pf, ptype, is_tunnel);
736
737         return true;
738 }
739
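/* A profile for a specific L4 ptype conflicts with the matching *_OTHER
 * ptype (and vice versa); remove the conflicting profile if it carries no
 * rules, otherwise fail.
 */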
740 static int
741 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
742                              enum ice_fltr_ptype ptype,
743                              bool is_tunnel)
744 {
745         enum ice_fltr_ptype cflct_ptype;
746
747         switch (ptype) {
748         /* IPv4 */
749         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
750         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
751         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
752                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
753                 if (!ice_fdir_prof_resolve_conflict
754                         (pf, cflct_ptype, is_tunnel))
755                         goto err;
756                 break;
757         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
758                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
759                 if (!ice_fdir_prof_resolve_conflict
760                         (pf, cflct_ptype, is_tunnel))
761                         goto err;
762                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
763                 if (!ice_fdir_prof_resolve_conflict
764                         (pf, cflct_ptype, is_tunnel))
765                         goto err;
766                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
767                 if (!ice_fdir_prof_resolve_conflict
768                         (pf, cflct_ptype, is_tunnel))
769                         goto err;
770                 break;
771         /* IPv4 GTPU */
772         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
773         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
774         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
775                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
776                 if (!ice_fdir_prof_resolve_conflict
777                         (pf, cflct_ptype, is_tunnel))
778                         goto err;
779                 break;
780         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
781                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
782                 if (!ice_fdir_prof_resolve_conflict
783                         (pf, cflct_ptype, is_tunnel))
784                         goto err;
785                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
786                 if (!ice_fdir_prof_resolve_conflict
787                         (pf, cflct_ptype, is_tunnel))
788                         goto err;
789                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
790                 if (!ice_fdir_prof_resolve_conflict
791                         (pf, cflct_ptype, is_tunnel))
792                         goto err;
793                 break;
794         /* IPv6 GTPU */
795         case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER:
796                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
797                 if (!ice_fdir_prof_resolve_conflict
798                         (pf, cflct_ptype, is_tunnel))
799                         goto err;
800                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
801                 if (!ice_fdir_prof_resolve_conflict
802                         (pf, cflct_ptype, is_tunnel))
803                         goto err;
804                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
805                 if (!ice_fdir_prof_resolve_conflict
806                         (pf, cflct_ptype, is_tunnel))
807                         goto err;
808                 break;
809         /* IPv6 */
810         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
811         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
812         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
813                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
814                 if (!ice_fdir_prof_resolve_conflict
815                         (pf, cflct_ptype, is_tunnel))
816                         goto err;
817                 break;
818         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
819                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
820                 if (!ice_fdir_prof_resolve_conflict
821                         (pf, cflct_ptype, is_tunnel))
822                         goto err;
823                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
824                 if (!ice_fdir_prof_resolve_conflict
825                         (pf, cflct_ptype, is_tunnel))
826                         goto err;
827                 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
828                 if (!ice_fdir_prof_resolve_conflict
829                         (pf, cflct_ptype, is_tunnel))
830                         goto err;
831                 break;
832         default:
833                 break;
834         }
835         return 0;
836 err:
837         PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
838                     ptype, cflct_ptype);
839         return -EINVAL;
840 }
841
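/*
 * ice_fdir_hw_tbl_conf - create the HW flow profile for a filter ptype
 * @vsi: main VSI that receives matched packets
 * @ctrl_vsi: FDIR control VSI used for rule programming
 * @seg: flow segment(s) describing the input set
 * @is_tunnel: true when outer + inner segments are used
 *
 * Adds the flow profile plus one flow entry for each VSI and records them in
 * the driver's profile bookkeeping.
 */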
842 static int
843 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
844                      struct ice_vsi *ctrl_vsi,
845                      struct ice_flow_seg_info *seg,
846                      enum ice_fltr_ptype ptype,
847                      bool is_tunnel)
848 {
849         struct ice_hw *hw = ICE_PF_TO_HW(pf);
850         enum ice_flow_dir dir = ICE_FLOW_RX;
851         struct ice_fd_hw_prof *hw_prof;
852         struct ice_flow_prof *prof;
853         uint64_t entry_1 = 0;
854         uint64_t entry_2 = 0;
855         uint16_t vsi_num;
856         int ret;
857         uint64_t prof_id;
858
859         /* check if have input set conflict on current profile. */
860         ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
861         if (ret)
862                 return ret;
863
864         /* check if the profile is conflict with other profile. */
865         ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
866         if (ret)
867                 return ret;
868
869         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
870         ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
871                                 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
872         if (ret)
873                 return ret;
874         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
875                                  vsi->idx, ICE_FLOW_PRIO_NORMAL,
876                                  seg, NULL, 0, &entry_1);
877         if (ret) {
878                 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
879                             ptype);
880                 goto err_add_prof;
881         }
882         ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
883                                  ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
884                                  seg, NULL, 0, &entry_2);
885         if (ret) {
886                 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
887                             ptype);
888                 goto err_add_entry;
889         }
890
891         hw_prof = hw->fdir_prof[ptype];
892         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
893         hw_prof->cnt = 0;
894         hw_prof->fdir_seg[is_tunnel] = seg;
895         hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
896         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
897         pf->hw_prof_cnt[ptype][is_tunnel]++;
898         hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
899         hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
900         pf->hw_prof_cnt[ptype][is_tunnel]++;
901
902         return ret;
903
904 err_add_entry:
905         vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
906         ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
907         ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
908 err_add_prof:
909         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
910
911         return ret;
912 }
913
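/* Translate an ICE_INSET_* bitmap into the list of ice_flow_field indexes
 * consumed by ice_flow_set_fld().
 */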
914 static void
915 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
916 {
917         uint32_t i, j;
918
919         struct ice_inset_map {
920                 uint64_t inset;
921                 enum ice_flow_field fld;
922         };
923         static const struct ice_inset_map ice_inset_map[] = {
924                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
925                 {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
926                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
927                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
928                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
929                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
930                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
931                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
932                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
933                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
934                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
935                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
936                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
937                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
938                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
939                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
940                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
941                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
942                 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
943                 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
944                 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
945                 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
946                 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
947                 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
948                 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
949                 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
950                 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
951                 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
952         };
953
954         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
955                 if ((inset & ice_inset_map[i].inset) ==
956                     ice_inset_map[i].inset)
957                         field[j++] = ice_inset_map[i].fld;
958         }
959 }
960
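/* Build the flow segment(s) for a flow type and input set, then install the
 * matching HW profile; tunnel profiles carry an outer and an inner segment.
 */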
961 static int
962 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
963                         uint64_t input_set, enum ice_fdir_tunnel_type ttype)
964 {
965         struct ice_flow_seg_info *seg;
966         struct ice_flow_seg_info *seg_tun = NULL;
967         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
968         bool is_tunnel;
969         int i, ret;
970
971         if (!input_set)
972                 return -EINVAL;
973
974         seg = (struct ice_flow_seg_info *)
975                 ice_malloc(hw, sizeof(*seg));
976         if (!seg) {
977                 PMD_DRV_LOG(ERR, "No memory can be allocated");
978                 return -ENOMEM;
979         }
980
981         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
982                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
983         ice_fdir_input_set_parse(input_set, field);
984
985         switch (flow) {
986         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
987                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
988                                   ICE_FLOW_SEG_HDR_IPV4 |
989                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
990                 break;
991         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
992                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
993                                   ICE_FLOW_SEG_HDR_IPV4 |
994                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
995                 break;
996         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
997                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
998                                   ICE_FLOW_SEG_HDR_IPV4 |
999                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1000                 break;
1001         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
1002                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
1003                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1004                 break;
1005         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
1006                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
1007                                   ICE_FLOW_SEG_HDR_IPV6 |
1008                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1009                 break;
1010         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
1011                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
1012                                   ICE_FLOW_SEG_HDR_IPV6 |
1013                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1014                 break;
1015         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
1016                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
1017                                   ICE_FLOW_SEG_HDR_IPV6 |
1018                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1019                 break;
1020         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
1021                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
1022                                   ICE_FLOW_SEG_HDR_IPV_OTHER);
1023                 break;
1024         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
1025         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
1026         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
1027         case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
1028                 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
1029                         ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1030                                           ICE_FLOW_SEG_HDR_IPV4 |
1031                                           ICE_FLOW_SEG_HDR_IPV_OTHER);
1032                 else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1033                         ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1034                                           ICE_FLOW_SEG_HDR_GTPU_IP |
1035                                           ICE_FLOW_SEG_HDR_IPV4 |
1036                                           ICE_FLOW_SEG_HDR_IPV_OTHER);
1037                 else
1038                         PMD_DRV_LOG(ERR, "unsupported tunnel type.");
1039                 break;
1040         case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER:
1041                 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
1042                         ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1043                                           ICE_FLOW_SEG_HDR_IPV6 |
1044                                           ICE_FLOW_SEG_HDR_IPV_OTHER);
1045                 else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1046                         ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1047                                           ICE_FLOW_SEG_HDR_GTPU_IP |
1048                                           ICE_FLOW_SEG_HDR_IPV6 |
1049                                           ICE_FLOW_SEG_HDR_IPV_OTHER);
1050                 else
1051                         PMD_DRV_LOG(ERR, "unsupported tunnel type.");
1052                 break;
1053         case ICE_FLTR_PTYPE_NON_IP_L2:
1054                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
1055                 break;
1056         default:
1057                 PMD_DRV_LOG(ERR, "unsupported filter type.");
1058                 break;
1059         }
1060
1061         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
1062                 ice_flow_set_fld(seg, field[i],
1063                                  ICE_FLOW_FLD_OFF_INVAL,
1064                                  ICE_FLOW_FLD_OFF_INVAL,
1065                                  ICE_FLOW_FLD_OFF_INVAL, false);
1066         }
1067
1068         is_tunnel = ice_fdir_is_tunnel_profile(ttype);
1069         if (!is_tunnel) {
1070                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1071                                            seg, flow, false);
1072         } else {
1073                 seg_tun = (struct ice_flow_seg_info *)
1074                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
1075                 if (!seg_tun) {
1076                         PMD_DRV_LOG(ERR, "No memory can be allocated");
1077                         rte_free(seg);
1078                         return -ENOMEM;
1079                 }
1080                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
1081                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1082                                            seg_tun, flow, true);
1083         }
1084
1085         if (!ret) {
1086                 return ret;
1087         } else if (ret < 0) {
1088                 rte_free(seg);
1089                 if (is_tunnel)
1090                         rte_free(seg_tun);
1091                 return (ret == -EEXIST) ? 0 : ret;
1092         } else {
1093                 return ret;
1094         }
1095 }
1096
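/* Update the active filter counters kept per ptype and per tunnel mode */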
1097 static void
1098 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1099                     bool is_tunnel, bool add)
1100 {
1101         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1102         int cnt;
1103
1104         cnt = (add) ? 1 : -1;
1105         hw->fdir_active_fltr += cnt;
1106         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1107                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1108         else
1109                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1110 }
1111
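/* Engine init: set up the FDIR resources and register the parser that
 * matches the loaded package type; skipped entirely when DCF is enabled.
 */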
1112 static int
1113 ice_fdir_init(struct ice_adapter *ad)
1114 {
1115         struct ice_pf *pf = &ad->pf;
1116         struct ice_flow_parser *parser;
1117         int ret;
1118
1119         if (ad->hw.dcf_enabled)
1120                 return 0;
1121
1122         ret = ice_fdir_setup(pf);
1123         if (ret)
1124                 return ret;
1125
1126         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1127                 parser = &ice_fdir_parser_comms;
1128         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1129                 parser = &ice_fdir_parser_os;
1130         else
1131                 return -EINVAL;
1132
1133         return ice_register_parser(parser, ad);
1134 }
1135
1136 static void
1137 ice_fdir_uninit(struct ice_adapter *ad)
1138 {
1139         struct ice_pf *pf = &ad->pf;
1140         struct ice_flow_parser *parser;
1141
1142         if (ad->hw.dcf_enabled)
1143                 return;
1144
1145         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1146                 parser = &ice_fdir_parser_comms;
1147         else
1148                 parser = &ice_fdir_parser_os;
1149
1150         ice_unregister_parser(parser, ad);
1151
1152         ice_fdir_teardown(pf);
1153 }
1154
1155 static int
1156 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1157 {
1158         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1159                 return 1;
1160         else
1161                 return 0;
1162 }
1163
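/* Build the programming descriptor and dummy packet for a rule and hand them
 * to ice_fdir_programming() to add or remove the filter in HW.
 */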
1164 static int
1165 ice_fdir_add_del_filter(struct ice_pf *pf,
1166                         struct ice_fdir_filter_conf *filter,
1167                         bool add)
1168 {
1169         struct ice_fltr_desc desc;
1170         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1171         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1172         bool is_tun;
1173         int ret;
1174
1175         filter->input.dest_vsi = pf->main_vsi->idx;
1176
1177         memset(&desc, 0, sizeof(desc));
1178         filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1179         ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1180
1181         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1182
1183         memset(pkt, 0, ICE_FDIR_PKT_LEN);
1184         ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1185         if (ret) {
1186                 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
1187                 return -EINVAL;
1188         }
1189
1190         return ice_fdir_programming(pf, &desc);
1191 }
1192
1193 static void
1194 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1195                           struct ice_fdir_filter_conf *filter)
1196 {
1197         struct ice_fdir_fltr *input = &filter->input;
1198         memset(key, 0, sizeof(*key));
1199
1200         key->flow_type = input->flow_type;
1201         rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1202         rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1203         rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1204         rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1205
1206         rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1207         rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1208
1209         key->tunnel_type = filter->tunnel_type;
1210 }
1211
1212 /* Check if there exists the flow director filter */
1213 static struct ice_fdir_filter_conf *
1214 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1215                         const struct ice_fdir_fltr_pattern *key)
1216 {
1217         int ret;
1218
1219         ret = rte_hash_lookup(fdir_info->hash_table, key);
1220         if (ret < 0)
1221                 return NULL;
1222
1223         return fdir_info->hash_map[ret];
1224 }
1225
1226 /* Add a flow director entry into the SW list */
1227 static int
1228 ice_fdir_entry_insert(struct ice_pf *pf,
1229                       struct ice_fdir_filter_conf *entry,
1230                       struct ice_fdir_fltr_pattern *key)
1231 {
1232         struct ice_fdir_info *fdir_info = &pf->fdir;
1233         int ret;
1234
1235         ret = rte_hash_add_key(fdir_info->hash_table, key);
1236         if (ret < 0) {
1237                 PMD_DRV_LOG(ERR,
1238                             "Failed to insert fdir entry to hash table %d!",
1239                             ret);
1240                 return ret;
1241         }
1242         fdir_info->hash_map[ret] = entry;
1243
1244         return 0;
1245 }
1246
1247 /* Delete a flow director entry from the SW list */
1248 static int
1249 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1250 {
1251         struct ice_fdir_info *fdir_info = &pf->fdir;
1252         int ret;
1253
1254         ret = rte_hash_del_key(fdir_info->hash_table, key);
1255         if (ret < 0) {
1256                 PMD_DRV_LOG(ERR,
1257                             "Failed to delete fdir filter from hash table %d!",
1258                             ret);
1259                 return ret;
1260         }
1261         fdir_info->hash_map[ret] = NULL;
1262
1263         return 0;
1264 }
1265
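/* rte_flow create callback: reject duplicates, configure the input set
 * profile, optionally allocate a counter, program the rule in HW and track
 * it in the SW hash table.
 */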
1266 static int
1267 ice_fdir_create_filter(struct ice_adapter *ad,
1268                        struct rte_flow *flow,
1269                        void *meta,
1270                        struct rte_flow_error *error)
1271 {
1272         struct ice_pf *pf = &ad->pf;
1273         struct ice_fdir_filter_conf *filter = meta;
1274         struct ice_fdir_info *fdir_info = &pf->fdir;
1275         struct ice_fdir_filter_conf *entry, *node;
1276         struct ice_fdir_fltr_pattern key;
1277         bool is_tun;
1278         int ret;
1279
1280         ice_fdir_extract_fltr_key(&key, filter);
1281         node = ice_fdir_entry_lookup(fdir_info, &key);
1282         if (node) {
1283                 rte_flow_error_set(error, EEXIST,
1284                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1285                                    "Rule already exists!");
1286                 return -rte_errno;
1287         }
1288
1289         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1290         if (!entry) {
1291                 rte_flow_error_set(error, ENOMEM,
1292                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1293                                    "Failed to allocate memory");
1294                 return -rte_errno;
1295         }
1296
1297         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1298
1299         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1300                         filter->input_set, filter->tunnel_type);
1301         if (ret) {
1302                 rte_flow_error_set(error, -ret,
1303                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1304                                    "Profile configure failed.");
1305                 goto free_entry;
1306         }
1307
1308         /* alloc counter for FDIR */
1309         if (filter->input.cnt_ena) {
1310                 struct rte_flow_action_count *act_count = &filter->act_count;
1311
1312                 filter->counter = ice_fdir_counter_alloc(pf,
1313                                                          act_count->shared,
1314                                                          act_count->id);
1315                 if (!filter->counter) {
1316                         rte_flow_error_set(error, EINVAL,
1317                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1318                                         "Failed to alloc FDIR counter.");
1319                         goto free_entry;
1320                 }
1321                 filter->input.cnt_index = filter->counter->hw_index;
1322         }
1323
1324         ret = ice_fdir_add_del_filter(pf, filter, true);
1325         if (ret) {
1326                 rte_flow_error_set(error, -ret,
1327                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1328                                    "Add filter rule failed.");
1329                 goto free_counter;
1330         }
1331
1332         rte_memcpy(entry, filter, sizeof(*entry));
1333         ret = ice_fdir_entry_insert(pf, entry, &key);
1334         if (ret) {
1335                 rte_flow_error_set(error, -ret,
1336                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1337                                    "Insert entry to table failed.");
1338                 goto free_entry;
1339         }
1340
1341         flow->rule = entry;
1342         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1343
1344         return 0;
1345
1346 free_counter:
1347         if (filter->counter) {
1348                 ice_fdir_counter_free(pf, filter->counter);
1349                 filter->counter = NULL;
1350         }
1351
1352 free_entry:
1353         rte_free(entry);
1354         return -rte_errno;
1355 }
1356
1357 static int
1358 ice_fdir_destroy_filter(struct ice_adapter *ad,
1359                         struct rte_flow *flow,
1360                         struct rte_flow_error *error)
1361 {
1362         struct ice_pf *pf = &ad->pf;
1363         struct ice_fdir_info *fdir_info = &pf->fdir;
1364         struct ice_fdir_filter_conf *filter, *entry;
1365         struct ice_fdir_fltr_pattern key;
1366         bool is_tun;
1367         int ret;
1368
1369         filter = (struct ice_fdir_filter_conf *)flow->rule;
1370
1371         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1372
1373         if (filter->counter) {
1374                 ice_fdir_counter_free(pf, filter->counter);
1375                 filter->counter = NULL;
1376         }
1377
1378         ice_fdir_extract_fltr_key(&key, filter);
1379         entry = ice_fdir_entry_lookup(fdir_info, &key);
1380         if (!entry) {
1381                 rte_flow_error_set(error, ENOENT,
1382                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1383                                    "Can't find entry.");
1384                 return -rte_errno;
1385         }
1386
1387         ret = ice_fdir_add_del_filter(pf, filter, false);
1388         if (ret) {
1389                 rte_flow_error_set(error, -ret,
1390                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1391                                    "Del filter rule failed.");
1392                 return -rte_errno;
1393         }
1394
1395         ret = ice_fdir_entry_del(pf, &key);
1396         if (ret) {
1397                 rte_flow_error_set(error, -ret,
1398                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1399                                    "Remove entry from table failed.");
1400                 return -rte_errno;
1401         }
1402
1403         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1404         flow->rule = NULL;
1405
1406         rte_free(filter);
1407
1408         return 0;
1409 }
1410
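/* rte_flow query(COUNT) hook: read the rule's hardware hit counter.
 * Only packet hits are reported; byte counts are left at zero.
 */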
1411 static int
1412 ice_fdir_query_count(struct ice_adapter *ad,
1413                       struct rte_flow *flow,
1414                       struct rte_flow_query_count *flow_stats,
1415                       struct rte_flow_error *error)
1416 {
1417         struct ice_pf *pf = &ad->pf;
1418         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1419         struct ice_fdir_filter_conf *filter = flow->rule;
1420         struct ice_fdir_counter *counter = filter->counter;
1421         uint64_t hits_lo, hits_hi;
1422
1423         if (!counter) {
1424                 rte_flow_error_set(error, EINVAL,
1425                                   RTE_FLOW_ERROR_TYPE_ACTION,
1426                                   NULL,
1427                                   "FDIR counters not available");
1428                 return -rte_errno;
1429         }
1430
1431         /*
1432          * Reading the low 32 bits latches the high 32 bits into a shadow
1433          * register. Reading the high 32 bits returns the value held in the
1434          * shadow register.
1435          */
1436         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1437         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1438
1439         flow_stats->hits_set = 1;
1440         flow_stats->hits = hits_lo | (hits_hi << 32);
1441         flow_stats->bytes_set = 0;
1442         flow_stats->bytes = 0;
1443
1444         if (flow_stats->reset) {
1445                 /* reset statistic counter value */
1446                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1447                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1448         }
1449
1450         return 0;
1451 }
1452
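/* FDIR flow engine ops plugged into the generic flow framework
 * (registered at init time, see RTE_INIT below).
 */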
1453 static struct ice_flow_engine ice_fdir_engine = {
1454         .init = ice_fdir_init,
1455         .uninit = ice_fdir_uninit,
1456         .create = ice_fdir_create_filter,
1457         .destroy = ice_fdir_destroy_filter,
1458         .query_count = ice_fdir_query_count,
1459         .type = ICE_FLOW_ENGINE_FDIR,
1460 };
1461
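/* Validate an RSS action used as an FDIR queue region: the queues must
 * be contiguous, fit within the device Rx queue range, and the region
 * size must be a power of two no larger than ICE_FDIR_MAX_QREGION_SIZE.
 */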
1462 static int
1463 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1464                               struct rte_flow_error *error,
1465                               const struct rte_flow_action *act,
1466                               struct ice_fdir_filter_conf *filter)
1467 {
1468         const struct rte_flow_action_rss *rss = act->conf;
1469         uint32_t i;
1470
1471         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1472                 rte_flow_error_set(error, EINVAL,
1473                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1474                                    "Invalid action.");
1475                 return -rte_errno;
1476         }
1477
1478         if (rss->queue_num <= 1) {
1479                 rte_flow_error_set(error, EINVAL,
1480                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1481                                    "Queue region size can't be 0 or 1.");
1482                 return -rte_errno;
1483         }
1484
1485         /* check that the queue indices of the region are contiguous */
1486         for (i = 0; i < rss->queue_num - 1; i++) {
1487                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1488                         rte_flow_error_set(error, EINVAL,
1489                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1490                                            "Discontinuous queue region");
1491                         return -rte_errno;
1492                 }
1493         }
1494
1495         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1496                 rte_flow_error_set(error, EINVAL,
1497                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1498                                    "Invalid queue region indexes.");
1499                 return -rte_errno;
1500         }
1501
1502         if (!(rte_is_power_of_2(rss->queue_num) &&
1503              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1504                 rte_flow_error_set(error, EINVAL,
1505                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1506                                    "The region size should be any of the following values: "
1507                                    "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
1508                                    "of queues does not exceed the VSI allocation.");
1509                 return -rte_errno;
1510         }
1511
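        /* Region start is the first queue; q_region stores log2 of the
         * region size (queue_num is a power of two, checked above).
         */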
1512         filter->input.q_index = rss->queue[0];
1513         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1514         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1515
1516         return 0;
1517 }
1518
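/* Parse the action list into the filter: at most one destination action
 * (queue, drop, passthru or RSS queue region), one mark and one count
 * action are accepted. With only mark/count present, the destination
 * defaults to passthru.
 */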
1519 static int
1520 ice_fdir_parse_action(struct ice_adapter *ad,
1521                       const struct rte_flow_action actions[],
1522                       struct rte_flow_error *error,
1523                       struct ice_fdir_filter_conf *filter)
1524 {
1525         struct ice_pf *pf = &ad->pf;
1526         const struct rte_flow_action_queue *act_q;
1527         const struct rte_flow_action_mark *mark_spec = NULL;
1528         const struct rte_flow_action_count *act_count;
1529         uint32_t dest_num = 0;
1530         uint32_t mark_num = 0;
1531         uint32_t counter_num = 0;
1532         int ret;
1533
1534         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1535                 switch (actions->type) {
1536                 case RTE_FLOW_ACTION_TYPE_VOID:
1537                         break;
1538                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1539                         dest_num++;
1540
1541                         act_q = actions->conf;
1542                         filter->input.q_index = act_q->index;
1543                         if (filter->input.q_index >=
1544                                         pf->dev_data->nb_rx_queues) {
1545                                 rte_flow_error_set(error, EINVAL,
1546                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1547                                                    actions,
1548                                                    "Invalid queue for FDIR.");
1549                                 return -rte_errno;
1550                         }
1551                         filter->input.dest_ctl =
1552                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1553                         break;
1554                 case RTE_FLOW_ACTION_TYPE_DROP:
1555                         dest_num++;
1556
1557                         filter->input.dest_ctl =
1558                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1559                         break;
1560                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1561                         dest_num++;
1562
1563                         filter->input.dest_ctl =
1564                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1565                         break;
1566                 case RTE_FLOW_ACTION_TYPE_RSS:
1567                         dest_num++;
1568
1569                         ret = ice_fdir_parse_action_qregion(pf,
1570                                                 error, actions, filter);
1571                         if (ret)
1572                                 return ret;
1573                         break;
1574                 case RTE_FLOW_ACTION_TYPE_MARK:
1575                         mark_num++;
1576
1577                         mark_spec = actions->conf;
1578                         filter->input.fltr_id = mark_spec->id;
1579                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1580                         break;
1581                 case RTE_FLOW_ACTION_TYPE_COUNT:
1582                         counter_num++;
1583
1584                         act_count = actions->conf;
1585                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1586                         rte_memcpy(&filter->act_count, act_count,
1587                                                 sizeof(filter->act_count));
1588
1589                         break;
1590                 default:
1591                         rte_flow_error_set(error, EINVAL,
1592                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1593                                    "Invalid action.");
1594                         return -rte_errno;
1595                 }
1596         }
1597
1598         if (dest_num >= 2) {
1599                 rte_flow_error_set(error, EINVAL,
1600                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1601                            "Unsupported action combination");
1602                 return -rte_errno;
1603         }
1604
1605         if (mark_num >= 2) {
1606                 rte_flow_error_set(error, EINVAL,
1607                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1608                            "Too many mark actions");
1609                 return -rte_errno;
1610         }
1611
1612         if (counter_num >= 2) {
1613                 rte_flow_error_set(error, EINVAL,
1614                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1615                            "Too many count actions");
1616                 return -rte_errno;
1617         }
1618
1619         if (dest_num + mark_num + counter_num == 0) {
1620                 rte_flow_error_set(error, EINVAL,
1621                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1622                            "Empty action");
1623                 return -rte_errno;
1624         }
1625
1626         /* Set the default action to PASSTHRU mode for the "mark/count only" case. */
1627         if (dest_num == 0)
1628                 filter->input.dest_ctl =
1629                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1630
1631         return 0;
1632 }
1633
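/* Parse the rte_flow pattern into the FDIR input structure, collecting
 * the input set bits and the tunnel type (none, VXLAN, GTP-U, or GTP-U
 * with the PDU session extension header).
 */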
1634 static int
1635 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1636                        const struct rte_flow_item pattern[],
1637                        struct rte_flow_error *error,
1638                        struct ice_fdir_filter_conf *filter)
1639 {
1640         const struct rte_flow_item *item = pattern;
1641         enum rte_flow_item_type item_type;
1642         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1643         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1644         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1645         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1646         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1647         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1648         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1649         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1650         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1651         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1652         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1653         uint64_t input_set = ICE_INSET_NONE;
1654         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1655         uint8_t  ipv6_addr_mask[16] = {
1656                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1657                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1658         };
1659         uint32_t vtc_flow_cpu;
1660         uint16_t ether_type;
1661         enum rte_flow_item_type next_type;
1662
1663         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1664                 if (item->last) {
1665                         rte_flow_error_set(error, EINVAL,
1666                                         RTE_FLOW_ERROR_TYPE_ITEM,
1667                                         item,
1668                                         "Range is not supported");
1669                         return -rte_errno;
1670                 }
1671                 item_type = item->type;
1672
1673                 switch (item_type) {
1674                 case RTE_FLOW_ITEM_TYPE_ETH:
1675                         eth_spec = item->spec;
1676                         eth_mask = item->mask;
1677                         next_type = (item + 1)->type;
1678
1679                         if (eth_spec && eth_mask) {
1680                                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
1681                                         input_set |= ICE_INSET_DMAC;
1682                                         rte_memcpy(&filter->input.ext_data.dst_mac,
1683                                                    &eth_spec->dst,
1684                                                    RTE_ETHER_ADDR_LEN);
1685                                 }
1686
1687                                 if (!rte_is_zero_ether_addr(&eth_mask->src)) {
1688                                         input_set |= ICE_INSET_SMAC;
1689                                         rte_memcpy(&filter->input.ext_data.src_mac,
1690                                                    &eth_spec->src,
1691                                                    RTE_ETHER_ADDR_LEN);
1692                                 }
1693
1694                                 /* ether_type is matched only for ICE_FLTR_PTYPE_NON_IP_L2 */
1695                                 if (eth_mask->type == RTE_BE16(0xffff) &&
1696                                     next_type == RTE_FLOW_ITEM_TYPE_END) {
1697                                         input_set |= ICE_INSET_ETHERTYPE;
1698                                         ether_type = rte_be_to_cpu_16(eth_spec->type);
1699
1700                                         if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1701                                             ether_type == RTE_ETHER_TYPE_IPV6) {
1702                                                 rte_flow_error_set(error, EINVAL,
1703                                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1704                                                                    item,
1705                                                                    "Unsupported ether_type.");
1706                                                 return -rte_errno;
1707                                         }
1708
1709                                         rte_memcpy(&filter->input.ext_data.ether_type,
1710                                                    &eth_spec->type,
1711                                                    sizeof(eth_spec->type));
1712                                         flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1713                                 }
1714                         }
1715                         break;
1716                 case RTE_FLOW_ITEM_TYPE_IPV4:
1717                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1718                         ipv4_spec = item->spec;
1719                         ipv4_mask = item->mask;
1720
1721                         if (ipv4_spec && ipv4_mask) {
1722                                 /* Check IPv4 mask and update input set */
1723                                 if (ipv4_mask->hdr.version_ihl ||
1724                                     ipv4_mask->hdr.total_length ||
1725                                     ipv4_mask->hdr.packet_id ||
1726                                     ipv4_mask->hdr.fragment_offset ||
1727                                     ipv4_mask->hdr.hdr_checksum) {
1728                                         rte_flow_error_set(error, EINVAL,
1729                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1730                                                    item,
1731                                                    "Invalid IPv4 mask.");
1732                                         return -rte_errno;
1733                                 }
1734                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1735                                         input_set |= tunnel_type ?
1736                                                      ICE_INSET_TUN_IPV4_SRC :
1737                                                      ICE_INSET_IPV4_SRC;
1738                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1739                                         input_set |= tunnel_type ?
1740                                                      ICE_INSET_TUN_IPV4_DST :
1741                                                      ICE_INSET_IPV4_DST;
1742                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1743                                         input_set |= ICE_INSET_IPV4_TOS;
1744                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1745                                         input_set |= ICE_INSET_IPV4_TTL;
1746                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1747                                         input_set |= ICE_INSET_IPV4_PROTO;
1748
1749                                 filter->input.ip.v4.dst_ip =
1750                                         ipv4_spec->hdr.dst_addr;
1751                                 filter->input.ip.v4.src_ip =
1752                                         ipv4_spec->hdr.src_addr;
1753                                 filter->input.ip.v4.tos =
1754                                         ipv4_spec->hdr.type_of_service;
1755                                 filter->input.ip.v4.ttl =
1756                                         ipv4_spec->hdr.time_to_live;
1757                                 filter->input.ip.v4.proto =
1758                                         ipv4_spec->hdr.next_proto_id;
1759                         }
1760
1761                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1762                         break;
1763                 case RTE_FLOW_ITEM_TYPE_IPV6:
1764                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1765                         ipv6_spec = item->spec;
1766                         ipv6_mask = item->mask;
1767
1768                         if (ipv6_spec && ipv6_mask) {
1769                                 /* Check IPv6 mask and update input set */
1770                                 if (ipv6_mask->hdr.payload_len) {
1771                                         rte_flow_error_set(error, EINVAL,
1772                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1773                                                    item,
1774                                                    "Invalid IPv6 mask");
1775                                         return -rte_errno;
1776                                 }
1777
1778                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1779                                             ipv6_addr_mask,
1780                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1781                                         input_set |= ICE_INSET_IPV6_SRC;
1782                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1783                                             ipv6_addr_mask,
1784                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1785                                         input_set |= ICE_INSET_IPV6_DST;
1786
1787                                 if ((ipv6_mask->hdr.vtc_flow &
1788                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1789                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1790                                         input_set |= ICE_INSET_IPV6_TC;
1791                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1792                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1793                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1794                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1795
1796                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1797                                            ipv6_spec->hdr.dst_addr, 16);
1798                                 rte_memcpy(filter->input.ip.v6.src_ip,
1799                                            ipv6_spec->hdr.src_addr, 16);
1800
1801                                 vtc_flow_cpu =
1802                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1803                                 filter->input.ip.v6.tc =
1804                                         (uint8_t)(vtc_flow_cpu >>
1805                                                   ICE_FDIR_IPV6_TC_OFFSET);
1806                                 filter->input.ip.v6.proto =
1807                                         ipv6_spec->hdr.proto;
1808                                 filter->input.ip.v6.hlim =
1809                                         ipv6_spec->hdr.hop_limits;
1810                         }
1811
1812                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1813                         break;
1814                 case RTE_FLOW_ITEM_TYPE_TCP:
1815                         tcp_spec = item->spec;
1816                         tcp_mask = item->mask;
1817
1818                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1819                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1820                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1821                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1822
1823                         if (tcp_spec && tcp_mask) {
1824                                 /* Check TCP mask and update input set */
1825                                 if (tcp_mask->hdr.sent_seq ||
1826                                     tcp_mask->hdr.recv_ack ||
1827                                     tcp_mask->hdr.data_off ||
1828                                     tcp_mask->hdr.tcp_flags ||
1829                                     tcp_mask->hdr.rx_win ||
1830                                     tcp_mask->hdr.cksum ||
1831                                     tcp_mask->hdr.tcp_urp) {
1832                                         rte_flow_error_set(error, EINVAL,
1833                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1834                                                    item,
1835                                                    "Invalid TCP mask");
1836                                         return -rte_errno;
1837                                 }
1838
1839                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1840                                         input_set |= tunnel_type ?
1841                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1842                                                      ICE_INSET_TCP_SRC_PORT;
1843                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1844                                         input_set |= tunnel_type ?
1845                                                      ICE_INSET_TUN_TCP_DST_PORT :
1846                                                      ICE_INSET_TCP_DST_PORT;
1847
1848                                 /* Get filter info */
1849                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1850                                         filter->input.ip.v4.dst_port =
1851                                                 tcp_spec->hdr.dst_port;
1852                                         filter->input.ip.v4.src_port =
1853                                                 tcp_spec->hdr.src_port;
1854                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1855                                         filter->input.ip.v6.dst_port =
1856                                                 tcp_spec->hdr.dst_port;
1857                                         filter->input.ip.v6.src_port =
1858                                                 tcp_spec->hdr.src_port;
1859                                 }
1860                         }
1861                         break;
1862                 case RTE_FLOW_ITEM_TYPE_UDP:
1863                         udp_spec = item->spec;
1864                         udp_mask = item->mask;
1865
1866                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1867                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1868                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1869                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1870
1871                         if (udp_spec && udp_mask) {
1872                                 /* Check UDP mask and update input set */
1873                                 if (udp_mask->hdr.dgram_len ||
1874                                     udp_mask->hdr.dgram_cksum) {
1875                                         rte_flow_error_set(error, EINVAL,
1876                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1877                                                    item,
1878                                                    "Invalid UDP mask");
1879                                         return -rte_errno;
1880                                 }
1881
1882                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1883                                         input_set |= tunnel_type ?
1884                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1885                                                      ICE_INSET_UDP_SRC_PORT;
1886                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1887                                         input_set |= tunnel_type ?
1888                                                      ICE_INSET_TUN_UDP_DST_PORT :
1889                                                      ICE_INSET_UDP_DST_PORT;
1890
1891                                 /* Get filter info */
1892                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1893                                         filter->input.ip.v4.dst_port =
1894                                                 udp_spec->hdr.dst_port;
1895                                         filter->input.ip.v4.src_port =
1896                                                 udp_spec->hdr.src_port;
1897                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1898                                         filter->input.ip.v6.src_port =
1899                                                 udp_spec->hdr.src_port;
1900                                         filter->input.ip.v6.dst_port =
1901                                                 udp_spec->hdr.dst_port;
1902                                 }
1903                         }
1904                         break;
1905                 case RTE_FLOW_ITEM_TYPE_SCTP:
1906                         sctp_spec = item->spec;
1907                         sctp_mask = item->mask;
1908
1909                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1910                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1911                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1912                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1913
1914                         if (sctp_spec && sctp_mask) {
1915                                 /* Check SCTP mask and update input set */
1916                                 if (sctp_mask->hdr.cksum) {
1917                                         rte_flow_error_set(error, EINVAL,
1918                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1919                                                    item,
1920                                                    "Invalid SCTP mask");
1921                                         return -rte_errno;
1922                                 }
1923
1924                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1925                                         input_set |= tunnel_type ?
1926                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1927                                                      ICE_INSET_SCTP_SRC_PORT;
1928                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1929                                         input_set |= tunnel_type ?
1930                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1931                                                      ICE_INSET_SCTP_DST_PORT;
1932
1933                                 /* Get filter info */
1934                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1935                                         filter->input.ip.v4.dst_port =
1936                                                 sctp_spec->hdr.dst_port;
1937                                         filter->input.ip.v4.src_port =
1938                                                 sctp_spec->hdr.src_port;
1939                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1940                                         filter->input.ip.v6.dst_port =
1941                                                 sctp_spec->hdr.dst_port;
1942                                         filter->input.ip.v6.src_port =
1943                                                 sctp_spec->hdr.src_port;
1944                                 }
1945                         }
1946                         break;
1947                 case RTE_FLOW_ITEM_TYPE_VOID:
1948                         break;
1949                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1950                         l3 = RTE_FLOW_ITEM_TYPE_END;
1951                         vxlan_spec = item->spec;
1952                         vxlan_mask = item->mask;
1953
1954                         if (vxlan_spec || vxlan_mask) {
1955                                 rte_flow_error_set(error, EINVAL,
1956                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1957                                                    item,
1958                                                    "Invalid vxlan field");
1959                                 return -rte_errno;
1960                         }
1961
1962                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1963                         break;
1964                 case RTE_FLOW_ITEM_TYPE_GTPU:
1965                         l3 = RTE_FLOW_ITEM_TYPE_END;
1966                         gtp_spec = item->spec;
1967                         gtp_mask = item->mask;
1968
1969                         if (gtp_spec && gtp_mask) {
1970                                 if (gtp_mask->v_pt_rsv_flags ||
1971                                     gtp_mask->msg_type ||
1972                                     gtp_mask->msg_len) {
1973                                         rte_flow_error_set(error, EINVAL,
1974                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1975                                                    item,
1976                                                    "Invalid GTP mask");
1977                                         return -rte_errno;
1978                                 }
1979
1980                                 if (gtp_mask->teid == UINT32_MAX)
1981                                         input_set |= ICE_INSET_GTPU_TEID;
1982
1983                                 filter->input.gtpu_data.teid = gtp_spec->teid;
1984                         }
1985
1986                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1987                         break;
1988                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1989                         gtp_psc_spec = item->spec;
1990                         gtp_psc_mask = item->mask;
1991
1992                         if (gtp_psc_spec && gtp_psc_mask) {
1993                                 if (gtp_psc_mask->qfi == UINT8_MAX)
1994                                         input_set |= ICE_INSET_GTPU_QFI;
1995
1996                                 filter->input.gtpu_data.qfi =
1997                                         gtp_psc_spec->qfi;
1998                         }
1999                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
2000                         break;
2001                 default:
2002                         rte_flow_error_set(error, EINVAL,
2003                                    RTE_FLOW_ERROR_TYPE_ITEM,
2004                                    item,
2005                                    "Invalid pattern item.");
2006                         return -rte_errno;
2007                 }
2008         }
2009
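        /* GTP-U patterns are matched on the outer IPv4/IPv6 and UDP headers,
         * so remap the flow type derived above to the corresponding GTPU
         * profile.
         */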
2010         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU ||
2011             tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
2012                 if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2013                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
2014                 else
2015                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
2016         }
2017
2018         filter->tunnel_type = tunnel_type;
2019         filter->input.flow_type = flow_type;
2020         filter->input_set = input_set;
2021
2022         return 0;
2023 }
2024
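/* Engine parse hook: look up the pattern in the supported-pattern table,
 * parse the pattern and actions into pf->fdir.conf, verify the input set
 * against the table entry, and return the filter through *meta.
 */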
2025 static int
2026 ice_fdir_parse(struct ice_adapter *ad,
2027                struct ice_pattern_match_item *array,
2028                uint32_t array_len,
2029                const struct rte_flow_item pattern[],
2030                const struct rte_flow_action actions[],
2031                void **meta,
2032                struct rte_flow_error *error)
2033 {
2034         struct ice_pf *pf = &ad->pf;
2035         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2036         struct ice_pattern_match_item *item = NULL;
2037         uint64_t input_set;
2038         int ret;
2039
2040         memset(filter, 0, sizeof(*filter));
2041         item = ice_search_pattern_match_item(pattern, array, array_len, error);
2042         if (!item)
2043                 return -rte_errno;
2044
2045         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2046         if (ret)
2047                 goto error;
2048         input_set = filter->input_set;
2049         if (!input_set || input_set & ~item->input_set_mask) {
2050                 rte_flow_error_set(error, EINVAL,
2051                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2052                                    pattern,
2053                                    "Invalid input set");
2054                 ret = -rte_errno;
2055                 goto error;
2056         }
2057
2058         ret = ice_fdir_parse_action(ad, actions, error, filter);
2059         if (ret)
2060                 goto error;
2061
2062         if (meta)
2063                 *meta = filter;
2064 error:
2065         rte_free(item);
2066         return ret;
2067 }
2068
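/* Parsers binding the FDIR engine to the pattern tables for the OS
 * default and comms packages, both handled at the distributor stage.
 */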
2069 static struct ice_flow_parser ice_fdir_parser_os = {
2070         .engine = &ice_fdir_engine,
2071         .array = ice_fdir_pattern_os,
2072         .array_len = RTE_DIM(ice_fdir_pattern_os),
2073         .parse_pattern_action = ice_fdir_parse,
2074         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2075 };
2076
2077 static struct ice_flow_parser ice_fdir_parser_comms = {
2078         .engine = &ice_fdir_engine,
2079         .array = ice_fdir_pattern_comms,
2080         .array_len = RTE_DIM(ice_fdir_pattern_comms),
2081         .parse_pattern_action = ice_fdir_parse,
2082         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2083 };
2084
2085 RTE_INIT(ice_fdir_engine_register)
2086 {
2087         ice_register_flow_engine(&ice_fdir_engine);
2088 }