/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH (\
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_FDIR_INSET_ETH | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
        ICE_INSET_IPV6_PKID)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_IPV4_TCP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
        ICE_FDIR_INSET_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
        ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV4_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
        ICE_INSET_NAT_T_ESP_SPI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
        {pattern_raw,                                   ICE_INSET_NONE,                 ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_ethertype,                             ICE_FDIR_INSET_ETH,             ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4,                              ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,                          ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,                          ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,                         ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6,                              ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_frag_ext,                     ICE_FDIR_INSET_ETH_IPV6,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,                          ICE_FDIR_INSET_ETH_IPV6_UDP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,                          ICE_FDIR_INSET_ETH_IPV6_TCP,    ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,                         ICE_FDIR_INSET_ETH_IPV6_SCTP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,                          ICE_FDIR_INSET_IPV4_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,                      ICE_FDIR_INSET_IPV4_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,                          ICE_FDIR_INSET_IPV6_ESP,        ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,                      ICE_FDIR_INSET_IPV6_NATT_ESP,   ICE_INSET_NONE,                 ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,               ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,          ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_UDP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_TCP,    ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,      ICE_FDIR_INSET_ETH_IPV4_VXLAN,  ICE_FDIR_INSET_ETH_IPV4_SCTP,   ICE_INSET_NONE},
        /* duplicated GTPU input set in 3rd column to align with shared code behavior. Ideally, only put GTPU field in 2nd column. */
        {pattern_eth_ipv4_gtpu,                         ICE_FDIR_INSET_IPV4_GTPU,       ICE_FDIR_INSET_IPV4_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,                      ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_FDIR_INSET_IPV4_GTPU_EH,    ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu,                         ICE_FDIR_INSET_IPV6_GTPU,       ICE_FDIR_INSET_IPV6_GTPU,       ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh,                      ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_FDIR_INSET_IPV6_GTPU_EH,    ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

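/*
 * ice_fdir_prof_alloc - allocate FDIR profile memory for every filter type
 * @hw: pointer to the hardware structure
 *
 * Allocates hw->fdir_prof plus one ice_fd_hw_prof per flow type; on failure
 * every partially allocated entry is released again.
 */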
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

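/*
 * ice_fdir_counter_pool_add - add a block of counters to the container
 * @pf: board private structure
 * @container: counter pool container to extend
 * @index_start: first hardware counter index covered by the pool
 * @len: number of counters in the pool
 */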
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink the pool before freeing it so the list holds no stale entry */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

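/*
 * ice_fdir_counter_alloc - allocate (or reuse) an FDIR statistics counter
 * @pf: board private structure
 * @shared: when non-zero, try to reuse a shared counter with the same @id
 * @id: counter id requested by the application
 *
 * A freshly allocated counter is reset in hardware and removed from its
 * pool's free list; fully used pools are moved to the tail of the pool list.
 */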
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

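/*
 * ice_fdir_counter_free - drop a reference to a counter
 *
 * The counter goes back onto its pool's free list once the last
 * reference is gone.
 */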
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

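/*
 * ice_fdir_init_filter_list - set up the software filter lookup structures
 * @pf: board private structure
 *
 * Creates the per-port hash table keyed by ice_fdir_fltr_pattern and the
 * hash map used to translate hash positions back into filter entries.
 */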
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

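/*
 * ice_fdir_cur_prof_conflict - check a new segment against the current profile
 *
 * Returns 0 when no profile exists (or an unused conflicting one could be
 * removed), -EEXIST when an identical profile is already installed, and
 * -EINVAL when a conflicting profile is still referenced by a rule.
 */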
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* if the input sets are identical there is no conflict,
         * return -EEXIST so the existing profile is reused
         */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile and a rule both exist, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

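/*
 * ice_fdir_cross_prof_conflict - resolve conflicts with sibling flow types
 *
 * An L3 "other" profile overlaps the L4 (UDP/TCP/SCTP) profiles of the same
 * IP version and vice versa, so any empty conflicting profile is removed
 * before a new one is created.
 */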
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

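/*
 * ice_fdir_hw_tbl_conf - program an FDIR profile and its flow entries
 * @pf: board private structure
 * @vsi: main VSI that receives matched packets
 * @ctrl_vsi: control VSI used for rule programming
 * @seg: packet segment description (outer segment first for tunnels)
 * @ptype: filter packet type the profile belongs to
 * @is_tunnel: whether a two-segment tunnel profile is needed
 */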
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check for an input set conflict on the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check whether the profile conflicts with other profiles */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

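/*
 * ice_fdir_input_set_parse - translate an input set bitmap to flow fields
 * @inset: input set bits collected from the flow pattern
 * @field: output array, pre-filled with ICE_FLOW_FIELD_IDX_MAX terminators
 */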
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                /* tunnel inner fields map to the same flow field indexes */
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
                {ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
                {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
                {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

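/*
 * ice_fdir_input_set_hdrs - set the protocol header flags for a flow segment
 * @flow: FDIR filter packet type
 * @seg: flow segment to receive the header flags
 */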
static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV4:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_FRAG_IPV6:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_FRAG);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_GTPU_IP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NON_IP_L2:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }
}

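/*
 * ice_fdir_input_set_conf - create the HW profile for an input set
 *
 * Builds one flow segment for the outer part and, for tunnel flows, a second
 * one for the inner part, then installs the profile via ice_fdir_hw_tbl_conf.
 */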
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t inner_input_set, uint64_t outer_input_set,
                        enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        uint64_t input_set;
        bool is_tunnel;
        int k, i, ret = 0;

        if (!(inner_input_set | outer_input_set))
                return -EINVAL;

        seg_tun = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
        if (!seg_tun) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -ENOMEM;
        }

        /* use seg_tun[1] to record tunnel inner part */
        for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
                seg = &seg_tun[k];
                input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
                if (input_set == 0)
                        continue;

                for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                        field[i] = ICE_FLOW_FIELD_IDX_MAX;

                ice_fdir_input_set_parse(input_set, field);

                ice_fdir_input_set_hdrs(flow, seg);

                for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                        ice_flow_set_fld(seg, field[i],
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL,
                                         ICE_FLOW_FLD_OFF_INVAL, false);
                }
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);

        ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                   seg_tun, flow, is_tunnel);
        if (ret < 0) {
                /* the segment memory is only owned by the profile on success */
                rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        }
        return ret;
}

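/*
 * ice_fdir_cnt_update - track the number of active filters
 * @add: true when a filter was added, false when one was removed
 */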
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        parser = &ice_fdir_parser;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_flow_parser *parser;
        struct ice_pf *pf = &ad->pf;

        if (ad->hw.dcf_enabled)
                return;

        parser = &ice_fdir_parser;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

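/*
 * ice_fdir_add_del_raw - program one raw-pattern filter rule
 *
 * Copies the user supplied packet into the programming buffer and issues
 * the add/delete descriptor on the FDIR programming queue.
 */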
static int
ice_fdir_add_del_raw(struct ice_pf *pf,
                     struct ice_fdir_filter_conf *filter,
                     bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);

        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);

        struct ice_fltr_desc desc;
        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        return ice_fdir_programming(pf, &desc);
}

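/*
 * ice_fdir_add_del_filter - program one filter rule to hardware
 *
 * Generates the dummy programming packet for the filter's flow type and
 * issues the add/delete descriptor on the FDIR programming queue.
 */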
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

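/*
 * ice_fdir_create_filter - create a flow director rule
 *
 * Raw-pattern rules (parser_ena) reuse or install a HW profile derived from
 * the parsed profile info; regular rules configure the input set, allocate
 * an optional counter, program the rule and insert it into the SW list.
 */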
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;
        int i;

        if (filter->parser_ena) {
                struct ice_hw *hw = ICE_PF_TO_HW(pf);

                int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
                int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
                u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
                u16 main_vsi = pf->main_vsi->idx;
                bool fv_found = false;

                struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
                if (pi->fdir_actived_cnt != 0) {
                        for (i = 0; i < ICE_MAX_FV_WORDS; i++)
                                if (pi->prof.fv[i].proto_id !=
                                    filter->prof->fv[i].proto_id ||
                                    pi->prof.fv[i].offset !=
                                    filter->prof->fv[i].offset ||
                                    pi->prof.fv[i].msk !=
                                    filter->prof->fv[i].msk)
                                        break;
                        if (i == ICE_MAX_FV_WORDS) {
                                fv_found = true;
                                pi->fdir_actived_cnt++;
                        }
                }

                if (!fv_found) {
                        ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
                                                   filter->prof, ICE_BLK_FD);
                        if (ret)
                                goto error;
                }

                ret = ice_fdir_add_del_raw(pf, filter, true);
                if (ret)
                        goto error;

                if (!fv_found) {
                        for (i = 0; i < filter->prof->fv_num; i++) {
                                pi->prof.fv[i].proto_id =
                                        filter->prof->fv[i].proto_id;
                                pi->prof.fv[i].offset =
                                        filter->prof->fv[i].offset;
                                pi->prof.fv[i].msk = filter->prof->fv[i].msk;
                        }
                        pi->fdir_actived_cnt = 1;
                }

                if (filter->mark_flag == 1)
                        ice_fdir_rx_parsing_enable(ad, 1);

                entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
                if (!entry)
                        goto error;

                rte_memcpy(entry, filter, sizeof(*filter));

                flow->rule = entry;

                return 0;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                                      filter->input_set_i, filter->input_set_o,
                                      filter->tunnel_type);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf, 0, act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        if (filter->mark_flag == 1)
                ice_fdir_rx_parsing_enable(ad, 1);

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
1457         if (filter->counter) {
1458                 ice_fdir_counter_free(pf, filter->counter);
1459                 filter->counter = NULL;
1460         }
1461
1462 free_entry:
1463         rte_free(entry);
1464         return -rte_errno;
1465
1466 error:
1467         rte_free(filter->prof);
1468         rte_free(filter->pkt_buf);
1469         return -rte_errno;
1470 }
1471
1472 static int
1473 ice_fdir_destroy_filter(struct ice_adapter *ad,
1474                         struct rte_flow *flow,
1475                         struct rte_flow_error *error)
1476 {
1477         struct ice_pf *pf = &ad->pf;
1478         struct ice_fdir_info *fdir_info = &pf->fdir;
1479         struct ice_fdir_filter_conf *filter, *entry;
1480         struct ice_fdir_fltr_pattern key;
1481         bool is_tun;
1482         int ret;
1483
1484         filter = (struct ice_fdir_filter_conf *)flow->rule;
1485
1486         if (filter->parser_ena) {
1487                 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1488
1489                 int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
1490                 int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
1491                 u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
1492                 u16 main_vsi = pf->main_vsi->idx;
1493                 enum ice_block blk = ICE_BLK_FD;
1494                 u16 vsi_num;
1495
1496                 ret = ice_fdir_add_del_raw(pf, filter, false);
1497                 if (ret)
1498                         return -rte_errno;
1499
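                     /* Drop one reference on the PTG's profile; once the
                      * last raw flow is gone, unbind the profile from both
                      * the control and the main VSI.
                      */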
1500                 struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
1501                 if (pi->fdir_actived_cnt != 0) {
1502                         pi->fdir_actived_cnt--;
1503                         if (!pi->fdir_actived_cnt) {
1504                                 vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
1505                                 ice_rem_prof_id_flow(hw, blk, vsi_num, id);
1506
1507                                 vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
1508                                 ice_rem_prof_id_flow(hw, blk, vsi_num, id);
1509                         }
1510                 }
1511
1512                 if (filter->mark_flag == 1)
1513                         ice_fdir_rx_parsing_enable(ad, 0);
1514
1515                 flow->rule = NULL;
1516
1517                 rte_free(filter->prof);
1518                 rte_free(filter->pkt_buf);
1519                 rte_free(filter);
1520
1521                 return 0;
1522         }
1523
1524         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1525
1526         if (filter->counter) {
1527                 ice_fdir_counter_free(pf, filter->counter);
1528                 filter->counter = NULL;
1529         }
1530
1531         ice_fdir_extract_fltr_key(&key, filter);
1532         entry = ice_fdir_entry_lookup(fdir_info, &key);
1533         if (!entry) {
1534                 rte_flow_error_set(error, ENOENT,
1535                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1536                                    "Can't find entry.");
1537                 return -rte_errno;
1538         }
1539
1540         ret = ice_fdir_add_del_filter(pf, filter, false);
1541         if (ret) {
1542                 rte_flow_error_set(error, -ret,
1543                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1544                                    "Del filter rule failed.");
1545                 return -rte_errno;
1546         }
1547
1548         ret = ice_fdir_entry_del(pf, &key);
1549         if (ret) {
1550                 rte_flow_error_set(error, -ret,
1551                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1552                                    "Remove entry from table failed.");
1553                 return -rte_errno;
1554         }
1555
1556         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1557
1558         if (filter->mark_flag == 1)
1559                 ice_fdir_rx_parsing_enable(ad, 0);
1560
1561         flow->rule = NULL;
1562
1563         rte_free(filter);
1564
1565         return 0;
1566 }
1567
1568 static int
1569 ice_fdir_query_count(struct ice_adapter *ad,
1570                       struct rte_flow *flow,
1571                       struct rte_flow_query_count *flow_stats,
1572                       struct rte_flow_error *error)
1573 {
1574         struct ice_pf *pf = &ad->pf;
1575         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1576         struct ice_fdir_filter_conf *filter = flow->rule;
1577         struct ice_fdir_counter *counter = filter->counter;
1578         uint64_t hits_lo, hits_hi;
1579
1580         if (!counter) {
1581                 rte_flow_error_set(error, EINVAL,
1582                                   RTE_FLOW_ERROR_TYPE_ACTION,
1583                                   NULL,
1584                                   "FDIR counters not available");
1585                 return -rte_errno;
1586         }
1587
1588         /*
1589          * Reading the low 32-bits latches the high 32-bits into a shadow
1590          * register. Reading the high 32 bits returns the value in the
1591          * shadow register.
1592          */
1593         hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1594         hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1595
1596         flow_stats->hits_set = 1;
1597         flow_stats->hits = hits_lo | (hits_hi << 32);
1598         flow_stats->bytes_set = 0;
1599         flow_stats->bytes = 0;
1600
1601         if (flow_stats->reset) {
1602                 /* reset statistic counter value */
1603                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1604                 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1605         }
1606
1607         return 0;
1608 }
1609
1610 static struct ice_flow_engine ice_fdir_engine = {
1611         .init = ice_fdir_init,
1612         .uninit = ice_fdir_uninit,
1613         .create = ice_fdir_create_filter,
1614         .destroy = ice_fdir_destroy_filter,
1615         .query_count = ice_fdir_query_count,
1616         .type = ICE_FLOW_ENGINE_FDIR,
1617 };
1618
1619 static int
1620 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1621                               struct rte_flow_error *error,
1622                               const struct rte_flow_action *act,
1623                               struct ice_fdir_filter_conf *filter)
1624 {
1625         const struct rte_flow_action_rss *rss = act->conf;
1626         uint32_t i;
1627
1628         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1629                 rte_flow_error_set(error, EINVAL,
1630                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1631                                    "Invalid action.");
1632                 return -rte_errno;
1633         }
1634
1635         if (rss->queue_num <= 1) {
1636                 rte_flow_error_set(error, EINVAL,
1637                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1638                                    "Queue region size can't be 0 or 1.");
1639                 return -rte_errno;
1640         }
1641
1642         /* check that the queue indexes of the queue region are contiguous */
1643         for (i = 0; i < rss->queue_num - 1; i++) {
1644                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1645                         rte_flow_error_set(error, EINVAL,
1646                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1647                                            "Discontinuous queue region");
1648                         return -rte_errno;
1649                 }
1650         }
1651
1652         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1653                 rte_flow_error_set(error, EINVAL,
1654                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1655                                    "Invalid queue region indexes.");
1656                 return -rte_errno;
1657         }
1658
1659         if (!(rte_is_power_of_2(rss->queue_num) &&
1660              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1661                 rte_flow_error_set(error, EINVAL,
1662                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1663                                    "The region size should be any of the following values: "
1664                                    "1, 2, 4, 8, 16, 32, 64 or 128, as long as the total number "
1665                                    "of queues does not exceed the VSI allocation.");
1666                 return -rte_errno;
1667         }
1668
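             /* q_region encodes the region size as a power-of-2 exponent:
              * rte_fls_u32() on a power-of-2 queue_num returns log2 + 1,
              * hence the minus one.
              */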
1669         filter->input.q_index = rss->queue[0];
1670         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1671         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1672
1673         return 0;
1674 }
1675
1676 static int
1677 ice_fdir_parse_action(struct ice_adapter *ad,
1678                       const struct rte_flow_action actions[],
1679                       struct rte_flow_error *error,
1680                       struct ice_fdir_filter_conf *filter)
1681 {
1682         struct ice_pf *pf = &ad->pf;
1683         const struct rte_flow_action_queue *act_q;
1684         const struct rte_flow_action_mark *mark_spec = NULL;
1685         const struct rte_flow_action_count *act_count;
1686         uint32_t dest_num = 0;
1687         uint32_t mark_num = 0;
1688         uint32_t counter_num = 0;
1689         int ret;
1690
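             /* Walk the action list once, recording the configuration and
              * counting destination, mark and count actions; the allowed
              * combinations are validated after the loop.
              */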
1691         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1692                 switch (actions->type) {
1693                 case RTE_FLOW_ACTION_TYPE_VOID:
1694                         break;
1695                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1696                         dest_num++;
1697
1698                         act_q = actions->conf;
1699                         filter->input.q_index = act_q->index;
1700                         if (filter->input.q_index >=
1701                                         pf->dev_data->nb_rx_queues) {
1702                                 rte_flow_error_set(error, EINVAL,
1703                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1704                                                    actions,
1705                                                    "Invalid queue for FDIR.");
1706                                 return -rte_errno;
1707                         }
1708                         filter->input.dest_ctl =
1709                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1710                         break;
1711                 case RTE_FLOW_ACTION_TYPE_DROP:
1712                         dest_num++;
1713
1714                         filter->input.dest_ctl =
1715                                 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1716                         break;
1717                 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1718                         dest_num++;
1719
1720                         filter->input.dest_ctl =
1721                                 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1722                         break;
1723                 case RTE_FLOW_ACTION_TYPE_RSS:
1724                         dest_num++;
1725
1726                         ret = ice_fdir_parse_action_qregion(pf,
1727                                                 error, actions, filter);
1728                         if (ret)
1729                                 return ret;
1730                         break;
1731                 case RTE_FLOW_ACTION_TYPE_MARK:
1732                         mark_num++;
1733                         filter->mark_flag = 1;
1734                         mark_spec = actions->conf;
1735                         filter->input.fltr_id = mark_spec->id;
1736                         filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1737                         break;
1738                 case RTE_FLOW_ACTION_TYPE_COUNT:
1739                         counter_num++;
1740
1741                         act_count = actions->conf;
1742                         filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1743                         rte_memcpy(&filter->act_count, act_count,
1744                                                 sizeof(filter->act_count));
1745
1746                         break;
1747                 default:
1748                         rte_flow_error_set(error, EINVAL,
1749                                    RTE_FLOW_ERROR_TYPE_ACTION, actions,
1750                                    "Invalid action.");
1751                         return -rte_errno;
1752                 }
1753         }
1754
1755         if (dest_num >= 2) {
1756                 rte_flow_error_set(error, EINVAL,
1757                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1758                            "Unsupported action combination");
1759                 return -rte_errno;
1760         }
1761
1762         if (mark_num >= 2) {
1763                 rte_flow_error_set(error, EINVAL,
1764                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1765                            "Too many mark actions");
1766                 return -rte_errno;
1767         }
1768
1769         if (counter_num >= 2) {
1770                 rte_flow_error_set(error, EINVAL,
1771                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1772                            "Too many count actions");
1773                 return -rte_errno;
1774         }
1775
1776         if (dest_num + mark_num + counter_num == 0) {
1777                 rte_flow_error_set(error, EINVAL,
1778                            RTE_FLOW_ERROR_TYPE_ACTION, actions,
1779                            "Empty action");
1780                 return -rte_errno;
1781         }
1782
1783         /* set the default action to PASSTHRU mode in the "mark/count only" case */
1784         if (dest_num == 0)
1785                 filter->input.dest_ctl =
1786                         ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1787
1788         return 0;
1789 }
1790
1791 static int
1792 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1793                        const struct rte_flow_item pattern[],
1794                        struct rte_flow_error *error,
1795                        struct ice_fdir_filter_conf *filter)
1796 {
1797         const struct rte_flow_item *item = pattern;
1798         enum rte_flow_item_type item_type;
1799         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1800         enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
1801         enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1802         const struct rte_flow_item_raw *raw_spec, *raw_mask;
1803         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1804         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
1805         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1806         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
1807                                         *ipv6_frag_mask;
1808         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1809         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1810         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1811         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1812         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1813         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1814         const struct rte_flow_item_esp *esp_spec, *esp_mask;
1815         uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
1816         uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
1817         uint64_t *input_set;
1818         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1819         uint8_t  ipv6_addr_mask[16] = {
1820                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1821                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1822         };
1823         uint32_t vtc_flow_cpu;
1824         uint16_t ether_type;
1825         enum rte_flow_item_type next_type;
1826         bool is_outer = true;
1827         struct ice_fdir_extra *p_ext_data;
1828         struct ice_fdir_v4 *p_v4 = NULL;
1829         struct ice_fdir_v6 *p_v6 = NULL;
1830         struct ice_parser_result rslt;
1831         struct ice_parser *psr;
1832         uint8_t item_num = 0;
1833
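             /* Pre-scan the pattern: detect a VXLAN tunnel, treat GTPU
              * fields as inner to match shared code, and count the items
              * (a raw pattern is only handled as the sole item).
              */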
1834         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1835                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1836                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1837                 /* To align with shared code behavior, save GTPU outer
1838                  * fields in the inner struct.
1839                  */
1840                 if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
1841                     item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
1842                         is_outer = false;
1843                 }
1844                 item_num++;
1845         }
1846
1847         /* This loop parses the flow pattern and distinguishes non-tunnel
1848          * from tunnel flows. input_set_i is used for the inner part.
1849          */
1850         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1851                 item_type = item->type;
1852
1853                 if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1854                                     item_type ==
1855                                     RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
1856                         rte_flow_error_set(error, EINVAL,
1857                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
1858                                            "Range not supported");
                             return -rte_errno;
1859                 }
1860
1861                 input_set = (tunnel_type && !is_outer) ?
1862                             &input_set_i : &input_set_o;
1863
1864                 switch (item_type) {
1865                 case RTE_FLOW_ITEM_TYPE_RAW: {
1866                         raw_spec = item->spec;
1867                         raw_mask = item->mask;
1868
1869                         if (item_num != 1)
1870                                 break;

                             /* both spec and mask are required for a raw item */
                             if (!raw_spec || !raw_mask)
                                     return -rte_errno;
1871
1872                         /* convert the raw spec & mask from hex strings to binary bytes */
1873                         unsigned char *tmp_spec =
1874                                 (uint8_t *)(uintptr_t)raw_spec->pattern;
1875                         unsigned char *tmp_mask =
1876                                 (uint8_t *)(uintptr_t)raw_mask->pattern;
1877                         uint16_t udp_port = 0;
1878                         uint16_t tmp_val = 0;
1879                         uint8_t pkt_len = 0;
1880                         uint8_t tmp = 0;
1881                         int i, j;
1882
1883                         pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
1884                         if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
1885                                 pkt_len)
1886                                 return -rte_errno;
1887
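                             /* Each packet byte is encoded as two hex
                              * characters; fold every pair in place so
                              * tmp_spec/tmp_mask end up holding raw bytes
                              * and pkt_len can be halved afterwards.
                              */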
1888                         for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
1889                                 tmp = tmp_spec[i];
1890                                 if (tmp >= 'a' && tmp <= 'f')
1891                                         tmp_val = tmp - 'a' + 10;
1892                                 if (tmp >= 'A' && tmp <= 'F')
1893                                         tmp_val = tmp - 'A' + 10;
1894                                 if (tmp >= '0' && tmp <= '9')
1895                                         tmp_val = tmp - '0';
1896
1897                                 tmp_val *= 16;
1898                                 tmp = tmp_spec[i + 1];
1899                                 if (tmp >= 'a' && tmp <= 'f')
1900                                         tmp_spec[j] = tmp_val + tmp - 'a' + 10;
1901                                 if (tmp >= 'A' && tmp <= 'F')
1902                                         tmp_spec[j] = tmp_val + tmp - 'A' + 10;
1903                                 if (tmp >= '0' && tmp <= '9')
1904                                         tmp_spec[j] = tmp_val + tmp - '0';
1905
1906                                 tmp = tmp_mask[i];
1907                                 if (tmp >= 'a' && tmp <= 'f')
1908                                         tmp_val = tmp - 'a' + 10;
1909                                 if (tmp >= 'A' && tmp <= 'F')
1910                                         tmp_val = tmp - 'A' + 10;
1911                                 if (tmp >= '0' && tmp <= '9')
1912                                         tmp_val = tmp - '0';
1913
1914                                 tmp_val *= 16;
1915                                 tmp = tmp_mask[i + 1];
1916                                 if (tmp >= 'a' && tmp <= 'f')
1917                                         tmp_mask[j] = tmp_val + tmp - 'a' + 10;
1918                                 if (tmp >= 'A' && tmp <= 'F')
1919                                         tmp_mask[j] = tmp_val + tmp - 'A' + 10;
1920                                 if (tmp >= '0' && tmp <= '9')
1921                                         tmp_mask[j] = tmp_val + tmp - '0';
1922                         }
1923
1924                         pkt_len /= 2;
1925
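                             /* Run the decoded packet through the ice
                              * parser to derive a profile; register an open
                              * VXLAN port first, if any, so tunnelled
                              * payloads are parsed as well.
                              */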
1926                         if (ice_parser_create(&ad->hw, &psr))
1927                                 return -rte_errno;
1928                         if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
1929                                                      &udp_port))
1930                                 ice_parser_vxlan_tunnel_set(psr, udp_port,
1931                                                             true);
1932                         if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt)) {
                                     ice_parser_destroy(psr);
                                     return -rte_errno;
                             }
1934                         ice_parser_destroy(psr);
1935
1939                         filter->prof = (struct ice_parser_profile *)
1940                                 ice_malloc(&ad->hw, sizeof(*filter->prof));
1941                         if (!filter->prof)
1942                                 return -ENOMEM;
1943
1944                         if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
1945                                 pkt_len, ICE_BLK_FD, true, filter->prof))
1946                                 return -rte_errno;
1947
1948                         u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
1949                         if (!pkt_buf)
1950                                 return -ENOMEM;
1951                         rte_memcpy(pkt_buf, tmp_spec, pkt_len);
1952                         filter->pkt_buf = pkt_buf;
1953
1954                         filter->pkt_len = pkt_len;
1955
1956                         filter->parser_ena = true;
1957
1958                         break;
1959                 }
1960
1961                 case RTE_FLOW_ITEM_TYPE_ETH:
1962                         flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1963                         eth_spec = item->spec;
1964                         eth_mask = item->mask;
1965
1966                         if (!(eth_spec && eth_mask))
1967                                 break;
1968
1969                         if (!rte_is_zero_ether_addr(&eth_mask->dst))
1970                                 *input_set |= ICE_INSET_DMAC;
1971                         if (!rte_is_zero_ether_addr(&eth_mask->src))
1972                                 *input_set |= ICE_INSET_SMAC;
1973
1974                         next_type = (item + 1)->type;
1975                         /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
1976                         if (eth_mask->type == RTE_BE16(0xffff) &&
1977                             next_type == RTE_FLOW_ITEM_TYPE_END) {
1978                                 *input_set |= ICE_INSET_ETHERTYPE;
1979                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
1980
1981                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1982                                     ether_type == RTE_ETHER_TYPE_IPV6) {
1983                                         rte_flow_error_set(error, EINVAL,
1984                                                            RTE_FLOW_ERROR_TYPE_ITEM,
1985                                                            item,
1986                                                            "Unsupported ether_type.");
1987                                         return -rte_errno;
1988                                 }
1989                         }
1990
1991                         p_ext_data = (tunnel_type && is_outer) ?
1992                                      &filter->input.ext_data_outer :
1993                                      &filter->input.ext_data;
1994                         rte_memcpy(&p_ext_data->src_mac,
1995                                    &eth_spec->src, RTE_ETHER_ADDR_LEN);
1996                         rte_memcpy(&p_ext_data->dst_mac,
1997                                    &eth_spec->dst, RTE_ETHER_ADDR_LEN);
1998                         rte_memcpy(&p_ext_data->ether_type,
1999                                    &eth_spec->type, sizeof(eth_spec->type));
2000                         break;
2001                 case RTE_FLOW_ITEM_TYPE_IPV4:
2002                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
2003                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2004                         ipv4_spec = item->spec;
2005                         ipv4_last = item->last;
2006                         ipv4_mask = item->mask;
2007                         p_v4 = (tunnel_type && is_outer) ?
2008                                &filter->input.ip_outer.v4 :
2009                                &filter->input.ip.v4;
2010
2011                         if (!(ipv4_spec && ipv4_mask))
2012                                 break;
2013
2014                         /* Check IPv4 mask and update input set */
2015                         if (ipv4_mask->hdr.version_ihl ||
2016                             ipv4_mask->hdr.total_length ||
2017                             ipv4_mask->hdr.hdr_checksum) {
2018                                 rte_flow_error_set(error, EINVAL,
2019                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2020                                                    item,
2021                                                    "Invalid IPv4 mask.");
2022                                 return -rte_errno;
2023                         }
2024
2025                         if (ipv4_last &&
2026                             (ipv4_last->hdr.version_ihl ||
2027                              ipv4_last->hdr.type_of_service ||
2028                              ipv4_last->hdr.time_to_live ||
2029                              ipv4_last->hdr.total_length ||
2030                              ipv4_last->hdr.next_proto_id ||
2031                              ipv4_last->hdr.hdr_checksum ||
2032                              ipv4_last->hdr.src_addr ||
2033                              ipv4_last->hdr.dst_addr)) {
2034                                 rte_flow_error_set(error, EINVAL,
2035                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2036                                                    item, "Invalid IPv4 last.");
2037                                 return -rte_errno;
2038                         }
2039
2040                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2041                                 *input_set |= ICE_INSET_IPV4_DST;
2042                         if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2043                                 *input_set |= ICE_INSET_IPV4_SRC;
2044                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2045                                 *input_set |= ICE_INSET_IPV4_TTL;
2046                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2047                                 *input_set |= ICE_INSET_IPV4_PROTO;
2048                         if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2049                                 *input_set |= ICE_INSET_IPV4_TOS;
2050
2051                         p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
2052                         p_v4->src_ip = ipv4_spec->hdr.src_addr;
2053                         p_v4->ttl = ipv4_spec->hdr.time_to_live;
2054                         p_v4->proto = ipv4_spec->hdr.next_proto_id;
2055                         p_v4->tos = ipv4_spec->hdr.type_of_service;
2056
2057                         /* fragmented IPv4:
2058                          * spec is 0x2000 (MF flag set), mask is 0x2000
2059                          */
2060                         if (ipv4_spec->hdr.fragment_offset ==
2061                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
2062                             ipv4_mask->hdr.fragment_offset ==
2063                             rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
2064                                 /* all IPv4 fragment packets have the same
2065                                  * ethertype; if the spec and mask are valid,
2066                                  * set the ethertype into the input set.
2067                                  */
2068                                 flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
2069                                 *input_set |= ICE_INSET_ETHERTYPE;
2070                                 input_set_o |= ICE_INSET_ETHERTYPE;
2071                         } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
2072                                 rte_flow_error_set(error, EINVAL,
2073                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2074                                                    item, "Invalid IPv4 mask.");
2075                                 return -rte_errno;
2076                         }
2077
2078                         break;
2079                 case RTE_FLOW_ITEM_TYPE_IPV6:
2080                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
2081                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2082                         ipv6_spec = item->spec;
2083                         ipv6_mask = item->mask;
2084                         p_v6 = (tunnel_type && is_outer) ?
2085                                &filter->input.ip_outer.v6 :
2086                                &filter->input.ip.v6;
2087
2088                         if (!(ipv6_spec && ipv6_mask))
2089                                 break;
2090
2091                         /* Check IPv6 mask and update input set */
2092                         if (ipv6_mask->hdr.payload_len) {
2093                                 rte_flow_error_set(error, EINVAL,
2094                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2095                                                    item,
2096                                                    "Invalid IPv6 mask");
2097                                 return -rte_errno;
2098                         }
2099
2100                         if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
2101                                     RTE_DIM(ipv6_mask->hdr.src_addr)))
2102                                 *input_set |= ICE_INSET_IPV6_SRC;
2103                         if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
2104                                     RTE_DIM(ipv6_mask->hdr.dst_addr)))
2105                                 *input_set |= ICE_INSET_IPV6_DST;
2106
2107                         if ((ipv6_mask->hdr.vtc_flow &
2108                              rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
2109                             == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
2110                                 *input_set |= ICE_INSET_IPV6_TC;
2111                         if (ipv6_mask->hdr.proto == UINT8_MAX)
2112                                 *input_set |= ICE_INSET_IPV6_NEXT_HDR;
2113                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2114                                 *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
2115
2116                         rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
2117                         rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
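                             /* The traffic class sits in bits 20..27 of
                              * vtc_flow once converted to CPU order.
                              */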
2118                         vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2119                         p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
2120                         p_v6->proto = ipv6_spec->hdr.proto;
2121                         p_v6->hlim = ipv6_spec->hdr.hop_limits;
2122                         break;
2123                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
2124                         l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
2125                         flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
2126                         ipv6_frag_spec = item->spec;
2127                         ipv6_frag_mask = item->mask;
2128
2129                         if (!(ipv6_frag_spec && ipv6_frag_mask))
2130                                 break;
2131
2132                         /* fragmented IPv6:
2133                          * spec is 0x1 (more-fragments bit), mask is 0x1
2134                          */
2135                         if (ipv6_frag_spec->hdr.frag_data ==
2136                             rte_cpu_to_be_16(1) &&
2137                             ipv6_frag_mask->hdr.frag_data ==
2138                             rte_cpu_to_be_16(1)) {
2139                                 /* all IPv6 fragment packets have the same
2140                                  * ethertype; if the spec and mask are valid,
2141                                  * set the ethertype into the input set.
2142                                  */
2143                                 *input_set |= ICE_INSET_ETHERTYPE;
2144                                 input_set_o |= ICE_INSET_ETHERTYPE;
2145                         } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
2146                                 rte_flow_error_set(error, EINVAL,
2147                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2148                                                    item, "Invalid IPv6 mask.");
2149                                 return -rte_errno;
2150                         }
2151
2152                         break;
2153
2154                 case RTE_FLOW_ITEM_TYPE_TCP:
2155                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2156                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
2157                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2158                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
2159
2160                         tcp_spec = item->spec;
2161                         tcp_mask = item->mask;
2162
2163                         if (!(tcp_spec && tcp_mask))
2164                                 break;
2165
2166                         /* Check TCP mask and update input set */
2167                         if (tcp_mask->hdr.sent_seq ||
2168                             tcp_mask->hdr.recv_ack ||
2169                             tcp_mask->hdr.data_off ||
2170                             tcp_mask->hdr.tcp_flags ||
2171                             tcp_mask->hdr.rx_win ||
2172                             tcp_mask->hdr.cksum ||
2173                             tcp_mask->hdr.tcp_urp) {
2174                                 rte_flow_error_set(error, EINVAL,
2175                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2176                                                    item,
2177                                                    "Invalid TCP mask");
2178                                 return -rte_errno;
2179                         }
2180
2181                         if (tcp_mask->hdr.src_port == UINT16_MAX)
2182                                 *input_set |= ICE_INSET_TCP_SRC_PORT;
2183                         if (tcp_mask->hdr.dst_port == UINT16_MAX)
2184                                 *input_set |= ICE_INSET_TCP_DST_PORT;
2185
2186                         /* Get filter info */
2187                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2188                                 assert(p_v4);
2189                                 p_v4->dst_port = tcp_spec->hdr.dst_port;
2190                                 p_v4->src_port = tcp_spec->hdr.src_port;
2191                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2192                                 assert(p_v6);
2193                                 p_v6->dst_port = tcp_spec->hdr.dst_port;
2194                                 p_v6->src_port = tcp_spec->hdr.src_port;
2195                         }
2196                         break;
2197                 case RTE_FLOW_ITEM_TYPE_UDP:
2198                         l4 = RTE_FLOW_ITEM_TYPE_UDP;
2199                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2200                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
2201                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2202                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
2203
2204                         udp_spec = item->spec;
2205                         udp_mask = item->mask;
2206
2207                         if (!(udp_spec && udp_mask))
2208                                 break;
2209
2210                         /* Check UDP mask and update input set */
2211                         if (udp_mask->hdr.dgram_len ||
2212                             udp_mask->hdr.dgram_cksum) {
2213                                 rte_flow_error_set(error, EINVAL,
2214                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2215                                                    item,
2216                                                    "Invalid UDP mask");
2217                                 return -rte_errno;
2218                         }
2219
2220                         if (udp_mask->hdr.src_port == UINT16_MAX)
2221                                 *input_set |= ICE_INSET_UDP_SRC_PORT;
2222                         if (udp_mask->hdr.dst_port == UINT16_MAX)
2223                                 *input_set |= ICE_INSET_UDP_DST_PORT;
2224
2225                         /* Get filter info */
2226                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2227                                 assert(p_v4);
2228                                 p_v4->dst_port = udp_spec->hdr.dst_port;
2229                                 p_v4->src_port = udp_spec->hdr.src_port;
2230                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2231                                 assert(p_v6);
2232                                 p_v6->src_port = udp_spec->hdr.src_port;
2233                                 p_v6->dst_port = udp_spec->hdr.dst_port;
2234                         }
2235                         break;
2236                 case RTE_FLOW_ITEM_TYPE_SCTP:
2237                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2238                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
2239                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2240                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
2241
2242                         sctp_spec = item->spec;
2243                         sctp_mask = item->mask;
2244
2245                         if (!(sctp_spec && sctp_mask))
2246                                 break;
2247
2248                         /* Check SCTP mask and update input set */
2249                         if (sctp_mask->hdr.cksum) {
2250                                 rte_flow_error_set(error, EINVAL,
2251                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2252                                                    item,
2253                                                    "Invalid SCTP mask");
2254                                 return -rte_errno;
2255                         }
2256
2257                         if (sctp_mask->hdr.src_port == UINT16_MAX)
2258                                 *input_set |= ICE_INSET_SCTP_SRC_PORT;
2259                         if (sctp_mask->hdr.dst_port == UINT16_MAX)
2260                                 *input_set |= ICE_INSET_SCTP_DST_PORT;
2261
2262                         /* Get filter info */
2263                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2264                                 assert(p_v4);
2265                                 p_v4->dst_port = sctp_spec->hdr.dst_port;
2266                                 p_v4->src_port = sctp_spec->hdr.src_port;
2267                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2268                                 assert(p_v6);
2269                                 p_v6->dst_port = sctp_spec->hdr.dst_port;
2270                                 p_v6->src_port = sctp_spec->hdr.src_port;
2271                         }
2272                         break;
2273                 case RTE_FLOW_ITEM_TYPE_VOID:
2274                         break;
2275                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2276                         l3 = RTE_FLOW_ITEM_TYPE_END;
2277                         vxlan_spec = item->spec;
2278                         vxlan_mask = item->mask;
2279                         is_outer = false;
2280
2281                         if (!(vxlan_spec && vxlan_mask))
2282                                 break;
2283
2284                         if (vxlan_mask->hdr.vx_flags) {
2285                                 rte_flow_error_set(error, EINVAL,
2286                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2287                                                    item,
2288                                                    "Invalid VXLAN mask");
2289                                 return -rte_errno;
2290                         }
2291
2292                         if (vxlan_mask->hdr.vx_vni)
2293                                 *input_set |= ICE_INSET_VXLAN_VNI;
2294
2295                         filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
2296
2297                         break;
2298                 case RTE_FLOW_ITEM_TYPE_GTPU:
2299                         l3 = RTE_FLOW_ITEM_TYPE_END;
2300                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
2301                         gtp_spec = item->spec;
2302                         gtp_mask = item->mask;
2303
2304                         if (!(gtp_spec && gtp_mask))
2305                                 break;
2306
2307                         if (gtp_mask->v_pt_rsv_flags ||
2308                             gtp_mask->msg_type ||
2309                             gtp_mask->msg_len) {
2310                                 rte_flow_error_set(error, EINVAL,
2311                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2312                                                    item,
2313                                                    "Invalid GTP mask");
2314                                 return -rte_errno;
2315                         }
2316
2317                         if (gtp_mask->teid == UINT32_MAX)
2318                                 input_set_o |= ICE_INSET_GTPU_TEID;
2319
2320                         filter->input.gtpu_data.teid = gtp_spec->teid;
2321                         break;
2322                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
2323                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
2324                         gtp_psc_spec = item->spec;
2325                         gtp_psc_mask = item->mask;
2326
2327                         if (!(gtp_psc_spec && gtp_psc_mask))
2328                                 break;
2329
2330                         if (gtp_psc_mask->hdr.qfi == 0x3F)
2331                                 input_set_o |= ICE_INSET_GTPU_QFI;
2332
2333                         filter->input.gtpu_data.qfi =
2334                                 gtp_psc_spec->hdr.qfi;
2335                         break;
2336                 case RTE_FLOW_ITEM_TYPE_ESP:
2337                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2338                             l4 == RTE_FLOW_ITEM_TYPE_UDP)
2339                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
2340                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2341                                  l4 == RTE_FLOW_ITEM_TYPE_UDP)
2342                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
2343                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2344                                  l4 == RTE_FLOW_ITEM_TYPE_END)
2345                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
2346                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2347                                  l4 == RTE_FLOW_ITEM_TYPE_END)
2348                                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
2349
2350                         esp_spec = item->spec;
2351                         esp_mask = item->mask;
2352
2353                         if (!(esp_spec && esp_mask))
2354                                 break;
2355
2356                         if (esp_mask->hdr.spi == UINT32_MAX) {
2357                                 if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
2358                                         *input_set |= ICE_INSET_NAT_T_ESP_SPI;
2359                                 else
2360                                         *input_set |= ICE_INSET_ESP_SPI;
2361                         }
2362
2363                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2364                                 filter->input.ip.v4.sec_parm_idx =
2365                                         esp_spec->hdr.spi;
2366                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2367                                 filter->input.ip.v6.sec_parm_idx =
2368                                         esp_spec->hdr.spi;
2369                         break;
2370                 default:
2371                         rte_flow_error_set(error, EINVAL,
2372                                            RTE_FLOW_ERROR_TYPE_ITEM,
2373                                            item,
2374                                            "Invalid pattern item.");
2375                         return -rte_errno;
2376                 }
2377         }
2378
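             /* The loop above classified the innermost headers; with the
              * tunnel type known, promote the flow type to its
              * tunnel-specific variant.
              */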
2379         if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2380                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2381                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
2382         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2383                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2384                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
2385         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2386                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2387                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
2388         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2389                 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2390                 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
2391         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2392                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2393                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
2394         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2395                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
2396                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
2397         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2398                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
2399                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
2400         else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2401                 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
2402                 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
2403
2404         filter->tunnel_type = tunnel_type;
2405         filter->input.flow_type = flow_type;
2406         filter->input_set_o = input_set_o;
2407         filter->input_set_i = input_set_i;
2408
2409         return 0;
2410 }
2411
2412 static int
2413 ice_fdir_parse(struct ice_adapter *ad,
2414                struct ice_pattern_match_item *array,
2415                uint32_t array_len,
2416                const struct rte_flow_item pattern[],
2417                const struct rte_flow_action actions[],
2418                uint32_t priority,
2419                void **meta,
2420                struct rte_flow_error *error)
2421 {
2422         struct ice_pf *pf = &ad->pf;
2423         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2424         struct ice_pattern_match_item *item = NULL;
2425         uint64_t input_set;
2426         bool raw = false;
2427         int ret;
2428
2429         memset(filter, 0, sizeof(*filter));
2430         item = ice_search_pattern_match_item(ad, pattern, array, array_len,
2431                                              error);
2432
2433         if (!ad->devargs.pipe_mode_support && priority >= 1)
2434                 return -rte_errno;
2435
2436         if (!item)
2437                 return -rte_errno;
2438
2439         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2440         if (ret)
2441                 goto error;
2442
2443         if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
2444                 raw = true;
2445
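             /* A raw pattern carries no per-field input set; inverting the
              * (empty) set lets it pass the non-empty check below.
              */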
2446         input_set = filter->input_set_o | filter->input_set_i;
2447         input_set = raw ? ~input_set : input_set;
2448
2449         if (!input_set || filter->input_set_o &
2450             ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
2451             filter->input_set_i & ~item->input_set_mask_i) {
2452                 rte_flow_error_set(error, EINVAL,
2453                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2454                                    pattern,
2455                                    "Invalid input set");
2456                 ret = -rte_errno;
2457                 goto error;
2458         }
2459
2460         ret = ice_fdir_parse_action(ad, actions, error, filter);
2461         if (ret)
2462                 goto error;
2463
2464         if (meta)
2465                 *meta = filter;
2466
2467         rte_free(item);
2468         return ret;
2469 error:
2470         rte_free(filter->prof);
2471         rte_free(filter->pkt_buf);
2472         rte_free(item);
2473         return ret;
2474 }
2475
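/* Illustrative testpmd flow this engine would accept (queue index and
 * mark ID are arbitrary examples, not values taken from this file):
 *   flow create 0 ingress pattern eth / ipv4 / udp dst is 4789 / end
 *        actions queue index 3 / mark id 0x1234 / count / end
 * Only one destination action is allowed and the queue index must be
 * below nb_rx_queues (see ice_fdir_parse_action()).
 */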
2476 static struct ice_flow_parser ice_fdir_parser = {
2477         .engine = &ice_fdir_engine,
2478         .array = ice_fdir_pattern_list,
2479         .array_len = RTE_DIM(ice_fdir_pattern_list),
2480         .parse_pattern_action = ice_fdir_parse,
2481         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2482 };
2483
2484 RTE_INIT(ice_fdir_engine_register)
2485 {
2486         ice_register_flow_engine(&ice_fdir_engine);
2487 }