net/ice: support src MAC filter for flow director
drivers/net/ice/ice_fdir_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH (\
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_FDIR_INSET_ETH | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_GTPU_EH (\
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

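/* Pattern tables: each entry maps a supported rte_flow pattern to the
 * input set the FDIR engine accepts for it, for the OS-default ("os")
 * and comms DDP packages respectively; the comms package additionally
 * supports the plain Ethernet (MAC/ethertype) and GTP-U patterns.
 * Illustrative testpmd rule exercising the src MAC support added here
 * (example only, not part of this file):
 *   flow create 0 ingress pattern eth src is 00:11:22:33:44:55 / ipv4 /
 *        end actions queue index 1 / end
 */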
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_ethertype,            ICE_FDIR_INSET_ETH,                   ICE_INSET_NONE},
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu,        ICE_FDIR_INSET_GTPU,                  ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh,     ICE_FDIR_INSET_GTPU_EH,               ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

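/* Look up a memzone by name and reuse it if it already exists; otherwise
 * reserve a new IOVA-contiguous one (used for the FDIR programming packet).
 */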
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

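/* Allocate one ice_fd_hw_prof per filter ptype, rolling back all prior
 * allocations on failure.
 */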
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;

        return -ENOMEM;
}

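/* Wrap a block of hardware counter indexes [index_start, index_start + len)
 * in an ice_fdir_counter_pool and append it to the container.
 */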
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++) {
                rte_free(container->pools[i]);
                container->pools[i] = NULL;
        }

        TAILQ_INIT(&container->pool_list);
        container->index_free = 0;

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

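/* Allocate an FDIR counter. A shared counter with a matching ID is reused
 * with its reference count bumped; otherwise the first free counter of any
 * pool is taken and its hardware statistic registers are zeroed.
 */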
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

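/* Create the per-port hash table (keyed by ice_fdir_fltr_pattern) and the
 * map from hash slot to software filter entry.
 */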
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);

        fdir_info->hash_map = NULL;
        fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* Enable FDIR MSIX interrupt */
        vsi->nb_used_qps = 1;
        ice_vsi_queues_bind_intr(vsi);
        ice_vsi_enable_queues_intr(vsi);

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;
        pf->fdir.mz = mz;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_prof;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_prof:
        rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                rte_free(hw->fdir_prof[ptype]);
                hw->fdir_prof[ptype] = NULL;
        }

        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw, ICE_BLK_FD,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        ice_vsi_disable_queues_intr(vsi);

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;

        if (pf->fdir.mz) {
                err = rte_memzone_free(pf->fdir.mz);
                pf->fdir.mz = NULL;
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
        }
}

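/* Check the existing profile of @ptype for an input set conflict: return
 * -EEXIST if an identical profile is already programmed, -EINVAL if a
 * conflicting profile is still referenced by rules, and 0 otherwise (an
 * empty conflicting profile is removed on the way).
 */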
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
                           enum ice_fltr_ptype ptype,
                           struct ice_flow_seg_info *seg,
                           bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!ori_seg)
                return 0;

        /* if the profile is identical (no input set conflict), reuse it */
        if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
            (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
                PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
                            ptype);
                return -EEXIST;
        }

        /* a rule with a conflicting input set already exists, so give up */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
                PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
                            ptype);
                return -EINVAL;
        }

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);
        return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
                               enum ice_fltr_ptype ptype,
                               bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_seg_info *seg;

        hw_prof = hw->fdir_prof[ptype];
        seg = hw_prof->fdir_seg[is_tunnel];

        /* profile does not exist */
        if (!seg)
                return true;

        /* profile exists and a rule exists, fail to resolve the conflict */
        if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
                return false;

        /* it's safe to delete an empty profile */
        ice_fdir_prof_rm(pf, ptype, is_tunnel);

        return true;
}

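/* A protocol-specific profile (e.g. IPV4_UDP) conflicts with the matching
 * "OTHER" profile and vice versa: remove the conflicting profile if it has
 * no rules attached, otherwise report the conflict.
 */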
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
                             enum ice_fltr_ptype ptype,
                             bool is_tunnel)
{
        enum ice_fltr_ptype cflct_ptype;

        switch (ptype) {
        /* IPv4 */
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv4 GTPU */
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        /* IPv6 */
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                if (!ice_fdir_prof_resolve_conflict
                        (pf, cflct_ptype, is_tunnel))
                        goto err;
                break;
        default:
                break;
        }
        return 0;
err:
        PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
                    ptype, cflct_ptype);
        return -EINVAL;
}

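/* Program a flow profile for @ptype into the FD block: add the profile,
 * then one flow entry for the main VSI and one for the FDIR control VSI,
 * recording both entry handles so they can be removed later.
 */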
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        /* check if the input set conflicts with the current profile */
        ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
        if (ret)
                return ret;

        /* check if the profile conflicts with other profiles */
        ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
        if (ret)
                return ret;

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        hw_prof = hw->fdir_prof[ptype];
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

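/* Translate an ICE_INSET_* bitmap into the list of ice_flow_field indexes
 * consumed by ice_flow_set_fld(); bits without a mapping are ignored.
 */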
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_SMAC, ICE_FLOW_FIELD_IDX_ETH_SA},
                {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

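/* Build the flow segment(s) for @flow from @input_set and program them.
 * Tunnel profiles use two segments (outer + inner); -EEXIST from an
 * identical already-programmed profile is treated as success.
 */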
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, enum ice_fdir_tunnel_type ttype)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        bool is_tunnel;
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
                                  ICE_FLOW_SEG_HDR_IPV_OTHER);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
                        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
                                          ICE_FLOW_SEG_HDR_IPV4 |
                                          ICE_FLOW_SEG_HDR_IPV_OTHER);
                else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
                        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                          ICE_FLOW_SEG_HDR_GTPU_IP |
                                          ICE_FLOW_SEG_HDR_IPV4 |
                                          ICE_FLOW_SEG_HDR_IPV_OTHER);
                else
                        PMD_DRV_LOG(ERR, "Unsupported tunnel type.");
                break;
        case ICE_FLTR_PTYPE_NON_IP_L2:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        is_tunnel = ice_fdir_is_tunnel_profile(ttype);
        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EEXIST) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

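/* Engine init hook: set up FDIR resources and register the flow parser
 * matching the active DDP package type; skipped entirely when DCF is
 * enabled.
 */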
static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        if (ad->hw.dcf_enabled)
                return 0;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
                parser = &ice_fdir_parser_os;
        else
                return -EINVAL;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->hw.dcf_enabled)
                return;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

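/* Program (or remove) a single filter in hardware: build the programming
 * descriptor and the dummy packet, then push them through the FDIR
 * programming queue.
 */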
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to generate dummy packet");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;

        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

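/* Engine create hook: reject duplicates via the hash table, configure the
 * input set profile, optionally allocate a counter, program the filter in
 * hardware, and insert the software entry.
 */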
1229 static int
1230 ice_fdir_create_filter(struct ice_adapter *ad,
1231                        struct rte_flow *flow,
1232                        void *meta,
1233                        struct rte_flow_error *error)
1234 {
1235         struct ice_pf *pf = &ad->pf;
1236         struct ice_fdir_filter_conf *filter = meta;
1237         struct ice_fdir_info *fdir_info = &pf->fdir;
1238         struct ice_fdir_filter_conf *entry, *node;
1239         struct ice_fdir_fltr_pattern key;
1240         bool is_tun;
1241         int ret;
1242
1243         ice_fdir_extract_fltr_key(&key, filter);
1244         node = ice_fdir_entry_lookup(fdir_info, &key);
1245         if (node) {
1246                 rte_flow_error_set(error, EEXIST,
1247                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1248                                    "Rule already exists!");
1249                 return -rte_errno;
1250         }
1251
1252         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1253         if (!entry) {
1254                 rte_flow_error_set(error, ENOMEM,
1255                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1256                                    "Failed to allocate memory");
1257                 return -rte_errno;
1258         }
1259
1260         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1261
1262         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1263                         filter->input_set, filter->tunnel_type);
1264         if (ret) {
1265                 rte_flow_error_set(error, -ret,
1266                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1267                                    "Profile configure failed.");
1268                 goto free_entry;
1269         }
1270
1271         /* alloc counter for FDIR */
1272         if (filter->input.cnt_ena) {
1273                 struct rte_flow_action_count *act_count = &filter->act_count;
1274
1275                 filter->counter = ice_fdir_counter_alloc(pf,
1276                                                          act_count->shared,
1277                                                          act_count->id);
1278                 if (!filter->counter) {
1279                         rte_flow_error_set(error, EINVAL,
1280                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1281                                         "Failed to alloc FDIR counter.");
1282                         goto free_entry;
1283                 }
1284                 filter->input.cnt_index = filter->counter->hw_index;
1285         }
1286
1287         ret = ice_fdir_add_del_filter(pf, filter, true);
1288         if (ret) {
1289                 rte_flow_error_set(error, -ret,
1290                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1291                                    "Add filter rule failed.");
1292                 goto free_counter;
1293         }
1294
1295         rte_memcpy(entry, filter, sizeof(*entry));
1296         ret = ice_fdir_entry_insert(pf, entry, &key);
1297         if (ret) {
1298                 rte_flow_error_set(error, -ret,
1299                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1300                                    "Insert entry to table failed.");
1301                 goto free_entry;
1302         }
1303
1304         flow->rule = entry;
1305         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1306
1307         return 0;
1308
1309 free_counter:
1310         if (filter->counter) {
1311                 ice_fdir_counter_free(pf, filter->counter);
1312                 filter->counter = NULL;
1313         }
1314
1315 free_entry:
1316         rte_free(entry);
1317         return -rte_errno;
1318 }
1319
1320 static int
1321 ice_fdir_destroy_filter(struct ice_adapter *ad,
1322                         struct rte_flow *flow,
1323                         struct rte_flow_error *error)
1324 {
1325         struct ice_pf *pf = &ad->pf;
1326         struct ice_fdir_info *fdir_info = &pf->fdir;
1327         struct ice_fdir_filter_conf *filter, *entry;
1328         struct ice_fdir_fltr_pattern key;
1329         bool is_tun;
1330         int ret;
1331
1332         filter = (struct ice_fdir_filter_conf *)flow->rule;
1333
1334         is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1335
1336         if (filter->counter) {
1337                 ice_fdir_counter_free(pf, filter->counter);
1338                 filter->counter = NULL;
1339         }
1340
1341         ice_fdir_extract_fltr_key(&key, filter);
1342         entry = ice_fdir_entry_lookup(fdir_info, &key);
1343         if (!entry) {
1344                 rte_flow_error_set(error, ENOENT,
1345                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1346                                    "Can't find entry.");
1347                 return -rte_errno;
1348         }
1349
1350         ret = ice_fdir_add_del_filter(pf, filter, false);
1351         if (ret) {
1352                 rte_flow_error_set(error, -ret,
1353                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1354                                    "Del filter rule failed.");
1355                 return -rte_errno;
1356         }
1357
1358         ret = ice_fdir_entry_del(pf, &key);
1359         if (ret) {
1360                 rte_flow_error_set(error, -ret,
1361                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1362                                    "Remove entry from table failed.");
1363                 return -rte_errno;
1364         }
1365
1366         ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1367         flow->rule = NULL;
1368
1369         rte_free(filter);
1370
1371         return 0;
1372 }
1373
1374 static int
1375 ice_fdir_query_count(struct ice_adapter *ad,
1376                       struct rte_flow *flow,
1377                       struct rte_flow_query_count *flow_stats,
1378                       struct rte_flow_error *error)
1379 {
1380         struct ice_pf *pf = &ad->pf;
1381         struct ice_hw *hw = ICE_PF_TO_HW(pf);
1382         struct ice_fdir_filter_conf *filter = flow->rule;
1383         struct ice_fdir_counter *counter = filter->counter;
1384         uint64_t hits_lo, hits_hi;
1385
1386         if (!counter) {
1387                 rte_flow_error_set(error, EINVAL,
1388                                   RTE_FLOW_ERROR_TYPE_ACTION,
1389                                   NULL,
1390                                   "FDIR counters not available");
1391                 return -rte_errno;
1392         }
1393
1394         /*
1395          * Reading the low 32-bits latches the high 32-bits into a shadow
1396          * register. Reading the high 32-bit returns the value in the
1397          * shadow register.
1398          */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* Reset the statistics counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

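/*
 * Parse an RSS action into an FDIR queue group (queue region): the queues
 * must be contiguous and the group size must be a power of two, at most
 * ICE_FDIR_MAX_QREGION_SIZE. For illustration only, assuming testpmd flow
 * syntax, a rule spreading matched packets over queues 4-7 could be:
 *
 *     flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end
 *          actions rss queues 4 5 6 7 end / end
 */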
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* Check that the queue indices of the region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size must be one of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total "
                                   "number of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

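/*
 * Parse the action list of a flow rule. At most one fate action (queue,
 * drop, passthru or an RSS queue group), one mark and one count action
 * are accepted; mark/count without a fate action falls back to PASSTHRU.
 * For illustration only, assuming testpmd flow syntax:
 *
 *     flow create 0 ingress pattern eth / ipv4 / tcp / end
 *          actions queue index 3 / mark id 1 / count / end
 */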
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        if (dest_num + mark_num + counter_num == 0) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Empty action");
                return -rte_errno;
        }

        /* Default to PASSTHRU mode in the mark/count-only case. */
        if (dest_num == 0)
                filter->input.dest_ctl =
                        ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;

        return 0;
}

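/*
 * Parse the pattern items of a flow rule into an FDIR input set and flow
 * type. Source MAC matching is supported in addition to destination MAC.
 * For illustration only, assuming testpmd flow syntax, a rule matching on
 * the source MAC could be:
 *
 *     flow create 0 ingress pattern eth src is 00:11:22:33:44:55 /
 *          ipv4 / udp dst is 4789 / end actions queue index 2 / end
 */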
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;
        uint16_t ether_type;
        enum rte_flow_item_type next_type;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                                        input_set |= ICE_INSET_DMAC;
                                        rte_memcpy(&filter->input.ext_data.dst_mac,
                                                   &eth_spec->dst,
                                                   RTE_ETHER_ADDR_LEN);
                                }

                                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                                        input_set |= ICE_INSET_SMAC;
                                        rte_memcpy(&filter->input.ext_data.src_mac,
                                                   &eth_spec->src,
                                                   RTE_ETHER_ADDR_LEN);
                                }

                                /* The ether type is only matched when ETH is the
                                 * last pattern item (ICE_FLTR_PTYPE_NON_IP_L2);
                                 * otherwise it is ignored.
                                 */
                                if (eth_mask->type == RTE_BE16(0xffff) &&
                                    next_type == RTE_FLOW_ITEM_TYPE_END) {
                                        input_set |= ICE_INSET_ETHERTYPE;
                                        ether_type = rte_be_to_cpu_16(eth_spec->type);

                                        if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                            ether_type == RTE_ETHER_TYPE_IPV6) {
                                                rte_flow_error_set(error, EINVAL,
                                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                                   item,
                                                                   "Unsupported ether_type.");
                                                return -rte_errno;
                                        }

                                        rte_memcpy(&filter->input.ext_data.ether_type,
                                                   &eth_spec->type,
                                                   sizeof(eth_spec->type));
                                        flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                    ipv4_mask->hdr.total_length ||
                                    ipv4_mask->hdr.packet_id ||
                                    ipv4_mask->hdr.fragment_offset ||
                                    ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_IPV4_SRC :
                                                     ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_IPV4_DST :
                                                     ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;

                                filter->input.ip.v4.dst_ip =
                                        ipv4_spec->hdr.dst_addr;
                                filter->input.ip.v4.src_ip =
                                        ipv4_spec->hdr.src_addr;
                                filter->input.ip.v4.tos =
                                        ipv4_spec->hdr.type_of_service;
                                filter->input.ip.v4.ttl =
                                        ipv4_spec->hdr.time_to_live;
                                filter->input.ip.v4.proto =
                                        ipv4_spec->hdr.next_proto_id;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (ipv6_spec && ipv6_mask) {
                                /* Check IPv6 mask and update input set */
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;

                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                                rte_memcpy(filter->input.ip.v6.dst_ip,
                                           ipv6_spec->hdr.dst_addr, 16);
                                rte_memcpy(filter->input.ip.v6.src_ip,
                                           ipv6_spec->hdr.src_addr, 16);

                                vtc_flow_cpu =
                                      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                                filter->input.ip.v6.tc =
                                        (uint8_t)(vtc_flow_cpu >>
                                                  ICE_FDIR_IPV6_TC_OFFSET);
                                filter->input.ip.v6.proto =
                                        ipv6_spec->hdr.proto;
                                filter->input.ip.v6.hlim =
                                        ipv6_spec->hdr.hop_limits;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                    tcp_mask->hdr.recv_ack ||
                                    tcp_mask->hdr.data_off ||
                                    tcp_mask->hdr.tcp_flags ||
                                    tcp_mask->hdr.rx_win ||
                                    tcp_mask->hdr.cksum ||
                                    tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_TCP_SRC_PORT :
                                                     ICE_INSET_TCP_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_TCP_DST_PORT :
                                                     ICE_INSET_TCP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        filter->input.ip.v4.src_port =
                                                tcp_spec->hdr.src_port;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        filter->input.ip.v6.src_port =
                                                tcp_spec->hdr.src_port;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_UDP_SRC_PORT :
                                                     ICE_INSET_UDP_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_UDP_DST_PORT :
                                                     ICE_INSET_UDP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                udp_spec->hdr.dst_port;
                                        filter->input.ip.v4.src_port =
                                                udp_spec->hdr.src_port;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.src_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v6.dst_port =
                                                udp_spec->hdr.dst_port;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_SCTP_SRC_PORT :
                                                     ICE_INSET_SCTP_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_SCTP_DST_PORT :
                                                     ICE_INSET_SCTP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        filter->input.ip.v4.src_port =
                                                sctp_spec->hdr.src_port;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        filter->input.ip.v6.src_port =
                                                sctp_spec->hdr.src_port;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;

                        if (vxlan_spec || vxlan_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN field");
                                return -rte_errno;
                        }

                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTPU:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                    gtp_mask->msg_type ||
                                    gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX)
                                        input_set |= ICE_INSET_GTPU_TEID;

                                filter->input.gtpu_data.teid = gtp_spec->teid;
                        }

                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX)
                                        input_set |= ICE_INSET_GTPU_QFI;

                                filter->input.gtpu_data.qfi =
                                        gtp_psc_spec->qfi;
                        }
                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU ||
            tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

        filter->tunnel_type = tunnel_type;
        filter->input.flow_type = flow_type;
        filter->input_set = input_set;

        return 0;
}

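/*
 * Top-level parse callback of the FDIR engine: look the pattern up in the
 * supported-pattern table, extract the input set, check it against the
 * mask allowed for that pattern, then parse the actions. On success the
 * filter configuration is handed back through @meta for the create stage;
 * the matched table entry is freed on both the success and error paths.
 */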
static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                goto error;
        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                ret = -rte_errno;
                goto error;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                goto error;

        if (meta)
                *meta = filter;
error:
        rte_free(item);
        return ret;
}

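/*
 * Both parsers share the same engine and parse callback; they differ only
 * in the supported-pattern table, chosen according to the loaded DDP
 * package (OS default vs. comms).
 */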
static struct ice_flow_parser ice_fdir_parser_os = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_os,
        .array_len = RTE_DIM(ice_fdir_pattern_os),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_comms,
        .array_len = RTE_DIM(ice_fdir_pattern_comms),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}