net/ice: support flow director GTPU tunnel
drivers/net/ice/ice_fdir_filter.c (dpdk.git)
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
        ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_GTPU_IPV4_TCP (\
        ICE_FDIR_INSET_GTPU_IPV4)

#define ICE_FDIR_INSET_GTPU_IPV4_UDP (\
        ICE_FDIR_INSET_GTPU_IPV4)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4,   ICE_FDIR_INSET_GTPU_IPV4,             ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4_tcp,
                                       ICE_FDIR_INSET_GTPU_IPV4_TCP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4_udp,
                                       ICE_FDIR_INSET_GTPU_IPV4_UDP,         ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

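/* Allocate the FDIR HW profile array and one profile entry per filter ptype */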
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        return -ENOMEM;
}

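/* Add a pool of 'len' HW counters starting at 'index_start' to the container */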
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

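/* Initialize the counter container with the first block of FDIR counters */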
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

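/* Search all pools for an in-use shared counter with the given ID */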
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

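/* Allocate a counter: reuse a matching shared counter or take the first free one */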
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

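/* Drop one reference to a counter and return it to its pool when unused */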
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

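/* Create the hash table and map used to track FDIR filters in SW */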
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

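/* Configure a HW flow profile and add flow entries for the main and control VSI */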
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

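/* Translate an input set bitmap into the list of HW flow fields to extract */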
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
                {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

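/* Build the flow segment(s) for a filter type and configure the HW profile */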
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
        case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_parser *parser;

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                parser = &ice_fdir_parser_comms;
        else
                parser = &ice_fdir_parser_os;

        ice_unregister_parser(parser, ad);

        ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
                return 1;
        else
                return 0;
}

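/* Build the programming descriptor and dummy packet, then program or remove the filter */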
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;
        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
        rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table: %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

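/* Create a FDIR filter: configure the profile, program the HW and track the rule in SW */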
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}


static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

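/* Query, and optionally reset, the packet hit count of a filter's counter */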
static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32-bits latches the high 32-bits into a shadow
         * register. Reading the high 32-bit returns the value in the
         * shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

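/* Validate an RSS action as a queue region: contiguous queues with power-of-2 size */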
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if queue index for queue region is continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

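/* Parse actions: exactly one fate action, at most one mark and one count action */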
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

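/* Parse pattern items into flow type, input set and tunnel type */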
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
1398                                         "Not support range");
1399                         return -rte_errno;
1400                 }
1401                 item_type = item->type;
1402
1403                 switch (item_type) {
1404                 case RTE_FLOW_ITEM_TYPE_ETH:
1405                         eth_spec = item->spec;
1406                         eth_mask = item->mask;
1407
1408                         if (eth_spec && eth_mask) {
1409                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1410                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1411                                         rte_flow_error_set(error, EINVAL,
1412                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1413                                                 item,
1414                                                 "Src mac not support");
1415                                         return -rte_errno;
1416                                 }
1417
1418                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1419                                         rte_flow_error_set(error, EINVAL,
1420                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1421                                                 item,
1422                                                 "Invalid mac addr mask");
1423                                         return -rte_errno;
1424                                 }
1425
1426                                 input_set |= ICE_INSET_DMAC;
1427                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1428                                            &eth_spec->dst,
1429                                            RTE_ETHER_ADDR_LEN);
1430                         }
1431                         break;
1432                 case RTE_FLOW_ITEM_TYPE_IPV4:
1433                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1434                         ipv4_spec = item->spec;
1435                         ipv4_mask = item->mask;
1436
1437                         if (ipv4_spec && ipv4_mask) {
1438                                 /* Check IPv4 mask and update input set */
1439                                 if (ipv4_mask->hdr.version_ihl ||
1440                                     ipv4_mask->hdr.total_length ||
1441                                     ipv4_mask->hdr.packet_id ||
1442                                     ipv4_mask->hdr.fragment_offset ||
1443                                     ipv4_mask->hdr.hdr_checksum) {
1444                                         rte_flow_error_set(error, EINVAL,
1445                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1446                                                    item,
1447                                                    "Invalid IPv4 mask.");
1448                                         return -rte_errno;
1449                                 }
1450                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1451                                         input_set |= tunnel_type ?
1452                                                      ICE_INSET_TUN_IPV4_SRC :
1453                                                      ICE_INSET_IPV4_SRC;
1454                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1455                                         input_set |= tunnel_type ?
1456                                                      ICE_INSET_TUN_IPV4_DST :
1457                                                      ICE_INSET_IPV4_DST;
1458                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1459                                         input_set |= ICE_INSET_IPV4_TOS;
1460                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1461                                         input_set |= ICE_INSET_IPV4_TTL;
1462                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1463                                         input_set |= ICE_INSET_IPV4_PROTO;
1464
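                                /*
                                 * Note: src and dst are stored swapped on
                                 * purpose; the base code that builds the
                                 * FDIR programming packet writes src fields
                                 * at dst offsets and vice versa, so the two
                                 * swaps cancel out.  The same applies to
                                 * the L4 ports in the TCP/UDP/SCTP cases
                                 * below.
                                 */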
                                filter->input.ip.v4.dst_ip =
                                        ipv4_spec->hdr.src_addr;
                                filter->input.ip.v4.src_ip =
                                        ipv4_spec->hdr.dst_addr;
                                filter->input.ip.v4.tos =
                                        ipv4_spec->hdr.type_of_service;
                                filter->input.ip.v4.ttl =
                                        ipv4_spec->hdr.time_to_live;
                                filter->input.ip.v4.proto =
                                        ipv4_spec->hdr.next_proto_id;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (ipv6_spec && ipv6_mask) {
                                /* Check IPv6 mask and update input set */
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;

                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                                rte_memcpy(filter->input.ip.v6.dst_ip,
                                           ipv6_spec->hdr.src_addr, 16);
                                rte_memcpy(filter->input.ip.v6.src_ip,
                                           ipv6_spec->hdr.dst_addr, 16);

                                vtc_flow_cpu =
                                      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                                filter->input.ip.v6.tc =
                                        (uint8_t)(vtc_flow_cpu >>
                                                  ICE_FDIR_IPV6_TC_OFFSET);
                                filter->input.ip.v6.proto =
                                        ipv6_spec->hdr.proto;
                                filter->input.ip.v6.hlim =
                                        ipv6_spec->hdr.hop_limits;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                    tcp_mask->hdr.recv_ack ||
                                    tcp_mask->hdr.data_off ||
                                    tcp_mask->hdr.tcp_flags ||
                                    tcp_mask->hdr.rx_win ||
                                    tcp_mask->hdr.cksum ||
                                    tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_TCP_SRC_PORT :
                                                     ICE_INSET_TCP_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_TCP_DST_PORT :
                                                     ICE_INSET_TCP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_UDP_SRC_PORT :
                                                     ICE_INSET_UDP_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_UDP_DST_PORT :
                                                     ICE_INSET_UDP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        /* Swap, matching the other L4 cases */
                                        filter->input.ip.v6.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_SCTP_SRC_PORT :
                                                     ICE_INSET_SCTP_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_SCTP_DST_PORT :
                                                     ICE_INSET_SCTP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
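                /*
                 * VXLAN matching is on presence only: specifying a VNI or
                 * any other field is rejected.  The item switches the
                 * parser into tunnel mode, so inner L3/L4 fields use the
                 * ICE_INSET_TUN_* bits, and resets the l3 tracker for the
                 * inner headers.
                 */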
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;

                        if (vxlan_spec || vxlan_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN field");
                                return -rte_errno;
                        }

                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
                        break;
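                /*
                 * For GTP-U, only the TEID can be matched from this item;
                 * the flags, message type and message length fields must
                 * be left unmasked.  The QFI comes from the separate
                 * GTP_PSC item handled below.
                 */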
                case RTE_FLOW_ITEM_TYPE_GTPU:
                        l3 = RTE_FLOW_ITEM_TYPE_END;
                        gtp_spec = item->spec;
                        gtp_mask = item->mask;

                        if (gtp_spec && gtp_mask) {
                                if (gtp_mask->v_pt_rsv_flags ||
                                    gtp_mask->msg_type ||
                                    gtp_mask->msg_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GTP mask");
                                        return -rte_errno;
                                }

                                if (gtp_mask->teid == UINT32_MAX)
                                        input_set |= ICE_INSET_GTPU_TEID;

                                filter->input.gtpu_data.teid = gtp_spec->teid;
                        }
                        break;
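                /*
                 * GTP_PSC carries the QoS flow identifier (QFI) of the
                 * PDU session container extension header.  Its presence,
                 * with or without a spec, marks the flow as a GTP-U
                 * tunnel.
                 */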
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        gtp_psc_spec = item->spec;
                        gtp_psc_mask = item->mask;

                        if (gtp_psc_spec && gtp_psc_mask) {
                                if (gtp_psc_mask->qfi == UINT8_MAX)
                                        input_set |= ICE_INSET_GTPU_QFI;

                                filter->input.gtpu_data.qfi =
                                        gtp_psc_spec->qfi;
                        }

                        tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Invalid pattern item.");
                        return -rte_errno;
                }
        }

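        /*
         * Only one GTP-U profile is supported so far (outer IPv4, inner
         * IPv4, any payload), so the flow type is forced here instead of
         * being derived from the inner items.
         */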
        if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

        filter->tunnel_type = tunnel_type;
        filter->input.flow_type = flow_type;
        filter->input_set = input_set;

        return 0;
}

static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                return ret;
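        /*
         * The rule must match at least one field, and every matched field
         * must belong to the input set allowed by the matched pattern
         * template.
         */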
        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                return ret;

        *meta = filter;

        return 0;
}

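/*
 * Two parsers share the same parse routine; which one is registered
 * depends on the DDP package loaded on the device: the OS default
 * package supports the basic patterns only, while the comms package
 * adds the tunnel (GTP-U) patterns.
 */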
static struct ice_flow_parser ice_fdir_parser_os = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_os,
        .array_len = RTE_DIM(ice_fdir_pattern_os),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern_comms,
        .array_len = RTE_DIM(ice_fdir_pattern_comms),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

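/*
 * Constructor: register the FDIR engine with the generic flow framework
 * when the driver library is loaded.
 */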
RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}