net/ice: reject duplicated flow for flow director
[dpdk.git] / drivers / net / ice / ice_fdir_filter.c
1 #include <stdio.h>
2 #include <rte_flow.h>
3 #include <rte_hash.h>
4 #include <rte_hash_crc.h>
5 #include "base/ice_fdir.h"
6 #include "base/ice_flow.h"
7 #include "base/ice_type.h"
8 #include "ice_ethdev.h"
9 #include "ice_rxtx.h"
10 #include "ice_generic_flow.h"
11
12 #define ICE_FDIR_IPV6_TC_OFFSET         20
13 #define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
14
15 #define ICE_FDIR_MAX_QREGION_SIZE       128
16
17 #define ICE_FDIR_INSET_ETH_IPV4 (\
18         ICE_INSET_DMAC | \
19         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
20         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
21
22 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
23         ICE_FDIR_INSET_ETH_IPV4 | \
24         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
25
26 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
27         ICE_FDIR_INSET_ETH_IPV4 | \
28         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
29
30 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
31         ICE_FDIR_INSET_ETH_IPV4 | \
32         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
33
34 #define ICE_FDIR_INSET_ETH_IPV6 (\
35         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
36         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
37
38 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
39         ICE_FDIR_INSET_ETH_IPV6 | \
40         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
41
42 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
43         ICE_FDIR_INSET_ETH_IPV6 | \
44         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
45
46 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
47         ICE_FDIR_INSET_ETH_IPV6 | \
48         ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
49
/* Supported flow-director patterns: {pattern, allowed input-set bitmap,
 * metadata bitmap}.  Presumably consumed by ice_fdir_parser (initializer
 * not visible in this chunk) — confirm against the parser definition.
 */
static struct ice_pattern_match_item ice_fdir_pattern[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
};
60
61 static struct ice_flow_parser ice_fdir_parser;
62
63 static const struct rte_memzone *
64 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
65 {
66         return rte_memzone_reserve_aligned(name, len, socket_id,
67                                            RTE_MEMZONE_IOVA_CONTIG,
68                                            ICE_RING_BASE_ALIGN);
69 }
70
71 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
72
73 static int
74 ice_fdir_prof_alloc(struct ice_hw *hw)
75 {
76         enum ice_fltr_ptype ptype, fltr_ptype;
77
78         if (!hw->fdir_prof) {
79                 hw->fdir_prof = (struct ice_fd_hw_prof **)
80                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
81                                    sizeof(*hw->fdir_prof));
82                 if (!hw->fdir_prof)
83                         return -ENOMEM;
84         }
85         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
86              ptype < ICE_FLTR_PTYPE_MAX;
87              ptype++) {
88                 if (!hw->fdir_prof[ptype]) {
89                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
90                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
91                         if (!hw->fdir_prof[ptype])
92                                 goto fail_mem;
93                 }
94         }
95         return 0;
96
97 fail_mem:
98         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
99              fltr_ptype < ptype;
100              fltr_ptype++)
101                 rte_free(hw->fdir_prof[fltr_ptype]);
102         rte_free(hw->fdir_prof);
103         return -ENOMEM;
104 }
105
106 static int
107 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
108                           struct ice_fdir_counter_pool_container *container,
109                           uint32_t index_start,
110                           uint32_t len)
111 {
112         struct ice_fdir_counter_pool *pool;
113         uint32_t i;
114         int ret = 0;
115
116         pool = rte_zmalloc("ice_fdir_counter_pool",
117                            sizeof(*pool) +
118                            sizeof(struct ice_fdir_counter) * len,
119                            0);
120         if (!pool) {
121                 PMD_INIT_LOG(ERR,
122                              "Failed to allocate memory for fdir counter pool");
123                 return -ENOMEM;
124         }
125
126         TAILQ_INIT(&pool->counter_list);
127         TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
128
129         for (i = 0; i < len; i++) {
130                 struct ice_fdir_counter *counter = &pool->counters[i];
131
132                 counter->hw_index = index_start + i;
133                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
134         }
135
136         if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
137                 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
138                 ret = -EINVAL;
139                 goto free_pool;
140         }
141
142         container->pools[container->index_free++] = pool;
143         return 0;
144
145 free_pool:
146         rte_free(pool);
147         return ret;
148 }
149
150 static int
151 ice_fdir_counter_init(struct ice_pf *pf)
152 {
153         struct ice_hw *hw = ICE_PF_TO_HW(pf);
154         struct ice_fdir_info *fdir_info = &pf->fdir;
155         struct ice_fdir_counter_pool_container *container =
156                                 &fdir_info->counter;
157         uint32_t cnt_index, len;
158         int ret;
159
160         TAILQ_INIT(&container->pool_list);
161
162         cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
163         len = ICE_FDIR_COUNTERS_PER_BLOCK;
164
165         ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
166         if (ret) {
167                 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
168                 return ret;
169         }
170
171         return 0;
172 }
173
174 static int
175 ice_fdir_counter_release(struct ice_pf *pf)
176 {
177         struct ice_fdir_info *fdir_info = &pf->fdir;
178         struct ice_fdir_counter_pool_container *container =
179                                 &fdir_info->counter;
180         uint8_t i;
181
182         for (i = 0; i < container->index_free; i++)
183                 rte_free(container->pools[i]);
184
185         return 0;
186 }
187
188 static struct ice_fdir_counter *
189 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
190                                         *container,
191                                uint32_t id)
192 {
193         struct ice_fdir_counter_pool *pool;
194         struct ice_fdir_counter *counter;
195         int i;
196
197         TAILQ_FOREACH(pool, &container->pool_list, next) {
198                 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
199                         counter = &pool->counters[i];
200
201                         if (counter->shared &&
202                             counter->ref_cnt &&
203                             counter->id == id)
204                                 return counter;
205                 }
206         }
207
208         return NULL;
209 }
210
/**
 * ice_fdir_counter_alloc - allocate a flow-director statistics counter
 * @pf: board private structure
 * @shared: non-zero to share an existing counter with the same @id
 * @id: user-visible counter id (meaningful only when @shared)
 *
 * For shared requests an already-active counter with matching id is
 * reused (reference count bumped).  Otherwise the first free counter of
 * the first non-empty pool is taken and its HW statistic registers are
 * zeroed.  Returns NULL on exhaustion, or with rte_errno = E2BIG when a
 * shared counter's reference count would overflow.
 */
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			/* Refuse a ref count that would wrap to zero. */
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	/* Take the first pool that still has a free counter. */
	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found\n");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	/* If the pool just became empty, rotate it to the tail so the
	 * next allocation scans pools with free counters first.
	 */
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}
262
263 static void
264 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
265                       struct ice_fdir_counter *counter)
266 {
267         if (!counter)
268                 return;
269
270         if (--counter->ref_cnt == 0) {
271                 struct ice_fdir_counter_pool *pool = counter->pool;
272
273                 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
274         }
275 }
276
277 static int
278 ice_fdir_init_filter_list(struct ice_pf *pf)
279 {
280         struct rte_eth_dev *dev = pf->adapter->eth_dev;
281         struct ice_fdir_info *fdir_info = &pf->fdir;
282         char fdir_hash_name[RTE_HASH_NAMESIZE];
283         int ret;
284
285         struct rte_hash_parameters fdir_hash_params = {
286                 .name = fdir_hash_name,
287                 .entries = ICE_MAX_FDIR_FILTER_NUM,
288                 .key_len = sizeof(struct ice_fdir_fltr_pattern),
289                 .hash_func = rte_hash_crc,
290                 .hash_func_init_val = 0,
291                 .socket_id = rte_socket_id(),
292         };
293
294         /* Initialize hash */
295         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
296                  "fdir_%s", dev->device->name);
297         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
298         if (!fdir_info->hash_table) {
299                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
300                 return -EINVAL;
301         }
302         fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
303                                           sizeof(*fdir_info->hash_map) *
304                                           ICE_MAX_FDIR_FILTER_NUM,
305                                           0);
306         if (!fdir_info->hash_map) {
307                 PMD_INIT_LOG(ERR,
308                              "Failed to allocate memory for fdir hash map!");
309                 ret = -ENOMEM;
310                 goto err_fdir_hash_map_alloc;
311         }
312         return 0;
313
314 err_fdir_hash_map_alloc:
315         rte_hash_free(fdir_info->hash_table);
316
317         return ret;
318 }
319
320 static void
321 ice_fdir_release_filter_list(struct ice_pf *pf)
322 {
323         struct ice_fdir_info *fdir_info = &pf->fdir;
324
325         if (fdir_info->hash_map)
326                 rte_free(fdir_info->hash_map);
327         if (fdir_info->hash_table)
328                 rte_hash_free(fdir_info->hash_table);
329 }
330
331 /*
332  * ice_fdir_setup - reserve and initialize the Flow Director resources
333  * @pf: board private structure
334  */
335 static int
336 ice_fdir_setup(struct ice_pf *pf)
337 {
338         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
339         struct ice_hw *hw = ICE_PF_TO_HW(pf);
340         const struct rte_memzone *mz = NULL;
341         char z_name[RTE_MEMZONE_NAMESIZE];
342         struct ice_vsi *vsi;
343         int err = ICE_SUCCESS;
344
345         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
346                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
347                 return -ENOTSUP;
348         }
349
350         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
351                     " fd_fltr_best_effort = %u.",
352                     hw->func_caps.fd_fltr_guar,
353                     hw->func_caps.fd_fltr_best_effort);
354
355         if (pf->fdir.fdir_vsi) {
356                 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
357                 return ICE_SUCCESS;
358         }
359
360         /* make new FDIR VSI */
361         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
362         if (!vsi) {
363                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
364                 return -EINVAL;
365         }
366         pf->fdir.fdir_vsi = vsi;
367
368         err = ice_fdir_init_filter_list(pf);
369         if (err) {
370                 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
371                 return -EINVAL;
372         }
373
374         err = ice_fdir_counter_init(pf);
375         if (err) {
376                 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
377                 return -EINVAL;
378         }
379
380         /*Fdir tx queue setup*/
381         err = ice_fdir_setup_tx_resources(pf);
382         if (err) {
383                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
384                 goto fail_setup_tx;
385         }
386
387         /*Fdir rx queue setup*/
388         err = ice_fdir_setup_rx_resources(pf);
389         if (err) {
390                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
391                 goto fail_setup_rx;
392         }
393
394         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
395         if (err) {
396                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
397                 goto fail_mem;
398         }
399
400         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
401         if (err) {
402                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
403                 goto fail_mem;
404         }
405
406         /* reserve memory for the fdir programming packet */
407         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
408                  ICE_FDIR_MZ_NAME,
409                  eth_dev->data->port_id);
410         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
411         if (!mz) {
412                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
413                             "flow director program packet.");
414                 err = -ENOMEM;
415                 goto fail_mem;
416         }
417         pf->fdir.prg_pkt = mz->addr;
418         pf->fdir.dma_addr = mz->iova;
419
420         err = ice_fdir_prof_alloc(hw);
421         if (err) {
422                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
423                             "flow director profile.");
424                 err = -ENOMEM;
425                 goto fail_mem;
426         }
427
428         PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
429                     vsi->base_queue);
430         return ICE_SUCCESS;
431
432 fail_mem:
433         ice_rx_queue_release(pf->fdir.rxq);
434         pf->fdir.rxq = NULL;
435 fail_setup_rx:
436         ice_tx_queue_release(pf->fdir.txq);
437         pf->fdir.txq = NULL;
438 fail_setup_tx:
439         ice_release_vsi(vsi);
440         pf->fdir.fdir_vsi = NULL;
441         return err;
442 }
443
444 static void
445 ice_fdir_prof_free(struct ice_hw *hw)
446 {
447         enum ice_fltr_ptype ptype;
448
449         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
450              ptype < ICE_FLTR_PTYPE_MAX;
451              ptype++)
452                 rte_free(hw->fdir_prof[ptype]);
453
454         rte_free(hw->fdir_prof);
455 }
456
/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	/* Nothing installed for this ptype — nothing to remove. */
	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	/* Tunnel profiles occupy the upper half of the profile-id space. */
	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			/* NOTE(review): 'ptype' is passed here while the
			 * error path of ice_fdir_hw_tbl_conf() passes
			 * 'prof_id' to the same API — confirm which id
			 * ice_rem_prof_id_flow() actually expects.
			 */
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, ptype);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	/* Drop the cached input-set segment so a new one can be set. */
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}
492
493 /* Remove all created profiles */
494 static void
495 ice_fdir_prof_rm_all(struct ice_pf *pf)
496 {
497         enum ice_fltr_ptype ptype;
498
499         for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
500              ptype < ICE_FLTR_PTYPE_MAX;
501              ptype++) {
502                 ice_fdir_prof_rm(pf, ptype, false);
503                 ice_fdir_prof_rm(pf, ptype, true);
504         }
505 }
506
/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 *
 * Reverses ice_fdir_setup(): stops the programming queues, releases the
 * counters, the SW filter list, the queues, all HW profiles and finally
 * the control VSI.  A no-op when FDIR was never set up (fdir_vsi NULL).
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	/* Queue-stop failures are logged but do not abort teardown. */
	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	/* Profiles must be removed before the profile table is freed. */
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}
546
/**
 * ice_fdir_hw_tbl_conf - install an FDIR flow profile and its entries
 * @pf: board private structure
 * @vsi: main VSI that receives matched traffic
 * @ctrl_vsi: FDIR control (programming) VSI
 * @seg: flow segment(s) describing the input set; on success ownership
 *       transfers to hw_prof->fdir_seg[is_tunnel]
 * @ptype: filter type being configured
 * @is_tunnel: true when @seg is the two-segment tunnel layout
 *
 * Returns 0 on success, -EAGAIN when the identical input set is already
 * installed, -EINVAL when a different input set is in use by existing
 * filters, or an error from the ice flow API.
 */
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		/* Same input set already installed -> signal duplicate. */
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			/* NOTE(review): compares the stored segment's first
			 * element against the new inner segment seg[1] —
			 * confirm the intended operands (&ori_seg[1]?).
			 */
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		/* A different input set cannot replace one that live
		 * filters still depend on.
		 */
		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	/* Tunnel profiles use the upper half of the profile-id space. */
	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	/* Record both entries (main + control VSI) in the SW mirror. */
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	/* Roll back the first entry and the profile. */
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
625
626 static void
627 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
628 {
629         uint32_t i, j;
630
631         struct ice_inset_map {
632                 uint64_t inset;
633                 enum ice_flow_field fld;
634         };
635         static const struct ice_inset_map ice_inset_map[] = {
636                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
637                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
638                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
639                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
640                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
641                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
642                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
643                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
644                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
645                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
646                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
647                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
648                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
649                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
650                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
651                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
652                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
653         };
654
655         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
656                 if ((inset & ice_inset_map[i].inset) ==
657                     ice_inset_map[i].inset)
658                         field[j++] = ice_inset_map[i].fld;
659         }
660 }
661
662 static int
663 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
664                         uint64_t input_set, bool is_tunnel)
665 {
666         struct ice_flow_seg_info *seg;
667         struct ice_flow_seg_info *seg_tun = NULL;
668         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
669         int i, ret;
670
671         if (!input_set)
672                 return -EINVAL;
673
674         seg = (struct ice_flow_seg_info *)
675                 ice_malloc(hw, sizeof(*seg));
676         if (!seg) {
677                 PMD_DRV_LOG(ERR, "No memory can be allocated");
678                 return -ENOMEM;
679         }
680
681         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
682                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
683         ice_fdir_input_set_parse(input_set, field);
684
685         switch (flow) {
686         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
687                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
688                                   ICE_FLOW_SEG_HDR_IPV4);
689                 break;
690         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
691                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
692                                   ICE_FLOW_SEG_HDR_IPV4);
693                 break;
694         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
695                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
696                                   ICE_FLOW_SEG_HDR_IPV4);
697                 break;
698         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
699                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
700                 break;
701         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
702                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
703                                   ICE_FLOW_SEG_HDR_IPV6);
704                 break;
705         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
706                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
707                                   ICE_FLOW_SEG_HDR_IPV6);
708                 break;
709         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
710                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
711                                   ICE_FLOW_SEG_HDR_IPV6);
712                 break;
713         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
714                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
715                 break;
716         default:
717                 PMD_DRV_LOG(ERR, "not supported filter type.");
718                 break;
719         }
720
721         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
722                 ice_flow_set_fld(seg, field[i],
723                                  ICE_FLOW_FLD_OFF_INVAL,
724                                  ICE_FLOW_FLD_OFF_INVAL,
725                                  ICE_FLOW_FLD_OFF_INVAL, false);
726         }
727
728         if (!is_tunnel) {
729                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
730                                            seg, flow, false);
731         } else {
732                 seg_tun = (struct ice_flow_seg_info *)
733                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
734                 if (!seg_tun) {
735                         PMD_DRV_LOG(ERR, "No memory can be allocated");
736                         rte_free(seg);
737                         return -ENOMEM;
738                 }
739                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
740                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
741                                            seg_tun, flow, true);
742         }
743
744         if (!ret) {
745                 return ret;
746         } else if (ret < 0) {
747                 rte_free(seg);
748                 if (is_tunnel)
749                         rte_free(seg_tun);
750                 return (ret == -EAGAIN) ? 0 : ret;
751         } else {
752                 return ret;
753         }
754 }
755
756 static void
757 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
758                     bool is_tunnel, bool add)
759 {
760         struct ice_hw *hw = ICE_PF_TO_HW(pf);
761         int cnt;
762
763         cnt = (add) ? 1 : -1;
764         hw->fdir_active_fltr += cnt;
765         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
766                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
767         else
768                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
769 }
770
771 static int
772 ice_fdir_init(struct ice_adapter *ad)
773 {
774         struct ice_pf *pf = &ad->pf;
775         int ret;
776
777         ret = ice_fdir_setup(pf);
778         if (ret)
779                 return ret;
780
781         return ice_register_parser(&ice_fdir_parser, ad);
782 }
783
784 static void
785 ice_fdir_uninit(struct ice_adapter *ad)
786 {
787         struct ice_pf *pf = &ad->pf;
788
789         ice_unregister_parser(&ice_fdir_parser, ad);
790
791         ice_fdir_teardown(pf);
792 }
793
794 static int
795 ice_fdir_add_del_filter(struct ice_pf *pf,
796                         struct ice_fdir_filter_conf *filter,
797                         bool add)
798 {
799         struct ice_fltr_desc desc;
800         struct ice_hw *hw = ICE_PF_TO_HW(pf);
801         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
802         int ret;
803
804         filter->input.dest_vsi = pf->main_vsi->idx;
805
806         memset(&desc, 0, sizeof(desc));
807         ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
808
809         memset(pkt, 0, ICE_FDIR_PKT_LEN);
810         ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
811         if (ret) {
812                 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
813                 return -EINVAL;
814         }
815
816         return ice_fdir_programming(pf, &desc);
817 }
818
819 static void
820 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
821                           struct ice_fdir_filter_conf *filter)
822 {
823         struct ice_fdir_fltr *input = &filter->input;
824         memset(key, 0, sizeof(*key));
825
826         key->flow_type = input->flow_type;
827         rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
828         rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
829         rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
830         rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
831 }
832
833 /* Check if there exists the flow director filter */
834 static struct ice_fdir_filter_conf *
835 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
836                         const struct ice_fdir_fltr_pattern *key)
837 {
838         int ret;
839
840         ret = rte_hash_lookup(fdir_info->hash_table, key);
841         if (ret < 0)
842                 return NULL;
843
844         return fdir_info->hash_map[ret];
845 }
846
847 /* Add a flow director entry into the SW list */
848 static int
849 ice_fdir_entry_insert(struct ice_pf *pf,
850                       struct ice_fdir_filter_conf *entry,
851                       struct ice_fdir_fltr_pattern *key)
852 {
853         struct ice_fdir_info *fdir_info = &pf->fdir;
854         int ret;
855
856         ret = rte_hash_add_key(fdir_info->hash_table, key);
857         if (ret < 0) {
858                 PMD_DRV_LOG(ERR,
859                             "Failed to insert fdir entry to hash table %d!",
860                             ret);
861                 return ret;
862         }
863         fdir_info->hash_map[ret] = entry;
864
865         return 0;
866 }
867
868 /* Delete a flow director entry from the SW list */
869 static int
870 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
871 {
872         struct ice_fdir_info *fdir_info = &pf->fdir;
873         int ret;
874
875         ret = rte_hash_del_key(fdir_info->hash_table, key);
876         if (ret < 0) {
877                 PMD_DRV_LOG(ERR,
878                             "Failed to delete fdir filter to hash table %d!",
879                             ret);
880                 return ret;
881         }
882         fdir_info->hash_map[ret] = NULL;
883
884         return 0;
885 }
886
887 static int
888 ice_fdir_create_filter(struct ice_adapter *ad,
889                        struct rte_flow *flow,
890                        void *meta,
891                        struct rte_flow_error *error)
892 {
893         struct ice_pf *pf = &ad->pf;
894         struct ice_fdir_filter_conf *filter = meta;
895         struct ice_fdir_info *fdir_info = &pf->fdir;
896         struct ice_fdir_filter_conf *entry, *node;
897         struct ice_fdir_fltr_pattern key;
898         int ret;
899
900         ice_fdir_extract_fltr_key(&key, filter);
901         node = ice_fdir_entry_lookup(fdir_info, &key);
902         if (node) {
903                 rte_flow_error_set(error, EEXIST,
904                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
905                                    "Rule already exists!");
906                 return -rte_errno;
907         }
908
909         entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
910         if (!entry) {
911                 rte_flow_error_set(error, ENOMEM,
912                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
913                                    "Failed to allocate memory");
914                 return -rte_errno;
915         }
916
917         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
918                         filter->input_set, false);
919         if (ret) {
920                 rte_flow_error_set(error, -ret,
921                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
922                                    "Profile configure failed.");
923                 goto free_entry;
924         }
925
926         /* alloc counter for FDIR */
927         if (filter->input.cnt_ena) {
928                 struct rte_flow_action_count *act_count = &filter->act_count;
929
930                 filter->counter = ice_fdir_counter_alloc(pf,
931                                                          act_count->shared,
932                                                          act_count->id);
933                 if (!filter->counter) {
934                         rte_flow_error_set(error, EINVAL,
935                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
936                                         "Failed to alloc FDIR counter.");
937                         goto free_entry;
938                 }
939                 filter->input.cnt_index = filter->counter->hw_index;
940         }
941
942         ret = ice_fdir_add_del_filter(pf, filter, true);
943         if (ret) {
944                 rte_flow_error_set(error, -ret,
945                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
946                                    "Add filter rule failed.");
947                 goto free_counter;
948         }
949
950         rte_memcpy(entry, filter, sizeof(*entry));
951         ret = ice_fdir_entry_insert(pf, entry, &key);
952         if (ret) {
953                 rte_flow_error_set(error, -ret,
954                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
955                                    "Insert entry to table failed.");
956                 goto free_entry;
957         }
958
959         flow->rule = entry;
960         ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
961
962         return 0;
963
964 free_counter:
965         if (filter->counter) {
966                 ice_fdir_counter_free(pf, filter->counter);
967                 filter->counter = NULL;
968         }
969
970 free_entry:
971         rte_free(entry);
972         return -rte_errno;
973 }
974
975 static int
976 ice_fdir_destroy_filter(struct ice_adapter *ad,
977                         struct rte_flow *flow,
978                         struct rte_flow_error *error)
979 {
980         struct ice_pf *pf = &ad->pf;
981         struct ice_fdir_info *fdir_info = &pf->fdir;
982         struct ice_fdir_filter_conf *filter, *entry;
983         struct ice_fdir_fltr_pattern key;
984         int ret;
985
986         filter = (struct ice_fdir_filter_conf *)flow->rule;
987
988         if (filter->counter) {
989                 ice_fdir_counter_free(pf, filter->counter);
990                 filter->counter = NULL;
991         }
992
993         ice_fdir_extract_fltr_key(&key, filter);
994         entry = ice_fdir_entry_lookup(fdir_info, &key);
995         if (!entry) {
996                 rte_flow_error_set(error, ENOENT,
997                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
998                                    "Can't find entry.");
999                 return -rte_errno;
1000         }
1001
1002         ret = ice_fdir_add_del_filter(pf, filter, false);
1003         if (ret) {
1004                 rte_flow_error_set(error, -ret,
1005                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1006                                    "Del filter rule failed.");
1007                 return -rte_errno;
1008         }
1009
1010         ret = ice_fdir_entry_del(pf, &key);
1011         if (ret) {
1012                 rte_flow_error_set(error, -ret,
1013                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1014                                    "Remove entry from table failed.");
1015                 return -rte_errno;
1016         }
1017
1018         ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
1019         flow->rule = NULL;
1020
1021         rte_free(filter);
1022
1023         return 0;
1024 }
1025
/*
 * Report the packet-hit count of a FDIR rule from its GLSTAT_FD_CNT0
 * counter pair. This implementation maintains no byte statistics, so
 * bytes_set/bytes are always reported as zero.
 */
static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        /* a rule created without a COUNT action has no counter attached */
        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32-bits latches the high 32-bits into a shadow
         * register. Reading the high 32-bit returns the value in the
         * shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}
1067
/* Flow director engine ops, registered with the generic flow framework */
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};
1076
1077 static int
1078 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1079                               struct rte_flow_error *error,
1080                               const struct rte_flow_action *act,
1081                               struct ice_fdir_filter_conf *filter)
1082 {
1083         const struct rte_flow_action_rss *rss = act->conf;
1084         uint32_t i;
1085
1086         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1087                 rte_flow_error_set(error, EINVAL,
1088                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1089                                    "Invalid action.");
1090                 return -rte_errno;
1091         }
1092
1093         if (rss->queue_num <= 1) {
1094                 rte_flow_error_set(error, EINVAL,
1095                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1096                                    "Queue region size can't be 0 or 1.");
1097                 return -rte_errno;
1098         }
1099
1100         /* check if queue index for queue region is continuous */
1101         for (i = 0; i < rss->queue_num - 1; i++) {
1102                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1103                         rte_flow_error_set(error, EINVAL,
1104                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1105                                            "Discontinuous queue region");
1106                         return -rte_errno;
1107                 }
1108         }
1109
1110         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1111                 rte_flow_error_set(error, EINVAL,
1112                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1113                                    "Invalid queue region indexes.");
1114                 return -rte_errno;
1115         }
1116
1117         if (!(rte_is_power_of_2(rss->queue_num) &&
1118              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1119                 rte_flow_error_set(error, EINVAL,
1120                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1121                                    "The region size should be any of the following values:"
1122                                    "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1123                                    "of queues do not exceed the VSI allocation.");
1124                 return -rte_errno;
1125         }
1126
1127         filter->input.q_index = rss->queue[0];
1128         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1129         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1130
1131         return 0;
1132 }
1133
/*
 * Walk the action list and fill in the filter's destination (queue,
 * drop, passthru or queue region), optional mark id and optional
 * counter. Exactly one destination action is required; at most one
 * MARK and one COUNT action are accepted.
 */
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;    /* number of destination actions seen */
        uint32_t mark_num = 0;    /* number of MARK actions seen */
        uint32_t counter_num = 0; /* number of COUNT actions seen */
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        /* passthru is programmed as "direct to queue 0" */
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        /* RSS action selects a contiguous queue region */
                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        /* the mark id is reported with matched packets */
                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        /* enable HW packet counting for this rule */
                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        /* exactly one destination action must have been supplied */
        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}
1236
1237 static int
1238 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1239                        const struct rte_flow_item pattern[],
1240                        struct rte_flow_error *error,
1241                        struct ice_fdir_filter_conf *filter)
1242 {
1243         const struct rte_flow_item *item = pattern;
1244         enum rte_flow_item_type item_type;
1245         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1246         const struct rte_flow_item_eth *eth_spec, *eth_mask;
1247         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1248         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1249         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1250         const struct rte_flow_item_udp *udp_spec, *udp_mask;
1251         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1252         uint64_t input_set = ICE_INSET_NONE;
1253         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1254         uint8_t  ipv6_addr_mask[16] = {
1255                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1256                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1257         };
1258         uint32_t vtc_flow_cpu;
1259
1260
1261         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1262                 if (item->last) {
1263                         rte_flow_error_set(error, EINVAL,
1264                                         RTE_FLOW_ERROR_TYPE_ITEM,
1265                                         item,
1266                                         "Not support range");
1267                         return -rte_errno;
1268                 }
1269                 item_type = item->type;
1270
1271                 switch (item_type) {
1272                 case RTE_FLOW_ITEM_TYPE_ETH:
1273                         eth_spec = item->spec;
1274                         eth_mask = item->mask;
1275
1276                         if (eth_spec && eth_mask) {
1277                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1278                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
1279                                         rte_flow_error_set(error, EINVAL,
1280                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1281                                                 item,
1282                                                 "Src mac not support");
1283                                         return -rte_errno;
1284                                 }
1285
1286                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1287                                         rte_flow_error_set(error, EINVAL,
1288                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1289                                                 item,
1290                                                 "Invalid mac addr mask");
1291                                         return -rte_errno;
1292                                 }
1293
1294                                 input_set |= ICE_INSET_DMAC;
1295                                 rte_memcpy(&filter->input.ext_data.dst_mac,
1296                                            &eth_spec->dst,
1297                                            RTE_ETHER_ADDR_LEN);
1298                         }
1299                         break;
1300                 case RTE_FLOW_ITEM_TYPE_IPV4:
1301                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1302                         ipv4_spec = item->spec;
1303                         ipv4_mask = item->mask;
1304
1305                         if (ipv4_spec && ipv4_mask) {
1306                                 /* Check IPv4 mask and update input set */
1307                                 if (ipv4_mask->hdr.version_ihl ||
1308                                     ipv4_mask->hdr.total_length ||
1309                                     ipv4_mask->hdr.packet_id ||
1310                                     ipv4_mask->hdr.fragment_offset ||
1311                                     ipv4_mask->hdr.hdr_checksum) {
1312                                         rte_flow_error_set(error, EINVAL,
1313                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1314                                                    item,
1315                                                    "Invalid IPv4 mask.");
1316                                         return -rte_errno;
1317                                 }
1318                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1319                                         input_set |= ICE_INSET_IPV4_SRC;
1320                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1321                                         input_set |= ICE_INSET_IPV4_DST;
1322                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1323                                         input_set |= ICE_INSET_IPV4_TOS;
1324                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1325                                         input_set |= ICE_INSET_IPV4_TTL;
1326                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1327                                         input_set |= ICE_INSET_IPV4_PROTO;
1328
1329                                 filter->input.ip.v4.dst_ip =
1330                                         ipv4_spec->hdr.src_addr;
1331                                 filter->input.ip.v4.src_ip =
1332                                         ipv4_spec->hdr.dst_addr;
1333                                 filter->input.ip.v4.tos =
1334                                         ipv4_spec->hdr.type_of_service;
1335                                 filter->input.ip.v4.ttl =
1336                                         ipv4_spec->hdr.time_to_live;
1337                                 filter->input.ip.v4.proto =
1338                                         ipv4_spec->hdr.next_proto_id;
1339                         }
1340
1341                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1342                         break;
1343                 case RTE_FLOW_ITEM_TYPE_IPV6:
1344                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1345                         ipv6_spec = item->spec;
1346                         ipv6_mask = item->mask;
1347
1348                         if (ipv6_spec && ipv6_mask) {
1349                                 /* Check IPv6 mask and update input set */
1350                                 if (ipv6_mask->hdr.payload_len) {
1351                                         rte_flow_error_set(error, EINVAL,
1352                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1353                                                    item,
1354                                                    "Invalid IPv6 mask");
1355                                         return -rte_errno;
1356                                 }
1357
1358                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1359                                             ipv6_addr_mask,
1360                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1361                                         input_set |= ICE_INSET_IPV6_SRC;
1362                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1363                                             ipv6_addr_mask,
1364                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1365                                         input_set |= ICE_INSET_IPV6_DST;
1366
1367                                 if ((ipv6_mask->hdr.vtc_flow &
1368                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1369                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1370                                         input_set |= ICE_INSET_IPV6_TC;
1371                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1372                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1373                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1374                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1375
1376                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1377                                            ipv6_spec->hdr.src_addr, 16);
1378                                 rte_memcpy(filter->input.ip.v6.src_ip,
1379                                            ipv6_spec->hdr.dst_addr, 16);
1380
1381                                 vtc_flow_cpu =
1382                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1383                                 filter->input.ip.v6.tc =
1384                                         (uint8_t)(vtc_flow_cpu >>
1385                                                   ICE_FDIR_IPV6_TC_OFFSET);
1386                                 filter->input.ip.v6.proto =
1387                                         ipv6_spec->hdr.proto;
1388                                 filter->input.ip.v6.hlim =
1389                                         ipv6_spec->hdr.hop_limits;
1390                         }
1391
1392                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1393                         break;
1394                 case RTE_FLOW_ITEM_TYPE_TCP:
1395                         tcp_spec = item->spec;
1396                         tcp_mask = item->mask;
1397
1398                         if (tcp_spec && tcp_mask) {
1399                                 /* Check TCP mask and update input set */
1400                                 if (tcp_mask->hdr.sent_seq ||
1401                                     tcp_mask->hdr.recv_ack ||
1402                                     tcp_mask->hdr.data_off ||
1403                                     tcp_mask->hdr.tcp_flags ||
1404                                     tcp_mask->hdr.rx_win ||
1405                                     tcp_mask->hdr.cksum ||
1406                                     tcp_mask->hdr.tcp_urp) {
1407                                         rte_flow_error_set(error, EINVAL,
1408                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1409                                                    item,
1410                                                    "Invalid TCP mask");
1411                                         return -rte_errno;
1412                                 }
1413
1414                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1415                                         input_set |= ICE_INSET_TCP_SRC_PORT;
1416                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1417                                         input_set |= ICE_INSET_TCP_DST_PORT;
1418
1419                                 /* Get filter info */
1420                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1421                                         filter->input.ip.v4.dst_port =
1422                                                 tcp_spec->hdr.src_port;
1423                                         filter->input.ip.v4.src_port =
1424                                                 tcp_spec->hdr.dst_port;
1425                                         flow_type =
1426                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1427                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1428                                         filter->input.ip.v6.dst_port =
1429                                                 tcp_spec->hdr.src_port;
1430                                         filter->input.ip.v6.src_port =
1431                                                 tcp_spec->hdr.dst_port;
1432                                         flow_type =
1433                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1434                                 }
1435                         }
1436                         break;
1437                 case RTE_FLOW_ITEM_TYPE_UDP:
1438                         udp_spec = item->spec;
1439                         udp_mask = item->mask;
1440
1441                         if (udp_spec && udp_mask) {
1442                                 /* Check UDP mask and update input set*/
1443                                 if (udp_mask->hdr.dgram_len ||
1444                                     udp_mask->hdr.dgram_cksum) {
1445                                         rte_flow_error_set(error, EINVAL,
1446                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1447                                                    item,
1448                                                    "Invalid UDP mask");
1449                                         return -rte_errno;
1450                                 }
1451
1452                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1453                                         input_set |= ICE_INSET_UDP_SRC_PORT;
1454                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1455                                         input_set |= ICE_INSET_UDP_DST_PORT;
1456
1457                                 /* Get filter info */
1458                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1459                                         filter->input.ip.v4.dst_port =
1460                                                 udp_spec->hdr.src_port;
1461                                         filter->input.ip.v4.src_port =
1462                                                 udp_spec->hdr.dst_port;
1463                                         flow_type =
1464                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1465                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1466                                         filter->input.ip.v6.src_port =
1467                                                 udp_spec->hdr.src_port;
1468                                         filter->input.ip.v6.dst_port =
1469                                                 udp_spec->hdr.dst_port;
1470                                         flow_type =
1471                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1472                                 }
1473                         }
1474                         break;
1475                 case RTE_FLOW_ITEM_TYPE_SCTP:
1476                         sctp_spec = item->spec;
1477                         sctp_mask = item->mask;
1478
1479                         if (sctp_spec && sctp_mask) {
1480                                 /* Check SCTP mask and update input set */
1481                                 if (sctp_mask->hdr.cksum) {
1482                                         rte_flow_error_set(error, EINVAL,
1483                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1484                                                    item,
1485                                                    "Invalid UDP mask");
1486                                         return -rte_errno;
1487                                 }
1488
1489                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1490                                         input_set |= ICE_INSET_SCTP_SRC_PORT;
1491                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1492                                         input_set |= ICE_INSET_SCTP_DST_PORT;
1493
1494                                 /* Get filter info */
1495                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1496                                         filter->input.ip.v4.dst_port =
1497                                                 sctp_spec->hdr.src_port;
1498                                         filter->input.ip.v4.src_port =
1499                                                 sctp_spec->hdr.dst_port;
1500                                         flow_type =
1501                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1502                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1503                                         filter->input.ip.v6.dst_port =
1504                                                 sctp_spec->hdr.src_port;
1505                                         filter->input.ip.v6.src_port =
1506                                                 sctp_spec->hdr.dst_port;
1507                                         flow_type =
1508                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1509                                 }
1510                         }
1511                         break;
1512                 case RTE_FLOW_ITEM_TYPE_VOID:
1513                         break;
1514                 default:
1515                         rte_flow_error_set(error, EINVAL,
1516                                    RTE_FLOW_ERROR_TYPE_ITEM,
1517                                    item,
1518                                    "Invalid pattern item.");
1519                         return -rte_errno;
1520                 }
1521         }
1522
1523         filter->input.flow_type = flow_type;
1524         filter->input_set = input_set;
1525
1526         return 0;
1527 }
1528
1529 static int
1530 ice_fdir_parse(struct ice_adapter *ad,
1531                struct ice_pattern_match_item *array,
1532                uint32_t array_len,
1533                const struct rte_flow_item pattern[],
1534                const struct rte_flow_action actions[],
1535                void **meta,
1536                struct rte_flow_error *error)
1537 {
1538         struct ice_pf *pf = &ad->pf;
1539         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1540         struct ice_pattern_match_item *item = NULL;
1541         uint64_t input_set;
1542         int ret;
1543
1544         memset(filter, 0, sizeof(*filter));
1545         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1546         if (!item)
1547                 return -rte_errno;
1548
1549         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1550         if (ret)
1551                 return ret;
1552         input_set = filter->input_set;
1553         if (!input_set || input_set & ~item->input_set_mask) {
1554                 rte_flow_error_set(error, EINVAL,
1555                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1556                                    pattern,
1557                                    "Invalid input set");
1558                 return -rte_errno;
1559         }
1560
1561         ret = ice_fdir_parse_action(ad, actions, error, filter);
1562         if (ret)
1563                 return ret;
1564
1565         *meta = filter;
1566
1567         return 0;
1568 }
1569
1570 static struct ice_flow_parser ice_fdir_parser = {
1571         .engine = &ice_fdir_engine,
1572         .array = ice_fdir_pattern,
1573         .array_len = RTE_DIM(ice_fdir_pattern),
1574         .parse_pattern_action = ice_fdir_parse,
1575         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1576 };
1577
/* Constructor: register the flow director engine with the generic flow
 * framework when the driver shared object is loaded.
 */
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}