net/ice: support flow director counter
drivers/net/ice/ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

static struct ice_pattern_match_item ice_fdir_pattern[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

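/*
 * Allocate the FDIR HW profile table, one entry per supported
 * filter ptype.
 */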
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++) {
                rte_free(hw->fdir_prof[fltr_ptype]);
                hw->fdir_prof[fltr_ptype] = NULL;
        }
        rte_free(hw->fdir_prof);
        hw->fdir_prof = NULL;
        return -ENOMEM;
}

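/*
 * Register a block of hardware counters [index_start, index_start + len)
 * with the container as a new counter pool.
 */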
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* the pool was already linked above; unlink it before freeing */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

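/*
 * Create the initial counter pool from the HW counter block assigned to
 * this PF (starting at hw->fd_ctr_base).
 */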
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

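/* Free all counter pools held by the container. */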
static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

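/*
 * Look up an in-use shared counter by its rte_flow counter ID, so that
 * several rules can reference the same HW counter.
 */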
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

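/*
 * Allocate a counter: reuse a shared counter with a matching ID when
 * possible, otherwise take the first free counter from the pools and
 * clear its HW value.
 */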
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

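/* Drop one reference; return the counter to its pool on last release. */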
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

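/* Free the profile table allocated by ice_fdir_prof_alloc(). */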
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, ptype);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

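/*
 * Program a profile for the given filter type into the HW flow table and
 * attach flow entries for both the main VSI and the FDIR control VSI.
 * Returns -EAGAIN if an identical profile already exists.
 */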
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

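/* Translate an ICE_INSET_* bitmap into a list of HW flow fields. */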
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

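/*
 * Build the flow segment(s) for a filter type and input set, then
 * configure the matching HW profile.
 */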
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                /* -EAGAIN: an identical profile already exists, not an error */
                return (ret == -EAGAIN) ? 0 : ret;
        }

        if (is_tunnel)
                /* only the copy in seg_tun[1] is kept by the HW table */
                rte_free(seg);

        return ret;
}

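/* Track the number of active FDIR filters, globally and per ptype. */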
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

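/* Engine init: set up FDIR resources and register the rule parser. */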
static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        return ice_register_parser(&ice_fdir_parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;

        ice_unregister_parser(&ice_fdir_parser, ad);

        ice_fdir_teardown(pf);
}

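/*
 * Program (add == true) or remove a filter rule: build a programming
 * descriptor and a dummy packet and submit them on the FDIR queue.
 */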
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

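/*
 * rte_flow create op: configure the input-set profile, optionally attach
 * a counter, program the rule in HW and keep a private copy in flow->rule.
 */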
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_filter_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;
        ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(rule);
        return -rte_errno;
}

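/* rte_flow destroy op: release the rule's counter and remove it from HW. */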
static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

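/*
 * RTE_FLOW_ACTION_TYPE_COUNT query handler: read the rule's HW hit
 * counter (two 32-bit registers). An application reaches this through
 * rte_flow_query(), for example (sketch):
 *
 *	struct rte_flow_query_count stats = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	rte_flow_query(port_id, flow, &count_action, &stats, &error);
 */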
static int
ice_fdir_query_count(struct ice_adapter *ad,
                     struct rte_flow *flow,
                     struct rte_flow_query_count *flow_stats,
                     struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   NULL,
                                   "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32 bits latches the high 32 bits into a shadow
         * register. Reading the high 32 bits then returns the value from
         * the shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

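/* FDIR flow engine ops registered with the generic flow framework. */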
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

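/*
 * Parse an RSS action used as a queue region: queues must be contiguous,
 * in range, and the region size a power of two.
 */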
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if queue index for queue region is continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
              rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size must be a power of two "
                                   "(2, 4, 8, 16, 32, 64 or 128) and must not "
                                   "exceed the VSI queue allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

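/*
 * Parse the action list: exactly one fate action (queue, drop, passthru
 * or RSS queue region), at most one mark and at most one count action.
 */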
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                   sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

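/*
 * Walk the pattern items, validate the masks and fill in the filter's
 * input fields and input-set bitmap.
 */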
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_spec->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->src)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Src MAC not supported");
                                        return -rte_errno;
                                }

                                if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Invalid mac addr mask");
                                        return -rte_errno;
                                }

                                input_set |= ICE_INSET_DMAC;
                                rte_memcpy(&filter->input.ext_data.dst_mac,
                                           &eth_spec->dst,
                                           RTE_ETHER_ADDR_LEN);
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                    ipv4_mask->hdr.total_length ||
                                    ipv4_mask->hdr.packet_id ||
                                    ipv4_mask->hdr.fragment_offset ||
                                    ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;

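                                /*
                                 * Note: src and dst are deliberately
                                 * swapped here (and in the L4 cases
                                 * below); the base-code programming
                                 * packet generator writes the input's
                                 * src fields at the packet's dst offsets
                                 * and vice versa.
                                 */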
                                filter->input.ip.v4.dst_ip =
                                        ipv4_spec->hdr.src_addr;
                                filter->input.ip.v4.src_ip =
                                        ipv4_spec->hdr.dst_addr;
                                filter->input.ip.v4.tos =
                                        ipv4_spec->hdr.type_of_service;
                                filter->input.ip.v4.ttl =
                                        ipv4_spec->hdr.time_to_live;
                                filter->input.ip.v4.proto =
                                        ipv4_spec->hdr.next_proto_id;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (ipv6_spec && ipv6_mask) {
                                /* Check IPv6 mask and update input set */
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;

                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                                rte_memcpy(filter->input.ip.v6.dst_ip,
                                           ipv6_spec->hdr.src_addr, 16);
                                rte_memcpy(filter->input.ip.v6.src_ip,
                                           ipv6_spec->hdr.dst_addr, 16);

                                vtc_flow_cpu =
                                      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                                filter->input.ip.v6.tc =
                                        (uint8_t)(vtc_flow_cpu >>
                                                  ICE_FDIR_IPV6_TC_OFFSET);
                                filter->input.ip.v6.proto =
                                        ipv6_spec->hdr.proto;
                                filter->input.ip.v6.hlim =
                                        ipv6_spec->hdr.hop_limits;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                    tcp_mask->hdr.recv_ack ||
                                    tcp_mask->hdr.data_off ||
                                    tcp_mask->hdr.tcp_flags ||
                                    tcp_mask->hdr.rx_win ||
                                    tcp_mask->hdr.cksum ||
                                    tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TCP_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TCP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_UDP_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_UDP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SCTP_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SCTP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        filter->input.flow_type = flow_type;
        filter->input_set = input_set;

        return 0;
}

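/*
 * Top-level parse callback: match the pattern against the supported
 * table, fill pf->fdir.conf from the pattern and actions, and return it
 * through *meta.
 */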
static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                return ret;
        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                return ret;

        *meta = filter;

        return 0;
}

static struct ice_flow_parser ice_fdir_parser = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern,
        .array_len = RTE_DIM(ice_fdir_pattern),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}