/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
#include <stdio.h>
#include <rte_flow.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

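/*
 * Per-pattern input-set masks. Each ICE_FDIR_INSET_* macro lists the header
 * fields that may be matched for the corresponding flow pattern; the L4
 * variants extend the bare IPv4/IPv6 sets with port fields.
 */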
#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

static struct ice_pattern_match_item ice_fdir_pattern[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

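/*
 * ice_memzone_reserve - reserve an IOVA-contiguous, ring-aligned memzone;
 * used below for the FDIR programming packet, which the hardware reads by
 * physical address.
 */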
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

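/*
 * ice_fdir_prof_alloc - allocate a flow director profile slot for every
 * filter packet type; on failure, free whatever was allocated so far.
 */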
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        return -ENOMEM;
}

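/*
 * ice_fdir_counter_pool_add - carve @len hardware counters starting at
 * @index_start into a new pool and append it to @container.
 */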
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        /* unlink before freeing so the container never keeps a dangling
         * pool pointer
         */
        TAILQ_REMOVE(&container->pool_list, pool, next);
        rte_free(pool);
        return ret;
}

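/*
 * ice_fdir_counter_init - create the initial counter pool from the HW
 * counter block assigned to this PF.
 */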
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                err = -EINVAL;
                goto fail_setup_tx;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

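/* ice_fdir_prof_free - free all per-ptype profile slots and the table. */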
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

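/*
 * ice_fdir_hw_tbl_conf - program one HW profile for @ptype and add flow
 * entries for both the main VSI and the FDIR control VSI. Returns -EAGAIN
 * when an identical segment is already programmed, -EINVAL when a
 * conflicting profile is still referenced by existing filters.
 */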
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

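/*
 * ice_fdir_input_set_parse - translate the ICE_INSET_* bits in @inset into
 * the list of ice_flow_field indexes in @field, in table order.
 */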
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

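/*
 * ice_fdir_input_set_conf - build the flow segment(s) for @flow from
 * @input_set and install them via ice_fdir_hw_tbl_conf(). For tunnels a
 * two-segment array is used: segment 0 (outer) is left empty and segment 1
 * carries the parsed inner fields.
 */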
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

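/*
 * ice_fdir_cnt_update - adjust the active-filter counters after an add
 * (@add true) or delete (@add false) of a filter of type @ptype.
 */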
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

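/*
 * Engine init/uninit hooks: bring up (or tear down) the FDIR resources and
 * register (or unregister) the rte_flow parser for FDIR patterns.
 */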
static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        return ice_register_parser(&ice_fdir_parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;

        ice_unregister_parser(&ice_fdir_parser, ad);

        ice_fdir_teardown(pf);
}

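/*
 * ice_fdir_add_del_filter - program (or remove) one filter by building a
 * programming descriptor plus a dummy packet and handing both to the FDIR
 * control queue.
 */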
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to generate dummy packet.");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

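/*
 * rte_flow create hook: install the parsed filter and keep a private copy
 * of it in the flow handle for a later destroy.
 */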
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_filter_conf *rule;
        int ret;

        rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
        if (!rule) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configure failed.");
                goto free_entry;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_entry;
        }

        rte_memcpy(rule, filter, sizeof(*rule));
        flow->rule = rule;
        ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
        return 0;

free_entry:
        rte_free(rule);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

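/*
 * Engine vtable picked up by the generic flow framework. For reference, an
 * application reaches this engine through the normal rte_flow API; a
 * minimal sketch (assuming an attr/pattern/actions triple this engine
 * accepts) might look like:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (!f)
 *		printf("FDIR rule rejected: %s\n", err.message);
 *	...
 *	rte_flow_destroy(port_id, f, &err);
 */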
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .type = ICE_FLOW_ENGINE_FDIR,
};

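/*
 * ice_fdir_parse_action_qregion - validate an RSS action used as a queue
 * region: the queues must be contiguous, in range, and the region size a
 * power of two no larger than ICE_FDIR_MAX_QREGION_SIZE.
 */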
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check that queue indexes for the queue region are contiguous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

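/*
 * ice_fdir_parse_action - walk the action list and fill the destination
 * (queue/drop/passthru/queue region) and optional mark ID into @filter;
 * exactly one destination action and at most one mark are accepted.
 */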
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        return 0;
}

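/*
 * ice_fdir_parse_pattern - walk the pattern items, validate the masks and
 * derive the flow type plus input set; only fully-masked fields are
 * accepted into the input set. For example, a rule this parser is meant to
 * accept could be written in testpmd syntax (illustrative only) as:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20
 *        dst is 192.168.0.21 / udp src is 32 dst is 33 / end
 *        actions queue index 2 / end
 */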
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_spec->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->src)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Source MAC is not supported");
                                        return -rte_errno;
                                }

                                if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Invalid MAC address mask");
                                        return -rte_errno;
                                }

                                input_set |= ICE_INSET_DMAC;
                                rte_memcpy(&filter->input.ext_data.dst_mac,
                                           &eth_spec->dst,
                                           RTE_ETHER_ADDR_LEN);
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                    ipv4_mask->hdr.total_length ||
                                    ipv4_mask->hdr.packet_id ||
                                    ipv4_mask->hdr.fragment_offset ||
                                    ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;

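                                /*
                                 * src/dst are deliberately swapped here: the
                                 * base code is expected to write the src_*
                                 * fields of the input into the destination
                                 * offsets of the FDIR programming packet and
                                 * vice versa (see ice_fdir_get_prgm_pkt()).
                                 */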
                                filter->input.ip.v4.dst_ip =
                                        ipv4_spec->hdr.src_addr;
                                filter->input.ip.v4.src_ip =
                                        ipv4_spec->hdr.dst_addr;
                                filter->input.ip.v4.tos =
                                        ipv4_spec->hdr.type_of_service;
                                filter->input.ip.v4.ttl =
                                        ipv4_spec->hdr.time_to_live;
                                filter->input.ip.v4.proto =
                                        ipv4_spec->hdr.next_proto_id;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (ipv6_spec && ipv6_mask) {
                                /* Check IPv6 mask and update input set */
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }

                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;

                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                                rte_memcpy(filter->input.ip.v6.dst_ip,
                                           ipv6_spec->hdr.src_addr, 16);
                                rte_memcpy(filter->input.ip.v6.src_ip,
                                           ipv6_spec->hdr.dst_addr, 16);

                                vtc_flow_cpu =
                                      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                                filter->input.ip.v6.tc =
                                        (uint8_t)(vtc_flow_cpu >>
                                                  ICE_FDIR_IPV6_TC_OFFSET);
                                filter->input.ip.v6.proto =
                                        ipv6_spec->hdr.proto;
                                filter->input.ip.v6.hlim =
                                        ipv6_spec->hdr.hop_limits;
                        }

                        flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                    tcp_mask->hdr.recv_ack ||
                                    tcp_mask->hdr.data_off ||
                                    tcp_mask->hdr.tcp_flags ||
                                    tcp_mask->hdr.rx_win ||
                                    tcp_mask->hdr.cksum ||
                                    tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                        return -rte_errno;
                                }

                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TCP_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TCP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                tcp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                tcp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return -rte_errno;
                                }

                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_UDP_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_UDP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                udp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                udp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                        return -rte_errno;
                                }

                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SCTP_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SCTP_DST_PORT;

                                /* Get filter info */
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.ip.v4.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v4.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                        filter->input.ip.v6.dst_port =
                                                sctp_spec->hdr.src_port;
                                        filter->input.ip.v6.src_port =
                                                sctp_spec->hdr.dst_port;
                                        flow_type =
                                                ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                                }
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Invalid pattern item.");
                        return -rte_errno;
                }
        }

        filter->input.flow_type = flow_type;
        filter->input_set = input_set;

        return 0;
}

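/*
 * ice_fdir_parse - top-level parser callback: match the pattern against the
 * FDIR table, parse pattern and actions, and reject any input set not
 * covered by the matched entry.
 */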
static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               void **meta,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
        struct ice_pattern_match_item *item = NULL;
        uint64_t input_set;
        int ret;

        memset(filter, 0, sizeof(*filter));
        item = ice_search_pattern_match_item(pattern, array, array_len, error);
        if (!item)
                return -rte_errno;

        ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
        if (ret)
                return ret;
        input_set = filter->input_set;
        if (!input_set || input_set & ~item->input_set_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        ret = ice_fdir_parse_action(ad, actions, error, filter);
        if (ret)
                return ret;

        *meta = filter;

        return 0;
}

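/* FDIR rules are matched at the distributor stage of the generic flow
 * pipeline.
 */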
static struct ice_flow_parser ice_fdir_parser = {
        .engine = &ice_fdir_engine,
        .array = ice_fdir_pattern,
        .array_len = RTE_DIM(ice_fdir_pattern),
        .parse_pattern_action = ice_fdir_parse,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

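/* Register the FDIR engine with the generic flow framework at startup. */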
RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}