/* net/ice: enable flow director queue group
 * drivers/net/ice/ice_fdir_filter.c
 */
1 #include <stdio.h>
2 #include <rte_flow.h>
3 #include "base/ice_fdir.h"
4 #include "base/ice_flow.h"
5 #include "base/ice_type.h"
6 #include "ice_ethdev.h"
7 #include "ice_rxtx.h"
8 #include "ice_generic_flow.h"
9
/* Bit offset of the Traffic Class field within the IPv6 vtc_flow word */
#define ICE_FDIR_IPV6_TC_OFFSET		20
/* 8-bit TC field mask at that offset */
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

/* Upper bound on the number of queues in one FDIR queue-group action */
#define ICE_FDIR_MAX_QREGION_SIZE	128

/* Input-set bits accepted for each supported pattern.  The L4 variants
 * (UDP/TCP/SCTP) extend the corresponding L3 set with port fields.
 */
#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
/* Patterns supported by the FDIR parser.  Each row pairs an rte_flow
 * pattern with the input-set bits that may be matched for it; the third
 * column (outer input set) is unused here, hence ICE_INSET_NONE.
 */
static struct ice_pattern_match_item ice_fdir_pattern[] = {
	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
};
58
59 static struct ice_flow_parser ice_fdir_parser;
60
61 static const struct rte_memzone *
62 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
63 {
64         return rte_memzone_reserve_aligned(name, len, socket_id,
65                                            RTE_MEMZONE_IOVA_CONTIG,
66                                            ICE_RING_BASE_ALIGN);
67 }
68
69 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
70
71 static int
72 ice_fdir_prof_alloc(struct ice_hw *hw)
73 {
74         enum ice_fltr_ptype ptype, fltr_ptype;
75
76         if (!hw->fdir_prof) {
77                 hw->fdir_prof = (struct ice_fd_hw_prof **)
78                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
79                                    sizeof(*hw->fdir_prof));
80                 if (!hw->fdir_prof)
81                         return -ENOMEM;
82         }
83         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
84              ptype < ICE_FLTR_PTYPE_MAX;
85              ptype++) {
86                 if (!hw->fdir_prof[ptype]) {
87                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
88                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
89                         if (!hw->fdir_prof[ptype])
90                                 goto fail_mem;
91                 }
92         }
93         return 0;
94
95 fail_mem:
96         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
97              fltr_ptype < ptype;
98              fltr_ptype++)
99                 rte_free(hw->fdir_prof[fltr_ptype]);
100         rte_free(hw->fdir_prof);
101         return -ENOMEM;
102 }
103
104 /*
105  * ice_fdir_setup - reserve and initialize the Flow Director resources
106  * @pf: board private structure
107  */
108 static int
109 ice_fdir_setup(struct ice_pf *pf)
110 {
111         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
112         struct ice_hw *hw = ICE_PF_TO_HW(pf);
113         const struct rte_memzone *mz = NULL;
114         char z_name[RTE_MEMZONE_NAMESIZE];
115         struct ice_vsi *vsi;
116         int err = ICE_SUCCESS;
117
118         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
119                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
120                 return -ENOTSUP;
121         }
122
123         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
124                     " fd_fltr_best_effort = %u.",
125                     hw->func_caps.fd_fltr_guar,
126                     hw->func_caps.fd_fltr_best_effort);
127
128         if (pf->fdir.fdir_vsi) {
129                 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
130                 return ICE_SUCCESS;
131         }
132
133         /* make new FDIR VSI */
134         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
135         if (!vsi) {
136                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
137                 return -EINVAL;
138         }
139         pf->fdir.fdir_vsi = vsi;
140
141         /*Fdir tx queue setup*/
142         err = ice_fdir_setup_tx_resources(pf);
143         if (err) {
144                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
145                 goto fail_setup_tx;
146         }
147
148         /*Fdir rx queue setup*/
149         err = ice_fdir_setup_rx_resources(pf);
150         if (err) {
151                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
152                 goto fail_setup_rx;
153         }
154
155         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
156         if (err) {
157                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
158                 goto fail_mem;
159         }
160
161         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
162         if (err) {
163                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
164                 goto fail_mem;
165         }
166
167         /* reserve memory for the fdir programming packet */
168         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
169                  ICE_FDIR_MZ_NAME,
170                  eth_dev->data->port_id);
171         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
172         if (!mz) {
173                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
174                             "flow director program packet.");
175                 err = -ENOMEM;
176                 goto fail_mem;
177         }
178         pf->fdir.prg_pkt = mz->addr;
179         pf->fdir.dma_addr = mz->iova;
180
181         err = ice_fdir_prof_alloc(hw);
182         if (err) {
183                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
184                             "flow director profile.");
185                 err = -ENOMEM;
186                 goto fail_mem;
187         }
188
189         PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
190                     vsi->base_queue);
191         return ICE_SUCCESS;
192
193 fail_mem:
194         ice_rx_queue_release(pf->fdir.rxq);
195         pf->fdir.rxq = NULL;
196 fail_setup_rx:
197         ice_tx_queue_release(pf->fdir.txq);
198         pf->fdir.txq = NULL;
199 fail_setup_tx:
200         ice_release_vsi(vsi);
201         pf->fdir.fdir_vsi = NULL;
202         return err;
203 }
204
205 static void
206 ice_fdir_prof_free(struct ice_hw *hw)
207 {
208         enum ice_fltr_ptype ptype;
209
210         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
211              ptype < ICE_FLTR_PTYPE_MAX;
212              ptype++)
213                 rte_free(hw->fdir_prof[ptype]);
214
215         rte_free(hw->fdir_prof);
216 }
217
218 /* Remove a profile for some filter type */
219 static void
220 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
221 {
222         struct ice_hw *hw = ICE_PF_TO_HW(pf);
223         struct ice_fd_hw_prof *hw_prof;
224         uint64_t prof_id;
225         uint16_t vsi_num;
226         int i;
227
228         if (!hw->fdir_prof || !hw->fdir_prof[ptype])
229                 return;
230
231         hw_prof = hw->fdir_prof[ptype];
232
233         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
234         for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
235                 if (hw_prof->entry_h[i][is_tunnel]) {
236                         vsi_num = ice_get_hw_vsi_num(hw,
237                                                      hw_prof->vsi_h[i]);
238                         ice_rem_prof_id_flow(hw, ICE_BLK_FD,
239                                              vsi_num, ptype);
240                         ice_flow_rem_entry(hw,
241                                            hw_prof->entry_h[i][is_tunnel]);
242                         hw_prof->entry_h[i][is_tunnel] = 0;
243                 }
244         }
245         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
246         rte_free(hw_prof->fdir_seg[is_tunnel]);
247         hw_prof->fdir_seg[is_tunnel] = NULL;
248
249         for (i = 0; i < hw_prof->cnt; i++)
250                 hw_prof->vsi_h[i] = 0;
251         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
252 }
253
254 /* Remove all created profiles */
255 static void
256 ice_fdir_prof_rm_all(struct ice_pf *pf)
257 {
258         enum ice_fltr_ptype ptype;
259
260         for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
261              ptype < ICE_FLTR_PTYPE_MAX;
262              ptype++) {
263                 ice_fdir_prof_rm(pf, ptype, false);
264                 ice_fdir_prof_rm(pf, ptype, true);
265         }
266 }
267
/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 *
 * Reverses ice_fdir_setup(): stops and releases the programming
 * queue pair, removes all HW profiles, frees the profile table and
 * releases the control VSI.  No-op if FDIR was never set up.
 * Queue-stop failures are logged but teardown continues regardless,
 * so resources are still released.
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	/* Queues must be released before the VSI that owns them */
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}
301
/* Configure one FDIR profile in the HW table for @ptype.
 *
 * Adds a flow profile (id = ptype, offset by ICE_FLTR_PTYPE_MAX for
 * tunnel) and one flow entry each for the main VSI and the FDIR control
 * VSI.  On success, ownership of @seg transfers to the profile cache
 * (hw_prof->fdir_seg) — the caller must NOT free it.
 *
 * Returns:
 *   0        on success
 *   -EAGAIN  if an identical profile already exists (caller treats as OK
 *            and frees @seg)
 *   -EINVAL  if a different profile exists but live filters prevent
 *            replacing it
 *   other    negative status from the flow-engine calls
 */
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		/* For tunnel, the inner (second) segment carries the
		 * match fields, so compare against seg[1].
		 */
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		/* A different input set: only replaceable while no
		 * filters of this type are installed.
		 */
		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	/* Tunnel profile ids live in a second range above the ptypes */
	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	/* Record both VSIs and entry handles in the profile cache */
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
380
381 static void
382 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
383 {
384         uint32_t i, j;
385
386         struct ice_inset_map {
387                 uint64_t inset;
388                 enum ice_flow_field fld;
389         };
390         static const struct ice_inset_map ice_inset_map[] = {
391                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
392                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
393                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
394                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
395                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
396                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
397                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
398                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
399                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
400                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
401                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
402                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
403                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
404                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
405                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
406                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
407                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
408         };
409
410         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
411                 if ((inset & ice_inset_map[i].inset) ==
412                     ice_inset_map[i].inset)
413                         field[j++] = ice_inset_map[i].fld;
414         }
415 }
416
417 static int
418 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
419                         uint64_t input_set, bool is_tunnel)
420 {
421         struct ice_flow_seg_info *seg;
422         struct ice_flow_seg_info *seg_tun = NULL;
423         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
424         int i, ret;
425
426         if (!input_set)
427                 return -EINVAL;
428
429         seg = (struct ice_flow_seg_info *)
430                 ice_malloc(hw, sizeof(*seg));
431         if (!seg) {
432                 PMD_DRV_LOG(ERR, "No memory can be allocated");
433                 return -ENOMEM;
434         }
435
436         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
437                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
438         ice_fdir_input_set_parse(input_set, field);
439
440         switch (flow) {
441         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
442                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
443                                   ICE_FLOW_SEG_HDR_IPV4);
444                 break;
445         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
446                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
447                                   ICE_FLOW_SEG_HDR_IPV4);
448                 break;
449         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
450                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
451                                   ICE_FLOW_SEG_HDR_IPV4);
452                 break;
453         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
454                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
455                 break;
456         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
457                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
458                                   ICE_FLOW_SEG_HDR_IPV6);
459                 break;
460         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
461                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
462                                   ICE_FLOW_SEG_HDR_IPV6);
463                 break;
464         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
465                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
466                                   ICE_FLOW_SEG_HDR_IPV6);
467                 break;
468         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
469                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
470                 break;
471         default:
472                 PMD_DRV_LOG(ERR, "not supported filter type.");
473                 break;
474         }
475
476         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
477                 ice_flow_set_fld(seg, field[i],
478                                  ICE_FLOW_FLD_OFF_INVAL,
479                                  ICE_FLOW_FLD_OFF_INVAL,
480                                  ICE_FLOW_FLD_OFF_INVAL, false);
481         }
482
483         if (!is_tunnel) {
484                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
485                                            seg, flow, false);
486         } else {
487                 seg_tun = (struct ice_flow_seg_info *)
488                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
489                 if (!seg_tun) {
490                         PMD_DRV_LOG(ERR, "No memory can be allocated");
491                         rte_free(seg);
492                         return -ENOMEM;
493                 }
494                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
495                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
496                                            seg_tun, flow, true);
497         }
498
499         if (!ret) {
500                 return ret;
501         } else if (ret < 0) {
502                 rte_free(seg);
503                 if (is_tunnel)
504                         rte_free(seg_tun);
505                 return (ret == -EAGAIN) ? 0 : ret;
506         } else {
507                 return ret;
508         }
509 }
510
511 static void
512 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
513                     bool is_tunnel, bool add)
514 {
515         struct ice_hw *hw = ICE_PF_TO_HW(pf);
516         int cnt;
517
518         cnt = (add) ? 1 : -1;
519         hw->fdir_active_fltr += cnt;
520         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
521                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
522         else
523                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
524 }
525
526 static int
527 ice_fdir_init(struct ice_adapter *ad)
528 {
529         struct ice_pf *pf = &ad->pf;
530         int ret;
531
532         ret = ice_fdir_setup(pf);
533         if (ret)
534                 return ret;
535
536         return ice_register_parser(&ice_fdir_parser, ad);
537 }
538
539 static void
540 ice_fdir_uninit(struct ice_adapter *ad)
541 {
542         struct ice_pf *pf = &ad->pf;
543
544         ice_unregister_parser(&ice_fdir_parser, ad);
545
546         ice_fdir_teardown(pf);
547 }
548
/* Program (@add == true) or remove one FDIR rule in hardware.
 *
 * Builds the programming descriptor and the dummy packet the FDIR
 * programming queue expects, then submits them via
 * ice_fdir_programming().  Returns 0 on success, negative on failure.
 */
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	int ret;

	/* matched packets are delivered to the main VSI */
	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	/* 'false': only non-tunnel dummy packets are generated here */
	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
573
574 static int
575 ice_fdir_create_filter(struct ice_adapter *ad,
576                        struct rte_flow *flow,
577                        void *meta,
578                        struct rte_flow_error *error)
579 {
580         struct ice_pf *pf = &ad->pf;
581         struct ice_fdir_filter_conf *filter = meta;
582         struct ice_fdir_filter_conf *rule;
583         int ret;
584
585         rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
586         if (!rule) {
587                 rte_flow_error_set(error, ENOMEM,
588                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
589                                    "Failed to allocate memory");
590                 return -rte_errno;
591         }
592
593         ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
594                         filter->input_set, false);
595         if (ret) {
596                 rte_flow_error_set(error, -ret,
597                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
598                                    "Profile configure failed.");
599                 goto free_entry;
600         }
601
602         ret = ice_fdir_add_del_filter(pf, filter, true);
603         if (ret) {
604                 rte_flow_error_set(error, -ret,
605                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
606                                    "Add filter rule failed.");
607                 goto free_entry;
608         }
609
610         rte_memcpy(rule, filter, sizeof(*rule));
611         flow->rule = rule;
612         ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
613         return 0;
614
615 free_entry:
616         rte_free(rule);
617         return -rte_errno;
618 }
619
620 static int
621 ice_fdir_destroy_filter(struct ice_adapter *ad,
622                         struct rte_flow *flow,
623                         struct rte_flow_error *error)
624 {
625         struct ice_pf *pf = &ad->pf;
626         struct ice_fdir_filter_conf *filter;
627         int ret;
628
629         filter = (struct ice_fdir_filter_conf *)flow->rule;
630
631         ret = ice_fdir_add_del_filter(pf, filter, false);
632         if (ret) {
633                 rte_flow_error_set(error, -ret,
634                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
635                                    "Del filter rule failed.");
636                 return -rte_errno;
637         }
638
639         ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
640         flow->rule = NULL;
641
642         rte_free(filter);
643
644         return 0;
645 }
646
/* FDIR hooks registered with the generic flow framework.  Only the
 * init/uninit/create/destroy callbacks are provided here; other hooks
 * appear to be optional at this stage — TODO confirm against the
 * struct ice_flow_engine definition.
 */
static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.type = ICE_FLOW_ENGINE_FDIR,
};
654
655 static int
656 ice_fdir_parse_action_qregion(struct ice_pf *pf,
657                               struct rte_flow_error *error,
658                               const struct rte_flow_action *act,
659                               struct ice_fdir_filter_conf *filter)
660 {
661         const struct rte_flow_action_rss *rss = act->conf;
662         uint32_t i;
663
664         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
665                 rte_flow_error_set(error, EINVAL,
666                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
667                                    "Invalid action.");
668                 return -rte_errno;
669         }
670
671         if (rss->queue_num <= 1) {
672                 rte_flow_error_set(error, EINVAL,
673                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
674                                    "Queue region size can't be 0 or 1.");
675                 return -rte_errno;
676         }
677
678         /* check if queue index for queue region is continuous */
679         for (i = 0; i < rss->queue_num - 1; i++) {
680                 if (rss->queue[i + 1] != rss->queue[i] + 1) {
681                         rte_flow_error_set(error, EINVAL,
682                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
683                                            "Discontinuous queue region");
684                         return -rte_errno;
685                 }
686         }
687
688         if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
689                 rte_flow_error_set(error, EINVAL,
690                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
691                                    "Invalid queue region indexes.");
692                 return -rte_errno;
693         }
694
695         if (!(rte_is_power_of_2(rss->queue_num) &&
696              (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
697                 rte_flow_error_set(error, EINVAL,
698                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
699                                    "The region size should be any of the following values:"
700                                    "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
701                                    "of queues do not exceed the VSI allocation.");
702                 return -rte_errno;
703         }
704
705         filter->input.q_index = rss->queue[0];
706         filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
707         filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
708
709         return 0;
710 }
711
/* Parse the rte_flow action list of an FDIR rule into @filter.
 *
 * Exactly one destination action (QUEUE, DROP, PASSTHRU or RSS queue
 * group) and at most one MARK action are accepted; anything else is
 * rejected.  Returns 0 on success, -rte_errno (with @error populated)
 * otherwise.
 */
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;	/* destination actions seen so far */
	uint32_t mark_num = 0;	/* MARK actions seen so far */
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			/* passthru is programmed as "direct to queue 0" */
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			/* RSS here means an FDIR queue group, not hashing */
			ret = ice_fdir_parse_action_qregion(pf,
						error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Too many mark actions");
		return -rte_errno;
	}

	return 0;
}
796
797 static int
798 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
799                        const struct rte_flow_item pattern[],
800                        struct rte_flow_error *error,
801                        struct ice_fdir_filter_conf *filter)
802 {
803         const struct rte_flow_item *item = pattern;
804         enum rte_flow_item_type item_type;
805         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
806         const struct rte_flow_item_eth *eth_spec, *eth_mask;
807         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
808         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
809         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
810         const struct rte_flow_item_udp *udp_spec, *udp_mask;
811         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
812         uint64_t input_set = ICE_INSET_NONE;
813         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
814         uint8_t  ipv6_addr_mask[16] = {
815                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
816                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
817         };
818         uint32_t vtc_flow_cpu;
819
820
821         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
822                 if (item->last) {
823                         rte_flow_error_set(error, EINVAL,
824                                         RTE_FLOW_ERROR_TYPE_ITEM,
825                                         item,
826                                         "Not support range");
827                         return -rte_errno;
828                 }
829                 item_type = item->type;
830
831                 switch (item_type) {
832                 case RTE_FLOW_ITEM_TYPE_ETH:
833                         eth_spec = item->spec;
834                         eth_mask = item->mask;
835
836                         if (eth_spec && eth_mask) {
837                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
838                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
839                                         rte_flow_error_set(error, EINVAL,
840                                                 RTE_FLOW_ERROR_TYPE_ITEM,
841                                                 item,
842                                                 "Src mac not support");
843                                         return -rte_errno;
844                                 }
845
846                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
847                                         rte_flow_error_set(error, EINVAL,
848                                                 RTE_FLOW_ERROR_TYPE_ITEM,
849                                                 item,
850                                                 "Invalid mac addr mask");
851                                         return -rte_errno;
852                                 }
853
854                                 input_set |= ICE_INSET_DMAC;
855                                 rte_memcpy(&filter->input.ext_data.dst_mac,
856                                            &eth_spec->dst,
857                                            RTE_ETHER_ADDR_LEN);
858                         }
859                         break;
860                 case RTE_FLOW_ITEM_TYPE_IPV4:
861                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
862                         ipv4_spec = item->spec;
863                         ipv4_mask = item->mask;
864
865                         if (ipv4_spec && ipv4_mask) {
866                                 /* Check IPv4 mask and update input set */
867                                 if (ipv4_mask->hdr.version_ihl ||
868                                     ipv4_mask->hdr.total_length ||
869                                     ipv4_mask->hdr.packet_id ||
870                                     ipv4_mask->hdr.fragment_offset ||
871                                     ipv4_mask->hdr.hdr_checksum) {
872                                         rte_flow_error_set(error, EINVAL,
873                                                    RTE_FLOW_ERROR_TYPE_ITEM,
874                                                    item,
875                                                    "Invalid IPv4 mask.");
876                                         return -rte_errno;
877                                 }
878                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
879                                         input_set |= ICE_INSET_IPV4_SRC;
880                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
881                                         input_set |= ICE_INSET_IPV4_DST;
882                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
883                                         input_set |= ICE_INSET_IPV4_TOS;
884                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
885                                         input_set |= ICE_INSET_IPV4_TTL;
886                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
887                                         input_set |= ICE_INSET_IPV4_PROTO;
888
889                                 filter->input.ip.v4.dst_ip =
890                                         ipv4_spec->hdr.src_addr;
891                                 filter->input.ip.v4.src_ip =
892                                         ipv4_spec->hdr.dst_addr;
893                                 filter->input.ip.v4.tos =
894                                         ipv4_spec->hdr.type_of_service;
895                                 filter->input.ip.v4.ttl =
896                                         ipv4_spec->hdr.time_to_live;
897                                 filter->input.ip.v4.proto =
898                                         ipv4_spec->hdr.next_proto_id;
899                         }
900
901                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
902                         break;
903                 case RTE_FLOW_ITEM_TYPE_IPV6:
904                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
905                         ipv6_spec = item->spec;
906                         ipv6_mask = item->mask;
907
908                         if (ipv6_spec && ipv6_mask) {
909                                 /* Check IPv6 mask and update input set */
910                                 if (ipv6_mask->hdr.payload_len) {
911                                         rte_flow_error_set(error, EINVAL,
912                                                    RTE_FLOW_ERROR_TYPE_ITEM,
913                                                    item,
914                                                    "Invalid IPv6 mask");
915                                         return -rte_errno;
916                                 }
917
918                                 if (!memcmp(ipv6_mask->hdr.src_addr,
919                                             ipv6_addr_mask,
920                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
921                                         input_set |= ICE_INSET_IPV6_SRC;
922                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
923                                             ipv6_addr_mask,
924                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
925                                         input_set |= ICE_INSET_IPV6_DST;
926
927                                 if ((ipv6_mask->hdr.vtc_flow &
928                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
929                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
930                                         input_set |= ICE_INSET_IPV6_TC;
931                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
932                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
933                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
934                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
935
936                                 rte_memcpy(filter->input.ip.v6.dst_ip,
937                                            ipv6_spec->hdr.src_addr, 16);
938                                 rte_memcpy(filter->input.ip.v6.src_ip,
939                                            ipv6_spec->hdr.dst_addr, 16);
940
941                                 vtc_flow_cpu =
942                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
943                                 filter->input.ip.v6.tc =
944                                         (uint8_t)(vtc_flow_cpu >>
945                                                   ICE_FDIR_IPV6_TC_OFFSET);
946                                 filter->input.ip.v6.proto =
947                                         ipv6_spec->hdr.proto;
948                                 filter->input.ip.v6.hlim =
949                                         ipv6_spec->hdr.hop_limits;
950                         }
951
952                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
953                         break;
954                 case RTE_FLOW_ITEM_TYPE_TCP:
955                         tcp_spec = item->spec;
956                         tcp_mask = item->mask;
957
958                         if (tcp_spec && tcp_mask) {
959                                 /* Check TCP mask and update input set */
960                                 if (tcp_mask->hdr.sent_seq ||
961                                     tcp_mask->hdr.recv_ack ||
962                                     tcp_mask->hdr.data_off ||
963                                     tcp_mask->hdr.tcp_flags ||
964                                     tcp_mask->hdr.rx_win ||
965                                     tcp_mask->hdr.cksum ||
966                                     tcp_mask->hdr.tcp_urp) {
967                                         rte_flow_error_set(error, EINVAL,
968                                                    RTE_FLOW_ERROR_TYPE_ITEM,
969                                                    item,
970                                                    "Invalid TCP mask");
971                                         return -rte_errno;
972                                 }
973
974                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
975                                         input_set |= ICE_INSET_TCP_SRC_PORT;
976                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
977                                         input_set |= ICE_INSET_TCP_DST_PORT;
978
979                                 /* Get filter info */
980                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
981                                         filter->input.ip.v4.dst_port =
982                                                 tcp_spec->hdr.src_port;
983                                         filter->input.ip.v4.src_port =
984                                                 tcp_spec->hdr.dst_port;
985                                         flow_type =
986                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
987                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
988                                         filter->input.ip.v6.dst_port =
989                                                 tcp_spec->hdr.src_port;
990                                         filter->input.ip.v6.src_port =
991                                                 tcp_spec->hdr.dst_port;
992                                         flow_type =
993                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
994                                 }
995                         }
996                         break;
997                 case RTE_FLOW_ITEM_TYPE_UDP:
998                         udp_spec = item->spec;
999                         udp_mask = item->mask;
1000
1001                         if (udp_spec && udp_mask) {
1002                                 /* Check UDP mask and update input set*/
1003                                 if (udp_mask->hdr.dgram_len ||
1004                                     udp_mask->hdr.dgram_cksum) {
1005                                         rte_flow_error_set(error, EINVAL,
1006                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1007                                                    item,
1008                                                    "Invalid UDP mask");
1009                                         return -rte_errno;
1010                                 }
1011
1012                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1013                                         input_set |= ICE_INSET_UDP_SRC_PORT;
1014                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1015                                         input_set |= ICE_INSET_UDP_DST_PORT;
1016
1017                                 /* Get filter info */
1018                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1019                                         filter->input.ip.v4.dst_port =
1020                                                 udp_spec->hdr.src_port;
1021                                         filter->input.ip.v4.src_port =
1022                                                 udp_spec->hdr.dst_port;
1023                                         flow_type =
1024                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1025                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1026                                         filter->input.ip.v6.src_port =
1027                                                 udp_spec->hdr.src_port;
1028                                         filter->input.ip.v6.dst_port =
1029                                                 udp_spec->hdr.dst_port;
1030                                         flow_type =
1031                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1032                                 }
1033                         }
1034                         break;
1035                 case RTE_FLOW_ITEM_TYPE_SCTP:
1036                         sctp_spec = item->spec;
1037                         sctp_mask = item->mask;
1038
1039                         if (sctp_spec && sctp_mask) {
1040                                 /* Check SCTP mask and update input set */
1041                                 if (sctp_mask->hdr.cksum) {
1042                                         rte_flow_error_set(error, EINVAL,
1043                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1044                                                    item,
1045                                                    "Invalid UDP mask");
1046                                         return -rte_errno;
1047                                 }
1048
1049                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1050                                         input_set |= ICE_INSET_SCTP_SRC_PORT;
1051                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1052                                         input_set |= ICE_INSET_SCTP_DST_PORT;
1053
1054                                 /* Get filter info */
1055                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1056                                         filter->input.ip.v4.dst_port =
1057                                                 sctp_spec->hdr.src_port;
1058                                         filter->input.ip.v4.src_port =
1059                                                 sctp_spec->hdr.dst_port;
1060                                         flow_type =
1061                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1062                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1063                                         filter->input.ip.v6.dst_port =
1064                                                 sctp_spec->hdr.src_port;
1065                                         filter->input.ip.v6.src_port =
1066                                                 sctp_spec->hdr.dst_port;
1067                                         flow_type =
1068                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1069                                 }
1070                         }
1071                         break;
1072                 case RTE_FLOW_ITEM_TYPE_VOID:
1073                         break;
1074                 default:
1075                         rte_flow_error_set(error, EINVAL,
1076                                    RTE_FLOW_ERROR_TYPE_ITEM,
1077                                    item,
1078                                    "Invalid pattern item.");
1079                         return -rte_errno;
1080                 }
1081         }
1082
1083         filter->input.flow_type = flow_type;
1084         filter->input_set = input_set;
1085
1086         return 0;
1087 }
1088
1089 static int
1090 ice_fdir_parse(struct ice_adapter *ad,
1091                struct ice_pattern_match_item *array,
1092                uint32_t array_len,
1093                const struct rte_flow_item pattern[],
1094                const struct rte_flow_action actions[],
1095                void **meta,
1096                struct rte_flow_error *error)
1097 {
1098         struct ice_pf *pf = &ad->pf;
1099         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1100         struct ice_pattern_match_item *item = NULL;
1101         uint64_t input_set;
1102         int ret;
1103
1104         memset(filter, 0, sizeof(*filter));
1105         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1106         if (!item)
1107                 return -rte_errno;
1108
1109         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1110         if (ret)
1111                 return ret;
1112         input_set = filter->input_set;
1113         if (!input_set || input_set & ~item->input_set_mask) {
1114                 rte_flow_error_set(error, EINVAL,
1115                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1116                                    pattern,
1117                                    "Invalid input set");
1118                 return -rte_errno;
1119         }
1120
1121         ret = ice_fdir_parse_action(ad, actions, error, filter);
1122         if (ret)
1123                 return ret;
1124
1125         *meta = filter;
1126
1127         return 0;
1128 }
1129
/* Flow parser descriptor for FDIR: binds the supported-pattern table
 * (ice_fdir_pattern) and the parse callback (ice_fdir_parse) to the
 * FDIR engine at the distributor stage.
 */
static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern,
	.array_len = RTE_DIM(ice_fdir_pattern),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1137
/* Constructor (runs at shared-object load time): register the FDIR
 * flow engine with the generic flow framework.
 */
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}