net/ice: add flow director create and destroy
[dpdk.git] / drivers / net / ice / ice_fdir_filter.c
1 #include <stdio.h>
2 #include <rte_flow.h>
3 #include "base/ice_fdir.h"
4 #include "base/ice_flow.h"
5 #include "base/ice_type.h"
6 #include "ice_ethdev.h"
7 #include "ice_rxtx.h"
8 #include "ice_generic_flow.h"
9
/* IPv6 Traffic Class lives in bits 20-27 of the 32-bit vtc_flow word */
#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

/* Input-set bits accepted for a bare ETH/IPv4 pattern */
#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

/* IPv4 input set plus L4 ports for UDP/TCP/SCTP variants */
#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

/* Input-set bits accepted for a bare ETH/IPv6 pattern (no DMAC bit,
 * unlike the IPv4 variant above)
 */
#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

/* IPv6 input set plus L4 ports for UDP/TCP/SCTP variants */
#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
/* Flow patterns supported by the FDIR engine and the input-set bits each
 * pattern accepts; the third column (ICE_INSET_NONE here) is unused by
 * these entries.
 */
static struct ice_pattern_match_item ice_fdir_pattern[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
};

/* registered in ice_fdir_init() and unregistered in ice_fdir_uninit() */
static struct ice_flow_parser ice_fdir_parser;
58
59 static const struct rte_memzone *
60 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
61 {
62         return rte_memzone_reserve_aligned(name, len, socket_id,
63                                            RTE_MEMZONE_IOVA_CONTIG,
64                                            ICE_RING_BASE_ALIGN);
65 }
66
67 #define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"
68
69 static int
70 ice_fdir_prof_alloc(struct ice_hw *hw)
71 {
72         enum ice_fltr_ptype ptype, fltr_ptype;
73
74         if (!hw->fdir_prof) {
75                 hw->fdir_prof = (struct ice_fd_hw_prof **)
76                         ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
77                                    sizeof(*hw->fdir_prof));
78                 if (!hw->fdir_prof)
79                         return -ENOMEM;
80         }
81         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
82              ptype < ICE_FLTR_PTYPE_MAX;
83              ptype++) {
84                 if (!hw->fdir_prof[ptype]) {
85                         hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
86                                 ice_malloc(hw, sizeof(**hw->fdir_prof));
87                         if (!hw->fdir_prof[ptype])
88                                 goto fail_mem;
89                 }
90         }
91         return 0;
92
93 fail_mem:
94         for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
95              fltr_ptype < ptype;
96              fltr_ptype++)
97                 rte_free(hw->fdir_prof[fltr_ptype]);
98         rte_free(hw->fdir_prof);
99         return -ENOMEM;
100 }
101
102 /*
103  * ice_fdir_setup - reserve and initialize the Flow Director resources
104  * @pf: board private structure
105  */
106 static int
107 ice_fdir_setup(struct ice_pf *pf)
108 {
109         struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
110         struct ice_hw *hw = ICE_PF_TO_HW(pf);
111         const struct rte_memzone *mz = NULL;
112         char z_name[RTE_MEMZONE_NAMESIZE];
113         struct ice_vsi *vsi;
114         int err = ICE_SUCCESS;
115
116         if ((pf->flags & ICE_FLAG_FDIR) == 0) {
117                 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
118                 return -ENOTSUP;
119         }
120
121         PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
122                     " fd_fltr_best_effort = %u.",
123                     hw->func_caps.fd_fltr_guar,
124                     hw->func_caps.fd_fltr_best_effort);
125
126         if (pf->fdir.fdir_vsi) {
127                 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
128                 return ICE_SUCCESS;
129         }
130
131         /* make new FDIR VSI */
132         vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
133         if (!vsi) {
134                 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
135                 return -EINVAL;
136         }
137         pf->fdir.fdir_vsi = vsi;
138
139         /*Fdir tx queue setup*/
140         err = ice_fdir_setup_tx_resources(pf);
141         if (err) {
142                 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
143                 goto fail_setup_tx;
144         }
145
146         /*Fdir rx queue setup*/
147         err = ice_fdir_setup_rx_resources(pf);
148         if (err) {
149                 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
150                 goto fail_setup_rx;
151         }
152
153         err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
154         if (err) {
155                 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
156                 goto fail_mem;
157         }
158
159         err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
160         if (err) {
161                 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
162                 goto fail_mem;
163         }
164
165         /* reserve memory for the fdir programming packet */
166         snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
167                  ICE_FDIR_MZ_NAME,
168                  eth_dev->data->port_id);
169         mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
170         if (!mz) {
171                 PMD_DRV_LOG(ERR, "Cannot init memzone for "
172                             "flow director program packet.");
173                 err = -ENOMEM;
174                 goto fail_mem;
175         }
176         pf->fdir.prg_pkt = mz->addr;
177         pf->fdir.dma_addr = mz->iova;
178
179         err = ice_fdir_prof_alloc(hw);
180         if (err) {
181                 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
182                             "flow director profile.");
183                 err = -ENOMEM;
184                 goto fail_mem;
185         }
186
187         PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
188                     vsi->base_queue);
189         return ICE_SUCCESS;
190
191 fail_mem:
192         ice_rx_queue_release(pf->fdir.rxq);
193         pf->fdir.rxq = NULL;
194 fail_setup_rx:
195         ice_tx_queue_release(pf->fdir.txq);
196         pf->fdir.txq = NULL;
197 fail_setup_tx:
198         ice_release_vsi(vsi);
199         pf->fdir.fdir_vsi = NULL;
200         return err;
201 }
202
203 static void
204 ice_fdir_prof_free(struct ice_hw *hw)
205 {
206         enum ice_fltr_ptype ptype;
207
208         for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
209              ptype < ICE_FLTR_PTYPE_MAX;
210              ptype++)
211                 rte_free(hw->fdir_prof[ptype]);
212
213         rte_free(hw->fdir_prof);
214 }
215
216 /* Remove a profile for some filter type */
217 static void
218 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
219 {
220         struct ice_hw *hw = ICE_PF_TO_HW(pf);
221         struct ice_fd_hw_prof *hw_prof;
222         uint64_t prof_id;
223         uint16_t vsi_num;
224         int i;
225
226         if (!hw->fdir_prof || !hw->fdir_prof[ptype])
227                 return;
228
229         hw_prof = hw->fdir_prof[ptype];
230
231         prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
232         for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
233                 if (hw_prof->entry_h[i][is_tunnel]) {
234                         vsi_num = ice_get_hw_vsi_num(hw,
235                                                      hw_prof->vsi_h[i]);
236                         ice_rem_prof_id_flow(hw, ICE_BLK_FD,
237                                              vsi_num, ptype);
238                         ice_flow_rem_entry(hw,
239                                            hw_prof->entry_h[i][is_tunnel]);
240                         hw_prof->entry_h[i][is_tunnel] = 0;
241                 }
242         }
243         ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
244         rte_free(hw_prof->fdir_seg[is_tunnel]);
245         hw_prof->fdir_seg[is_tunnel] = NULL;
246
247         for (i = 0; i < hw_prof->cnt; i++)
248                 hw_prof->vsi_h[i] = 0;
249         pf->hw_prof_cnt[ptype][is_tunnel] = 0;
250 }
251
252 /* Remove all created profiles */
253 static void
254 ice_fdir_prof_rm_all(struct ice_pf *pf)
255 {
256         enum ice_fltr_ptype ptype;
257
258         for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
259              ptype < ICE_FLTR_PTYPE_MAX;
260              ptype++) {
261                 ice_fdir_prof_rm(pf, ptype, false);
262                 ice_fdir_prof_rm(pf, ptype, true);
263         }
264 }
265
/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	/* nothing to do if FDIR was never set up (or already torn down) */
	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	/* stop the programming queues; failures are logged but do not
	 * abort the teardown
	 */
	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	/* release queue resources and clear the stale pointers */
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	/* remove all HW flow profiles before freeing the profile table */
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}
299
/**
 * ice_fdir_hw_tbl_conf - configure an FDIR flow profile in the HW table
 * @pf: board private structure
 * @vsi: main VSI that receives matched traffic
 * @ctrl_vsi: FDIR control VSI used for programming
 * @seg: flow segment(s) describing the input set; for tunnel profiles this
 *       is an array of two segments with the inner segment at seg[1]
 * @ptype: filter packet type the profile is built for
 * @is_tunnel: whether this is a tunnel profile
 *
 * Adds a flow profile for (ptype, is_tunnel) with one entry per VSI (main
 * and control).  On success the profile table takes ownership of @seg.
 *
 * Return: 0 on success; -EAGAIN if an identical profile already exists
 * (caller treats this as success); -EINVAL if the existing profile differs
 * but still has filters referencing it; otherwise the status of the
 * failing ice_flow_* call.
 */
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		/* same segment already programmed: nothing to do */
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			/* tunnel profiles compare the inner segment */
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		/* a different input set cannot replace a profile that
		 * still has live filters
		 */
		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	/* tunnel profile IDs are offset by ICE_FLTR_PTYPE_MAX */
	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	/* record both entries (main VSI first, then control VSI) in the
	 * software profile bookkeeping
	 */
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
378
379 static void
380 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
381 {
382         uint32_t i, j;
383
384         struct ice_inset_map {
385                 uint64_t inset;
386                 enum ice_flow_field fld;
387         };
388         static const struct ice_inset_map ice_inset_map[] = {
389                 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
390                 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
391                 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
392                 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
393                 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
394                 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
395                 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
396                 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
397                 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
398                 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
399                 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
400                 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
401                 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
402                 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
403                 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
404                 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
405                 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
406         };
407
408         for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
409                 if ((inset & ice_inset_map[i].inset) ==
410                     ice_inset_map[i].inset)
411                         field[j++] = ice_inset_map[i].fld;
412         }
413 }
414
415 static int
416 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
417                         uint64_t input_set, bool is_tunnel)
418 {
419         struct ice_flow_seg_info *seg;
420         struct ice_flow_seg_info *seg_tun = NULL;
421         enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
422         int i, ret;
423
424         if (!input_set)
425                 return -EINVAL;
426
427         seg = (struct ice_flow_seg_info *)
428                 ice_malloc(hw, sizeof(*seg));
429         if (!seg) {
430                 PMD_DRV_LOG(ERR, "No memory can be allocated");
431                 return -ENOMEM;
432         }
433
434         for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
435                 field[i] = ICE_FLOW_FIELD_IDX_MAX;
436         ice_fdir_input_set_parse(input_set, field);
437
438         switch (flow) {
439         case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
440                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
441                                   ICE_FLOW_SEG_HDR_IPV4);
442                 break;
443         case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
444                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
445                                   ICE_FLOW_SEG_HDR_IPV4);
446                 break;
447         case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
448                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
449                                   ICE_FLOW_SEG_HDR_IPV4);
450                 break;
451         case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
452                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
453                 break;
454         case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
455                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
456                                   ICE_FLOW_SEG_HDR_IPV6);
457                 break;
458         case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
459                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
460                                   ICE_FLOW_SEG_HDR_IPV6);
461                 break;
462         case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
463                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
464                                   ICE_FLOW_SEG_HDR_IPV6);
465                 break;
466         case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
467                 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
468                 break;
469         default:
470                 PMD_DRV_LOG(ERR, "not supported filter type.");
471                 break;
472         }
473
474         for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
475                 ice_flow_set_fld(seg, field[i],
476                                  ICE_FLOW_FLD_OFF_INVAL,
477                                  ICE_FLOW_FLD_OFF_INVAL,
478                                  ICE_FLOW_FLD_OFF_INVAL, false);
479         }
480
481         if (!is_tunnel) {
482                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
483                                            seg, flow, false);
484         } else {
485                 seg_tun = (struct ice_flow_seg_info *)
486                         ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
487                 if (!seg_tun) {
488                         PMD_DRV_LOG(ERR, "No memory can be allocated");
489                         rte_free(seg);
490                         return -ENOMEM;
491                 }
492                 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
493                 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
494                                            seg_tun, flow, true);
495         }
496
497         if (!ret) {
498                 return ret;
499         } else if (ret < 0) {
500                 rte_free(seg);
501                 if (is_tunnel)
502                         rte_free(seg_tun);
503                 return (ret == -EAGAIN) ? 0 : ret;
504         } else {
505                 return ret;
506         }
507 }
508
509 static void
510 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
511                     bool is_tunnel, bool add)
512 {
513         struct ice_hw *hw = ICE_PF_TO_HW(pf);
514         int cnt;
515
516         cnt = (add) ? 1 : -1;
517         hw->fdir_active_fltr += cnt;
518         if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
519                 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
520         else
521                 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
522 }
523
524 static int
525 ice_fdir_init(struct ice_adapter *ad)
526 {
527         struct ice_pf *pf = &ad->pf;
528         int ret;
529
530         ret = ice_fdir_setup(pf);
531         if (ret)
532                 return ret;
533
534         return ice_register_parser(&ice_fdir_parser, ad);
535 }
536
537 static void
538 ice_fdir_uninit(struct ice_adapter *ad)
539 {
540         struct ice_pf *pf = &ad->pf;
541
542         ice_unregister_parser(&ice_fdir_parser, ad);
543
544         ice_fdir_teardown(pf);
545 }
546
547 static int
548 ice_fdir_add_del_filter(struct ice_pf *pf,
549                         struct ice_fdir_filter_conf *filter,
550                         bool add)
551 {
552         struct ice_fltr_desc desc;
553         struct ice_hw *hw = ICE_PF_TO_HW(pf);
554         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
555         int ret;
556
557         filter->input.dest_vsi = pf->main_vsi->idx;
558
559         memset(&desc, 0, sizeof(desc));
560         ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
561
562         memset(pkt, 0, ICE_FDIR_PKT_LEN);
563         ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
564         if (ret) {
565                 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
566                 return -EINVAL;
567         }
568
569         return ice_fdir_programming(pf, &desc);
570 }
571
/**
 * ice_fdir_create_filter - engine create hook for an FDIR rule
 * @ad: ice adapter
 * @flow: rte_flow handle; on success its rule pointer is set to a private
 *        copy of the filter configuration (freed in the destroy hook)
 * @meta: parsed ice_fdir_filter_conf produced by the parser
 * @error: rte_flow error reporting structure
 *
 * Configures the HW profile for the filter's flow type, programs the rule,
 * and on success attaches a heap copy of the configuration to @flow and
 * bumps the filter counters.
 *
 * Return: 0 on success, -rte_errno on failure (error is populated).
 */
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_filter_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	/* ensure the HW flow profile for this ptype/input-set exists */
	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
			filter->input_set, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_entry;
	}

	/* hand ownership of the copied rule to the flow handle */
	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;
	ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}
617
618 static int
619 ice_fdir_destroy_filter(struct ice_adapter *ad,
620                         struct rte_flow *flow,
621                         struct rte_flow_error *error)
622 {
623         struct ice_pf *pf = &ad->pf;
624         struct ice_fdir_filter_conf *filter;
625         int ret;
626
627         filter = (struct ice_fdir_filter_conf *)flow->rule;
628
629         ret = ice_fdir_add_del_filter(pf, filter, false);
630         if (ret) {
631                 rte_flow_error_set(error, -ret,
632                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
633                                    "Del filter rule failed.");
634                 return -rte_errno;
635         }
636
637         ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
638         flow->rule = NULL;
639
640         rte_free(filter);
641
642         return 0;
643 }
644
/* Flow engine ops exposed to the generic flow framework: lifecycle hooks
 * plus per-rule create/destroy for the flow director.
 */
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .type = ICE_FLOW_ENGINE_FDIR,
};
652
/**
 * ice_fdir_parse_action - validate rte_flow actions and fill the filter
 * @ad: ice adapter
 * @actions: action list terminated by RTE_FLOW_ACTION_TYPE_END
 * @error: rte_flow error reporting structure
 * @filter: filter configuration to populate (dest_ctl, q_index, fltr_id)
 *
 * Accepts exactly one destination action (QUEUE, DROP or PASSTHRU) and at
 * most one MARK action.  Note: after the loop, @actions points at the END
 * item, so the combination errors below report against END.
 *
 * Return: 0 on success, -rte_errno on invalid actions.
 */
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			/* queue index must be within the configured RX queues */
			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			/* PASSTHRU is programmed as direct-to-queue 0;
			 * NOTE(review): presumably equivalent to "no
			 * steering" for this HW — confirm against the
			 * ice FDIR programming descriptor semantics.
			 */
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
			return -rte_errno;
		}
	}

	/* exactly one destination action is required */
	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Too many mark actions");
		return -rte_errno;
	}

	return 0;
}
728
729 static int
730 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
731                        const struct rte_flow_item pattern[],
732                        struct rte_flow_error *error,
733                        struct ice_fdir_filter_conf *filter)
734 {
735         const struct rte_flow_item *item = pattern;
736         enum rte_flow_item_type item_type;
737         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
738         const struct rte_flow_item_eth *eth_spec, *eth_mask;
739         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
740         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
741         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
742         const struct rte_flow_item_udp *udp_spec, *udp_mask;
743         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
744         uint64_t input_set = ICE_INSET_NONE;
745         uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
746         uint8_t  ipv6_addr_mask[16] = {
747                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
748                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
749         };
750         uint32_t vtc_flow_cpu;
751
752
753         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
754                 if (item->last) {
755                         rte_flow_error_set(error, EINVAL,
756                                         RTE_FLOW_ERROR_TYPE_ITEM,
757                                         item,
758                                         "Not support range");
759                         return -rte_errno;
760                 }
761                 item_type = item->type;
762
763                 switch (item_type) {
764                 case RTE_FLOW_ITEM_TYPE_ETH:
765                         eth_spec = item->spec;
766                         eth_mask = item->mask;
767
768                         if (eth_spec && eth_mask) {
769                                 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
770                                     !rte_is_zero_ether_addr(&eth_mask->src)) {
771                                         rte_flow_error_set(error, EINVAL,
772                                                 RTE_FLOW_ERROR_TYPE_ITEM,
773                                                 item,
774                                                 "Src mac not support");
775                                         return -rte_errno;
776                                 }
777
778                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
779                                         rte_flow_error_set(error, EINVAL,
780                                                 RTE_FLOW_ERROR_TYPE_ITEM,
781                                                 item,
782                                                 "Invalid mac addr mask");
783                                         return -rte_errno;
784                                 }
785
786                                 input_set |= ICE_INSET_DMAC;
787                                 rte_memcpy(&filter->input.ext_data.dst_mac,
788                                            &eth_spec->dst,
789                                            RTE_ETHER_ADDR_LEN);
790                         }
791                         break;
792                 case RTE_FLOW_ITEM_TYPE_IPV4:
793                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
794                         ipv4_spec = item->spec;
795                         ipv4_mask = item->mask;
796
797                         if (ipv4_spec && ipv4_mask) {
798                                 /* Check IPv4 mask and update input set */
799                                 if (ipv4_mask->hdr.version_ihl ||
800                                     ipv4_mask->hdr.total_length ||
801                                     ipv4_mask->hdr.packet_id ||
802                                     ipv4_mask->hdr.fragment_offset ||
803                                     ipv4_mask->hdr.hdr_checksum) {
804                                         rte_flow_error_set(error, EINVAL,
805                                                    RTE_FLOW_ERROR_TYPE_ITEM,
806                                                    item,
807                                                    "Invalid IPv4 mask.");
808                                         return -rte_errno;
809                                 }
810                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
811                                         input_set |= ICE_INSET_IPV4_SRC;
812                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
813                                         input_set |= ICE_INSET_IPV4_DST;
814                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
815                                         input_set |= ICE_INSET_IPV4_TOS;
816                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
817                                         input_set |= ICE_INSET_IPV4_TTL;
818                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
819                                         input_set |= ICE_INSET_IPV4_PROTO;
820
821                                 filter->input.ip.v4.dst_ip =
822                                         ipv4_spec->hdr.src_addr;
823                                 filter->input.ip.v4.src_ip =
824                                         ipv4_spec->hdr.dst_addr;
825                                 filter->input.ip.v4.tos =
826                                         ipv4_spec->hdr.type_of_service;
827                                 filter->input.ip.v4.ttl =
828                                         ipv4_spec->hdr.time_to_live;
829                                 filter->input.ip.v4.proto =
830                                         ipv4_spec->hdr.next_proto_id;
831                         }
832
833                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
834                         break;
835                 case RTE_FLOW_ITEM_TYPE_IPV6:
836                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
837                         ipv6_spec = item->spec;
838                         ipv6_mask = item->mask;
839
840                         if (ipv6_spec && ipv6_mask) {
841                                 /* Check IPv6 mask and update input set */
842                                 if (ipv6_mask->hdr.payload_len) {
843                                         rte_flow_error_set(error, EINVAL,
844                                                    RTE_FLOW_ERROR_TYPE_ITEM,
845                                                    item,
846                                                    "Invalid IPv6 mask");
847                                         return -rte_errno;
848                                 }
849
850                                 if (!memcmp(ipv6_mask->hdr.src_addr,
851                                             ipv6_addr_mask,
852                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
853                                         input_set |= ICE_INSET_IPV6_SRC;
854                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
855                                             ipv6_addr_mask,
856                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
857                                         input_set |= ICE_INSET_IPV6_DST;
858
859                                 if ((ipv6_mask->hdr.vtc_flow &
860                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
861                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
862                                         input_set |= ICE_INSET_IPV6_TC;
863                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
864                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
865                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
866                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
867
868                                 rte_memcpy(filter->input.ip.v6.dst_ip,
869                                            ipv6_spec->hdr.src_addr, 16);
870                                 rte_memcpy(filter->input.ip.v6.src_ip,
871                                            ipv6_spec->hdr.dst_addr, 16);
872
873                                 vtc_flow_cpu =
874                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
875                                 filter->input.ip.v6.tc =
876                                         (uint8_t)(vtc_flow_cpu >>
877                                                   ICE_FDIR_IPV6_TC_OFFSET);
878                                 filter->input.ip.v6.proto =
879                                         ipv6_spec->hdr.proto;
880                                 filter->input.ip.v6.hlim =
881                                         ipv6_spec->hdr.hop_limits;
882                         }
883
884                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
885                         break;
886                 case RTE_FLOW_ITEM_TYPE_TCP:
887                         tcp_spec = item->spec;
888                         tcp_mask = item->mask;
889
890                         if (tcp_spec && tcp_mask) {
891                                 /* Check TCP mask and update input set */
892                                 if (tcp_mask->hdr.sent_seq ||
893                                     tcp_mask->hdr.recv_ack ||
894                                     tcp_mask->hdr.data_off ||
895                                     tcp_mask->hdr.tcp_flags ||
896                                     tcp_mask->hdr.rx_win ||
897                                     tcp_mask->hdr.cksum ||
898                                     tcp_mask->hdr.tcp_urp) {
899                                         rte_flow_error_set(error, EINVAL,
900                                                    RTE_FLOW_ERROR_TYPE_ITEM,
901                                                    item,
902                                                    "Invalid TCP mask");
903                                         return -rte_errno;
904                                 }
905
906                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
907                                         input_set |= ICE_INSET_TCP_SRC_PORT;
908                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
909                                         input_set |= ICE_INSET_TCP_DST_PORT;
910
911                                 /* Get filter info */
912                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
913                                         filter->input.ip.v4.dst_port =
914                                                 tcp_spec->hdr.src_port;
915                                         filter->input.ip.v4.src_port =
916                                                 tcp_spec->hdr.dst_port;
917                                         flow_type =
918                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
919                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
920                                         filter->input.ip.v6.dst_port =
921                                                 tcp_spec->hdr.src_port;
922                                         filter->input.ip.v6.src_port =
923                                                 tcp_spec->hdr.dst_port;
924                                         flow_type =
925                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
926                                 }
927                         }
928                         break;
929                 case RTE_FLOW_ITEM_TYPE_UDP:
930                         udp_spec = item->spec;
931                         udp_mask = item->mask;
932
933                         if (udp_spec && udp_mask) {
934                                 /* Check UDP mask and update input set*/
935                                 if (udp_mask->hdr.dgram_len ||
936                                     udp_mask->hdr.dgram_cksum) {
937                                         rte_flow_error_set(error, EINVAL,
938                                                    RTE_FLOW_ERROR_TYPE_ITEM,
939                                                    item,
940                                                    "Invalid UDP mask");
941                                         return -rte_errno;
942                                 }
943
944                                 if (udp_mask->hdr.src_port == UINT16_MAX)
945                                         input_set |= ICE_INSET_UDP_SRC_PORT;
946                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
947                                         input_set |= ICE_INSET_UDP_DST_PORT;
948
949                                 /* Get filter info */
950                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
951                                         filter->input.ip.v4.dst_port =
952                                                 udp_spec->hdr.src_port;
953                                         filter->input.ip.v4.src_port =
954                                                 udp_spec->hdr.dst_port;
955                                         flow_type =
956                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
957                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
958                                         filter->input.ip.v6.src_port =
959                                                 udp_spec->hdr.src_port;
960                                         filter->input.ip.v6.dst_port =
961                                                 udp_spec->hdr.dst_port;
962                                         flow_type =
963                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
964                                 }
965                         }
966                         break;
967                 case RTE_FLOW_ITEM_TYPE_SCTP:
968                         sctp_spec = item->spec;
969                         sctp_mask = item->mask;
970
971                         if (sctp_spec && sctp_mask) {
972                                 /* Check SCTP mask and update input set */
973                                 if (sctp_mask->hdr.cksum) {
974                                         rte_flow_error_set(error, EINVAL,
975                                                    RTE_FLOW_ERROR_TYPE_ITEM,
976                                                    item,
977                                                    "Invalid UDP mask");
978                                         return -rte_errno;
979                                 }
980
981                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
982                                         input_set |= ICE_INSET_SCTP_SRC_PORT;
983                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
984                                         input_set |= ICE_INSET_SCTP_DST_PORT;
985
986                                 /* Get filter info */
987                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
988                                         filter->input.ip.v4.dst_port =
989                                                 sctp_spec->hdr.src_port;
990                                         filter->input.ip.v4.src_port =
991                                                 sctp_spec->hdr.dst_port;
992                                         flow_type =
993                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
994                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
995                                         filter->input.ip.v6.dst_port =
996                                                 sctp_spec->hdr.src_port;
997                                         filter->input.ip.v6.src_port =
998                                                 sctp_spec->hdr.dst_port;
999                                         flow_type =
1000                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1001                                 }
1002                         }
1003                         break;
1004                 case RTE_FLOW_ITEM_TYPE_VOID:
1005                         break;
1006                 default:
1007                         rte_flow_error_set(error, EINVAL,
1008                                    RTE_FLOW_ERROR_TYPE_ITEM,
1009                                    item,
1010                                    "Invalid pattern item.");
1011                         return -rte_errno;
1012                 }
1013         }
1014
1015         filter->input.flow_type = flow_type;
1016         filter->input_set = input_set;
1017
1018         return 0;
1019 }
1020
1021 static int
1022 ice_fdir_parse(struct ice_adapter *ad,
1023                struct ice_pattern_match_item *array,
1024                uint32_t array_len,
1025                const struct rte_flow_item pattern[],
1026                const struct rte_flow_action actions[],
1027                void **meta,
1028                struct rte_flow_error *error)
1029 {
1030         struct ice_pf *pf = &ad->pf;
1031         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1032         struct ice_pattern_match_item *item = NULL;
1033         uint64_t input_set;
1034         int ret;
1035
1036         memset(filter, 0, sizeof(*filter));
1037         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1038         if (!item)
1039                 return -rte_errno;
1040
1041         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1042         if (ret)
1043                 return ret;
1044         input_set = filter->input_set;
1045         if (!input_set || input_set & ~item->input_set_mask) {
1046                 rte_flow_error_set(error, EINVAL,
1047                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1048                                    pattern,
1049                                    "Invalid input set");
1050                 return -rte_errno;
1051         }
1052
1053         ret = ice_fdir_parse_action(ad, actions, error, filter);
1054         if (ret)
1055                 return ret;
1056
1057         *meta = filter;
1058
1059         return 0;
1060 }
1061
/*
 * Flow parser registered with the generic ice flow framework: it maps
 * the FDIR pattern table above onto the FDIR engine via ice_fdir_parse().
 */
static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,	/* engine that programs the filter */
	.array = ice_fdir_pattern,	/* supported patterns + input sets */
	.array_len = RTE_DIM(ice_fdir_pattern),
	.parse_pattern_action = ice_fdir_parse,	/* pattern/action callback */
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1069
/*
 * Register the FDIR flow engine with the generic flow framework at
 * load time (RTE_INIT expands to a constructor-attribute function).
 */
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}