/* dpdk.git: drivers/net/ice/ice_fdir_filter.c */
#include <stdio.h>
#include <rte_flow.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

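/*
 * Reserve an IOVA-contiguous, ring-aligned memzone; used below for the
 * FDIR programming packet.
 */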
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

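/*
 * ice_fdir_prof_alloc - allocate a HW profile slot for each filter ptype
 * @hw: pointer to the hardware structure
 */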
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        return -ENOMEM;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

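/* Free the per-ptype profile table allocated by ice_fdir_prof_alloc() */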
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

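/*
 * Program a HW profile and its flow entries (main VSI and control VSI) for
 * the given filter ptype. An existing profile with a different segment is
 * replaced only when no filters of that type are currently installed;
 * -EAGAIN means the profile is already configured with the same segment.
 */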
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

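/* Translate an input-set bitmap into the list of HW flow fields to match */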
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

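/*
 * ice_fdir_input_set_conf - build flow segment(s) for an input set and
 * program the matching HW profile for the given flow type
 * @pf: board private structure
 * @flow: FDIR filter packet type
 * @input_set: bitmap of fields to match
 * @is_tunnel: true when configuring a tunnel profile (two flow segments)
 *
 * Illustrative call only; the function is currently unused (__rte_unused):
 *      ice_fdir_input_set_conf(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
 *                              ICE_INSET_IPV4_SRC | ICE_INSET_UDP_DST_PORT,
 *                              false);
 */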
static int __rte_unused
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

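/* Update the active-filter counters when a rule is added or removed */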
static void __rte_unused
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

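/* Flow engine init hook: set up the FDIR resources for this PF */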
static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;

        return ice_fdir_setup(pf);
}

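/* Flow engine uninit hook: release the FDIR resources of this PF */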
static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;

        ice_fdir_teardown(pf);
}

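/* Register the FDIR engine with the generic flow framework at startup */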
static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .type = ICE_FLOW_ENGINE_FDIR,
};

RTE_INIT(ice_fdir_engine_register)
{
        ice_register_flow_engine(&ice_fdir_engine);
}