3 #include "base/ice_fdir.h"
4 #include "base/ice_flow.h"
5 #include "base/ice_type.h"
6 #include "ice_ethdev.h"
8 #include "ice_generic_flow.h"
/**
 * ice_memzone_reserve - reserve a DMA-capable memzone
 * @name: zone name
 * @len: zone length in bytes
 * @socket_id: NUMA socket to allocate from
 *
 * Thin wrapper around rte_memzone_reserve_aligned() that always requests
 * RTE_MEMZONE_IOVA_CONTIG, so the reserved zone is physically contiguous
 * and usable for device DMA (it holds the FDIR programming packet).
 * NOTE(review): this listing is truncated — the trailing alignment
 * argument and the closing brace are not visible in this excerpt.
 */
10 static const struct rte_memzone *
11 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
13 	return rte_memzone_reserve_aligned(name, len, socket_id,
14 					   RTE_MEMZONE_IOVA_CONTIG,
/* Name stem for the FDIR programming-packet memzone (suffixed per port). */
18 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"

/**
 * ice_fdir_prof_alloc - allocate the per-flow-type FD HW profile table
 * @hw: hardware structure
 *
 * Allocates hw->fdir_prof, an array of ICE_FLTR_PTYPE_MAX pointers, then one
 * struct ice_fd_hw_prof per filter ptype starting at
 * ICE_FLTR_PTYPE_NONF_IPV4_UDP.  On a per-entry allocation failure, the
 * second loop walks the ptypes allocated so far, frees each entry, and then
 * frees the table itself (classic partial-unwind pattern).
 * NOTE(review): this listing is truncated — return statements, the error
 * label, and the loop increments are not visible in this excerpt; confirm
 * against the full source before relying on the exact unwind order.
 */
21 ice_fdir_prof_alloc(struct ice_hw *hw)
23 	enum ice_fltr_ptype ptype, fltr_ptype;
26 	hw->fdir_prof = (struct ice_fd_hw_prof **)
27 		ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
28 			   sizeof(*hw->fdir_prof));
	/* Allocate one HW profile slot per supported filter packet type. */
32 	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
33 	     ptype < ICE_FLTR_PTYPE_MAX;
35 		if (!hw->fdir_prof[ptype]) {
36 			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
37 				ice_malloc(hw, sizeof(**hw->fdir_prof));
38 			if (!hw->fdir_prof[ptype])
	/* Failure path: free everything allocated before the failing ptype. */
45 	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
48 		rte_free(hw->fdir_prof[fltr_ptype]);
49 	rte_free(hw->fdir_prof);
54 * ice_fdir_setup - reserve and initialize the Flow Director resources
55 * @pf: board private structure
 *
 * Sequence: verify HW FDIR capability -> create a control VSI -> allocate
 * and start the FDIR TX/RX queues -> reserve a DMA memzone for the FDIR
 * programming packet -> allocate the per-ptype profile table.  Each failing
 * step unwinds the steps before it (labels near the end of the function).
 * Returns ICE_SUCCESS (0) on success, negative errno-style code on failure.
 * NOTE(review): listing is truncated — several closing braces, goto
 * statements, and return paths are not visible in this excerpt.
58 ice_fdir_setup(struct ice_pf *pf)
60 	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
61 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
62 	const struct rte_memzone *mz = NULL;
63 	char z_name[RTE_MEMZONE_NAMESIZE];
65 	int err = ICE_SUCCESS;
	/* Bail out early when the device did not advertise FDIR support. */
67 	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
68 	PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
72 	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
73 		    " fd_fltr_best_effort = %u.",
74 		    hw->func_caps.fd_fltr_guar,
75 		    hw->func_caps.fd_fltr_best_effort);
	/* Idempotent: a second call is a no-op once the control VSI exists. */
77 	if (pf->fdir.fdir_vsi) {
78 		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
82 	/* make new FDIR VSI */
83 	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
85 		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
88 	pf->fdir.fdir_vsi = vsi;
90 	/*Fdir tx queue setup*/
91 	err = ice_fdir_setup_tx_resources(pf);
93 		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
97 	/*Fdir rx queue setup*/
98 	err = ice_fdir_setup_rx_resources(pf);
100 		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
104 	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
106 		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
110 	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
112 		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
116 	/* reserve memory for the fdir programming packet */
117 	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
119 		 eth_dev->data->port_id);
120 	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
122 		PMD_DRV_LOG(ERR, "Cannot init memzone for "
123 			    "flow director program packet.");
	/* Keep both the virtual address (CPU writes) and IOVA (device DMA). */
127 	pf->fdir.prg_pkt = mz->addr;
128 	pf->fdir.dma_addr = mz->iova;
130 	err = ice_fdir_prof_alloc(hw);
132 		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
133 			    "flow director profile.");
138 	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
	/* Error unwind: release resources in reverse order of acquisition. */
143 	ice_rx_queue_release(pf->fdir.rxq);
146 	ice_tx_queue_release(pf->fdir.txq);
149 	ice_release_vsi(vsi);
150 	pf->fdir.fdir_vsi = NULL;
/**
 * ice_fdir_prof_free - free the per-flow-type FD HW profile table
 * @hw: hardware structure
 *
 * Counterpart of ice_fdir_prof_alloc(): frees every per-ptype profile entry
 * (same ptype range as the allocator), then the pointer table itself.
 * NOTE(review): listing is truncated — loop increment/braces not visible.
 */
155 ice_fdir_prof_free(struct ice_hw *hw)
157 	enum ice_fltr_ptype ptype;
159 	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
160 	     ptype < ICE_FLTR_PTYPE_MAX;
162 		rte_free(hw->fdir_prof[ptype]);
164 	rte_free(hw->fdir_prof);
167 /* Remove a profile for some filter type */
/*
 * ice_fdir_prof_rm - tear down the HW flow profile for one filter ptype
 * @pf: board private structure
 * @ptype: filter packet type whose profile is removed
 * @is_tunnel: selects the tunnel (true) or non-tunnel (false) profile slot
 *
 * Removal order per entry: detach the profile from the VSI
 * (ice_rem_prof_id_flow), then remove the flow entry, then zero the handle.
 * Afterwards the profile itself is removed, the cached segment freed, the
 * cached VSI handles cleared and the bookkeeping counter reset.
 * prof_id encodes tunnel-ness by offsetting with ICE_FLTR_PTYPE_MAX — this
 * must match the id used at creation time in ice_fdir_hw_tbl_conf().
 * NOTE(review): listing is truncated — the early-return and some closing
 * braces are not visible in this excerpt.
 */
169 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
171 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
172 	struct ice_fd_hw_prof *hw_prof;
	/* Nothing to do when the table or this ptype was never allocated. */
177 	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
180 	hw_prof = hw->fdir_prof[ptype];
182 	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
183 	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
184 		if (hw_prof->entry_h[i][is_tunnel]) {
185 			vsi_num = ice_get_hw_vsi_num(hw,
187 			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
189 			ice_flow_rem_entry(hw,
190 					   hw_prof->entry_h[i][is_tunnel]);
191 			hw_prof->entry_h[i][is_tunnel] = 0;
194 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
195 	rte_free(hw_prof->fdir_seg[is_tunnel]);
196 	hw_prof->fdir_seg[is_tunnel] = NULL;
198 	for (i = 0; i < hw_prof->cnt; i++)
199 		hw_prof->vsi_h[i] = 0;
200 	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
203 /* Remove all created profiles */
/*
 * ice_fdir_prof_rm_all - remove every FDIR HW profile, both tunnel and
 * non-tunnel, for all filter ptypes.  Safe to call when nothing was
 * configured: ice_fdir_prof_rm() skips unallocated entries.
 * NOTE(review): loop increment/braces not visible in this excerpt.
 */
205 ice_fdir_prof_rm_all(struct ice_pf *pf)
207 	enum ice_fltr_ptype ptype;
209 	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
210 	     ptype < ICE_FLTR_PTYPE_MAX;
212 		ice_fdir_prof_rm(pf, ptype, false);
213 		ice_fdir_prof_rm(pf, ptype, true);
218 * ice_fdir_teardown - release the Flow Director resources
219 * @pf: board private structure
 *
 * Reverse of ice_fdir_setup(): stop the FDIR TX/RX queues (logging but not
 * aborting on failure), release the queues, remove all HW profiles, free the
 * profile table, and finally release the control VSI.
 * NOTE(review): listing is truncated — the early-return guard on a NULL
 * fdir_vsi and some braces are not visible in this excerpt.
222 ice_fdir_teardown(struct ice_pf *pf)
224 	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
225 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
229 	vsi = pf->fdir.fdir_vsi;
	/* Queue-stop failures are logged only; teardown must run to the end. */
233 	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
235 		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
237 	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
239 		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
241 	ice_tx_queue_release(pf->fdir.txq);
243 	ice_rx_queue_release(pf->fdir.rxq);
245 	ice_fdir_prof_rm_all(pf);
246 	ice_fdir_prof_free(hw);
247 	ice_release_vsi(vsi);
248 	pf->fdir.fdir_vsi = NULL;
/*
 * ice_fdir_hw_tbl_conf - program one FDIR flow profile into the HW table
 * @pf: board private structure
 * @vsi: main VSI the matched traffic is steered for
 * @ctrl_vsi: FDIR control VSI used to inject programming packets
 * @seg: flow segment(s) describing the match fields (2 segments for tunnel)
 * @ptype: filter packet type this profile serves
 *
 * If a profile for this ptype already exists: identical segment -> keep it;
 * different segment with live filters -> reject; otherwise remove the old
 * profile first.  Then adds the profile and two flow entries (main VSI and
 * control VSI) and records the handles/counters.  prof_id encodes tunnel
 * state by offsetting with ICE_FLTR_PTYPE_MAX, matching ice_fdir_prof_rm().
 * On a failing second entry, the first entry and the profile are removed.
 * On success the function takes ownership of @seg (stored in fdir_seg).
 * NOTE(review): listing is truncated — the is_tunnel parameter line,
 * early-return branches, and goto labels are not visible in this excerpt.
 */
252 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
253 		     struct ice_vsi *ctrl_vsi,
254 		     struct ice_flow_seg_info *seg,
255 		     enum ice_fltr_ptype ptype,
258 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
259 	enum ice_flow_dir dir = ICE_FLOW_RX;
260 	struct ice_flow_seg_info *ori_seg;
261 	struct ice_fd_hw_prof *hw_prof;
262 	struct ice_flow_prof *prof;
263 	uint64_t entry_1 = 0;
264 	uint64_t entry_2 = 0;
269 	hw_prof = hw->fdir_prof[ptype];
270 	ori_seg = hw_prof->fdir_seg[is_tunnel];
	/* Existing profile with an identical segment: nothing to reprogram. */
273 		if (!memcmp(ori_seg, seg, sizeof(*seg)))
276 		if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
	/* Cannot replace the profile while filters of this ptype are live. */
280 		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
283 		ice_fdir_prof_rm(pf, ptype, is_tunnel);
286 	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
287 	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
288 				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
291 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
292 				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
293 				 seg, NULL, 0, &entry_1);
295 		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
299 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
300 				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
301 				 seg, NULL, 0, &entry_2);
303 		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
	/* Record handles for both VSIs; counters drive ice_fdir_prof_rm(). */
308 	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
310 	hw_prof->fdir_seg[is_tunnel] = seg;
311 	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
312 	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
313 	pf->hw_prof_cnt[ptype][is_tunnel]++;
314 	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
315 	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
316 	pf->hw_prof_cnt[ptype][is_tunnel]++;
	/* Error unwind: drop the first entry, then the profile. */
321 	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
322 	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
323 	ice_flow_rem_entry(hw, entry_1);
325 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
/*
 * ice_fdir_input_set_parse - translate an input-set bitmask into flow fields
 * @inset: ICE_INSET_* bitmask describing which packet fields to match
 * @field: output array, filled in order with the matching
 *         ICE_FLOW_FIELD_IDX_* values
 *
 * Walks a static bit -> field lookup table; for every inset bit group fully
 * present in @inset, appends the corresponding flow field index to @field.
 * The caller pre-fills @field with ICE_FLOW_FIELD_IDX_MAX sentinels and
 * relies on them to find the end of the written prefix.
 * NOTE(review): listing is truncated — the inset member of ice_inset_map
 * and the closing braces are not visible in this excerpt.
 */
331 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
335 	struct ice_inset_map {
337 		enum ice_flow_field fld;
339 	static const struct ice_inset_map ice_inset_map[] = {
340 		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
341 		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
342 		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
343 		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
344 		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
345 		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
346 		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
347 		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
348 		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
349 		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
350 		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
351 		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
352 		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
353 		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
354 		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
355 		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
356 		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
	/* Subset test: all bits of the map entry must be set in inset. */
359 	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
360 		if ((inset & ice_inset_map[i].inset) ==
361 		    ice_inset_map[i].inset)
362 			field[j++] = ice_inset_map[i].fld;
/*
 * ice_fdir_input_set_conf - build flow segment(s) for a flow type's input set
 * @pf: board private structure
 * @flow: filter packet type being configured
 * @input_set: ICE_INSET_* bitmask of fields to match
 * @is_tunnel: whether to program the tunnel variant (two segments)
 *
 * Allocates a segment, sets the protocol headers matching @flow, applies the
 * parsed match fields, and programs the HW table via ice_fdir_hw_tbl_conf()
 * (which takes ownership of the segment on success).  For tunnels, a
 * two-segment array is built with the inner match copied into segment 1.
 * An -EAGAIN from table programming ("already configured") maps to success.
 * NOTE(review): listing is truncated — break statements in the switch,
 * error-free paths, and several braces are not visible in this excerpt.
 */
366 static int __rte_unused
367 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
368 			uint64_t input_set, bool is_tunnel)
370 	struct ice_flow_seg_info *seg;
371 	struct ice_flow_seg_info *seg_tun = NULL;
372 	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
378 	seg = (struct ice_flow_seg_info *)
379 		ice_malloc(hw, sizeof(*seg));
381 		PMD_DRV_LOG(ERR, "No memory can be allocated");
	/* Sentinel-fill so the loop below can find the written prefix. */
385 	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
386 		field[i] = ICE_FLOW_FIELD_IDX_MAX;
387 	ice_fdir_input_set_parse(input_set, field);
	/* Select the protocol header stack matching the filter ptype. */
390 	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
391 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
392 				  ICE_FLOW_SEG_HDR_IPV4);
394 	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
395 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
396 				  ICE_FLOW_SEG_HDR_IPV4);
398 	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
399 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
400 				  ICE_FLOW_SEG_HDR_IPV4);
402 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
403 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
405 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
406 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
407 				  ICE_FLOW_SEG_HDR_IPV6);
409 	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
410 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
411 				  ICE_FLOW_SEG_HDR_IPV6);
413 	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
414 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
415 				  ICE_FLOW_SEG_HDR_IPV6);
417 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
418 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
421 		PMD_DRV_LOG(ERR, "not supported filter type.");
	/* Apply every parsed match field (stops at the first sentinel). */
425 	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
426 		ice_flow_set_fld(seg, field[i],
427 				 ICE_FLOW_FLD_OFF_INVAL,
428 				 ICE_FLOW_FLD_OFF_INVAL,
429 				 ICE_FLOW_FLD_OFF_INVAL, false);
433 	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
	/* Tunnel case: outer segment 0 + inner match copied to segment 1. */
436 		seg_tun = (struct ice_flow_seg_info *)
437 			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
439 			PMD_DRV_LOG(ERR, "No memory can be allocated");
443 		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
444 		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
445 					   seg_tun, flow, true);
450 	} else if (ret < 0) {
	/* -EAGAIN means "profile already present" — treat as success. */
454 		return (ret == -EAGAIN) ? 0 : ret;
/*
 * ice_fdir_cnt_update - adjust FDIR filter counters on add/remove
 * @pf: board private structure
 * @ptype: filter packet type of the filter
 * @is_tunnel: tunnel vs non-tunnel counter slot
 * @add: true when a filter was added, false when removed
 *
 * Bumps the global hw->fdir_active_fltr and the per-ptype/tunnel counter by
 * +1 or -1.  An out-of-range ptype is only logged.
 * NOTE(review): the global counter is updated before the ptype validity
 * check in the visible lines; an else-branch between the log and the
 * per-ptype update may be hidden by the truncated listing — confirm.
 */
460 static void __rte_unused
461 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
462 		    bool is_tunnel, bool add)
464 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
467 	cnt = (add) ? 1 : -1;
468 	hw->fdir_active_fltr += cnt;
469 	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
470 		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
472 		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
/*
 * ice_fdir_init - flow-engine init hook; sets up FDIR resources for the
 * adapter's PF via ice_fdir_setup().  Registered in ice_fdir_engine below.
 */
476 ice_fdir_init(struct ice_adapter *ad)
478 	struct ice_pf *pf = &ad->pf;
480 	return ice_fdir_setup(pf);
/*
 * ice_fdir_uninit - flow-engine uninit hook; releases FDIR resources for
 * the adapter's PF via ice_fdir_teardown().
 */
484 ice_fdir_uninit(struct ice_adapter *ad)
486 	struct ice_pf *pf = &ad->pf;
488 	ice_fdir_teardown(pf);
/* FDIR flow-engine descriptor: wires the init/uninit hooks above into the
 * generic ice flow framework (see ice_generic_flow.h). */
491 static struct ice_flow_engine ice_fdir_engine = {
492 	.init = ice_fdir_init,
493 	.uninit = ice_fdir_uninit,
494 	.type = ICE_FLOW_ENGINE_FDIR,
/* Constructor (runs before main via RTE_INIT): registers the FDIR engine
 * with the generic flow framework at load time. */
497 RTE_INIT(ice_fdir_engine_register)
499 	ice_register_flow_engine(&ice_fdir_engine);