#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

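/* Input set bits the FDIR parser can match on for each pattern type. */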
#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

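/*
 * Supported patterns and, for each, the input set a rule may match on.
 * For example (testpmd flow syntax), a rule like
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *     udp src is 32 / end actions queue index 2 / end
 * is accepted as long as its input set stays within
 * ICE_FDIR_INSET_ETH_IPV4_UDP.
 */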
static struct ice_pattern_match_item ice_fdir_pattern[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

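/* Allocate the per-filter-type array of FDIR HW profiles on first use. */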
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     fltr_ptype < ptype;
	     fltr_ptype++)
		rte_free(hw->fdir_prof[fltr_ptype]);
	rte_free(hw->fdir_prof);
	return -ENOMEM;
}

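/*
 * Add a block of @len HW counters starting at @index_start to the
 * container as a new pool; counters are later handed out from the
 * pool's free list.
 */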
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
		&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
		&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++)
		rte_free(container->pools[i]);

	return 0;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_mem;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++)
		rte_free(hw->fdir_prof[ptype]);

	rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}

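/*
 * Create a HW profile for @ptype and attach two flow entries to it:
 * one for the main VSI, which receives the matched traffic, and one
 * for the control VSI used for filter programming.
 */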
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	uint64_t prof_id;
	int ret;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

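/* Translate ICE_INSET_* bits into the corresponding ice_flow_field indexes. */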
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

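/*
 * Build the flow segment(s) describing @input_set for @flow and program
 * the matching HW profile; tunnel rules carry an extra outer segment.
 */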
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EAGAIN) ? 0 : ret;
	} else {
		return ret;
	}
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	return ice_register_parser(&ice_fdir_parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;

	ice_unregister_parser(&ice_fdir_parser, ad);

	ice_fdir_teardown(pf);
}

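/*
 * Program (add == true) or remove a single FDIR rule: write a programming
 * descriptor and a matching dummy packet to the FDIR TX queue.
 */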
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to generate dummy packet");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_filter_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_entry;
	}

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;
	ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.type = ICE_FLOW_ENGINE_FDIR,
};

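/*
 * An RSS action is reused here to describe a queue region: the queue
 * list must be contiguous and a power of two in size, and matched
 * packets are distributed across it.
 */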
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue indexes of the queue region are contiguous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}

static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
			    pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	return 0;
}

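/*
 * Walk the pattern items, validate their masks (only exact matches are
 * supported) and collect the input set bits plus the matched field
 * values for the filter.
 */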
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Src MAC not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

				/*
				 * Store src/dst swapped: the base code
				 * builds the programming packet mirrored
				 * and swaps them back.
				 */
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				/* src/dst stored swapped, as for IPv4 */
				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
					rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					/* store swapped, as in the TCP case */
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

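/*
 * Parser entry point called by the generic flow framework: match the
 * pattern against the supported table, then parse the pattern and the
 * actions into @pf->fdir.conf, returned through @meta.
 */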
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}
	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern,
	.array_len = RTE_DIM(ice_fdir_pattern),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}