#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

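/*
 * Background note on the ICE_FDIR_IPV6_TC_OFFSET/ICE_IPV6_TC_MASK macros
 * above: in the IPv6 header's 32-bit vtc_flow word the fields are laid
 * out as version(4) | traffic class(8) | flow label(20), so the traffic
 * class occupies bits 27:20 -- hence the offset of 20 and the 0xFF mask.
 *
 *   31      28 27        20 19                  0
 *  +----------+------------+---------------------+
 *  | version  |     TC     |     flow label      |
 *  +----------+------------+---------------------+
 */
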
#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

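/*
 * Illustrative note: each ICE_FDIR_INSET_* macro above is the complete
 * set of fields a pattern is allowed to match on. ice_fdir_parse() later
 * rejects any rule whose parsed input set contains a bit outside the mask
 * paired with its pattern, roughly:
 *
 *	if (!input_set || input_set & ~item->input_set_mask)
 *		-> "Invalid input set"
 *
 * e.g. a pattern_eth_ipv4_udp rule may use IPv4 src/dst/TOS/TTL/proto
 * plus the UDP src/dst ports, and nothing else.
 */
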
static struct ice_pattern_match_item ice_fdir_pattern[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	/* The buffer is handed to hardware by physical address, so it must
	 * be IOVA-contiguous. ICE_RING_BASE_ALIGN (from ice_rxtx.h) is the
	 * DMA alignment used elsewhere in this driver.
	 */
	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	/* unwind only the per-ptype entries allocated so far */
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     fltr_ptype < ptype;
	     fltr_ptype++)
		rte_free(hw->fdir_prof[fltr_ptype]);
	rte_free(hw->fdir_prof);
	return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	/* the pool was already linked into the container above; unlink it
	 * before freeing so the list does not keep a dangling pointer
	 */
	TAILQ_REMOVE(&container->pool_list, pool, next);
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++)
		rte_free(container->pools[i]);

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
				 *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	/* a shared counter is looked up by id and only ref-counted */
	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		/* move exhausted pools to the tail so later searches hit
		 * pools with free counters first
		 */
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}

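/*
 * Usage sketch (illustrative, not part of the original code): a COUNT
 * action with shared=1 lets several rules reference one hardware counter
 * by id, so allocation degenerates to a ref-count bump:
 *
 *	c = ice_fdir_counter_alloc(pf, 1, 42);	// first user: takes a hw slot
 *	c = ice_fdir_counter_alloc(pf, 1, 42);	// second user: ref_cnt++
 *	ice_fdir_counter_free(pf, c);		// back to one user
 *
 * Only when ref_cnt drops to zero is the counter returned to its pool's
 * free list.
 */
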
/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_mem;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++)
		rte_free(hw->fdir_prof[ptype]);

	rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

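/*
 * Note on profile ids: non-tunnel profiles use the raw ptype value and
 * tunnel profiles use ptype + ICE_FLTR_PTYPE_MAX, giving each
 * (ptype, is_tunnel) pair a unique id in one flat namespace. The same
 * formula appears in ice_fdir_hw_tbl_conf() below and must stay in sync
 * with the one used here for removal.
 */
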
/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		/* an identical segment is already installed: nothing to do */
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		/* the profile cannot be replaced while rules still use it */
		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

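/*
 * Summary (added for orientation): ice_fdir_hw_tbl_conf() installs one
 * flow profile and two entries for it -- one on the main VSI, so regular
 * RX traffic is matched, and one on the control VSI, so the FDIR
 * programming path sees the same profile. Both entry handles are cached
 * in hw_prof so ice_fdir_prof_rm() can tear them down symmetrically.
 */
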
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

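/*
 * Worked example (illustrative): for an IPv4/UDP rule matching on both
 * IP addresses and both L4 ports,
 *
 *	inset = ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
 *		ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT;
 *
 * the parser above fills field[] in map order with
 *
 *	{ ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *	  ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT }
 *
 * leaving the remaining slots at the ICE_FLOW_FIELD_IDX_MAX sentinel the
 * caller pre-filled.
 */
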
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	/* note: ice_malloc() ignores its first argument in DPDK's osdep
	 * shim, so no local hw handle is needed here
	 */
	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	/* pre-fill with the sentinel so the set-field loop below knows
	 * where the parsed fields end
	 */
	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		/* -EAGAIN means an identical profile already exists; treat
		 * it as success
		 */
		return (ret == -EAGAIN) ? 0 : ret;
	} else {
		return ret;
	}
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	return ice_register_parser(&ice_fdir_parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;

	ice_unregister_parser(&ice_fdir_parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

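/*
 * Flow of ice_fdir_add_del_filter(), summarized: the rule is turned into
 * a programming descriptor plus a dummy packet written into the DMA-able
 * prg_pkt buffer reserved in ice_fdir_setup(); descriptor and packet are
 * then pushed through the control VSI's TX queue by
 * ice_fdir_programming(), which is how this hardware accepts filter
 * add/delete requests.
 */
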
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_filter_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;
	ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(rule);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};

static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue indexes for queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values:"
				   " 2, 4, 8, 16, 32, 64, 128 as long as the total number"
				   " of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}

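/*
 * Encoding example (illustrative): an RSS action carrying the contiguous
 * queues {8, 9, 10, 11} yields
 *
 *	filter->input.q_index  = 8;                       // region start
 *	filter->input.q_region = rte_fls_u32(4) - 1 = 2;  // log2(size)
 *
 * i.e. the hardware takes the region as a power-of-two size encoded as
 * its exponent, which is why the queue count must be a power of two.
 */
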
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error, actions,
							    filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	/* exactly one destination action (queue/drop/passthru/RSS) and at
	 * most one mark and one count action are accepted
	 */
	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}

static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Src mac not support");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

				/* src and dst are stored swapped: the
				 * programming-packet generator swaps them
				 * back when building the dummy packet
				 */
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					/* swapped like every other branch;
					 * the unswapped assignment here was
					 * a copy-paste bug
					 */
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

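/*
 * Example (illustrative): the pattern
 *
 *	eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 /
 *		udp src is 32 dst is 33
 *
 * with fully-set masks parses to flow_type ICE_FLTR_PTYPE_NONF_IPV4_UDP
 * and input_set ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
 * ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT, with the 5-tuple
 * stored swapped in filter->input as noted in the comments above.
 */
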
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	if (meta)
		*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern,
	.array_len = RTE_DIM(ice_fdir_pattern),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}
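
/*
 * End-to-end usage sketch (testpmd syntax, assuming the standard rte_flow
 * command set; not part of this file):
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 \
 *		dst is 2.2.2.2 / udp src is 32 dst is 33 / end \
 *		actions queue index 4 / mark id 3 / count / end
 *
 * This exercises the distributor-stage parser registered above:
 * ice_fdir_parse() matches pattern_eth_ipv4_udp and validates the input
 * set, then ice_fdir_create_filter() programs the rule with a mark id
 * and a hardware hit counter.
 */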