diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 66001702b3..ce6aa09d3d 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1,5 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
 #include <stdio.h>
 #include <rte_flow.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
 #include "base/ice_fdir.h"
 #include "base/ice_flow.h"
 #include "base/ice_type.h"
@@ -12,8 +18,11 @@
 
 #define ICE_FDIR_MAX_QREGION_SIZE	128
 
+#define ICE_FDIR_INSET_ETH (\
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+
 #define ICE_FDIR_INSET_ETH_IPV4 (\
-	ICE_INSET_DMAC | \
+	ICE_FDIR_INSET_ETH | \
 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
 	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
 
@@ -30,6 +39,7 @@
 	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
 
 #define ICE_FDIR_INSET_ETH_IPV6 (\
+	ICE_INSET_DMAC | \
 	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
 	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
 
@@ -45,7 +55,37 @@
 	ICE_FDIR_INSET_ETH_IPV6 | \
 	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
 
-static struct ice_pattern_match_item ice_fdir_pattern[] = {
+#define ICE_FDIR_INSET_VXLAN_IPV4 (\
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
+	ICE_FDIR_INSET_VXLAN_IPV4 | \
+	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
+	ICE_FDIR_INSET_VXLAN_IPV4 | \
+	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
+	ICE_FDIR_INSET_VXLAN_IPV4 | \
+	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
+
+#define ICE_FDIR_INSET_IPV4_GTPU (\
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
+
+#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
+
+#define ICE_FDIR_INSET_IPV6_GTPU (\
+	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)
+
+#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
+	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
+	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
+
+static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
+	{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE},
 	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
@@ -54,13 +94,42 @@ static struct ice_pattern_match_item ice_fdir_pattern[] = {
 	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
 	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_ipv4,
+				ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
+				ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
+				ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
+				ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
+				ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
+				ICE_FDIR_INSET_VXLAN_IPV4_UDP,
ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE}, }; static struct ice_flow_parser ice_fdir_parser; +static int +ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type); + static const struct rte_memzone * ice_memzone_reserve(const char *name, uint32_t len, int socket_id) { + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + return rte_memzone_reserve_aligned(name, len, socket_id, RTE_MEMZONE_IOVA_CONTIG, ICE_RING_BASE_ALIGN); @@ -80,7 +149,7 @@ ice_fdir_prof_alloc(struct ice_hw *hw) if (!hw->fdir_prof) return -ENOMEM; } - for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; ptype < ICE_FLTR_PTYPE_MAX; ptype++) { if (!hw->fdir_prof[ptype]) { @@ -93,14 +162,253 @@ ice_fdir_prof_alloc(struct ice_hw *hw) return 0; fail_mem: - for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; fltr_ptype < ptype; - fltr_ptype++) + fltr_ptype++) { rte_free(hw->fdir_prof[fltr_ptype]); + hw->fdir_prof[fltr_ptype] = NULL; + } + rte_free(hw->fdir_prof); + hw->fdir_prof = NULL; + return -ENOMEM; } +static int +ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter_pool_container *container, + uint32_t index_start, + uint32_t len) +{ + struct ice_fdir_counter_pool *pool; + uint32_t i; + int ret = 0; + + pool = rte_zmalloc("ice_fdir_counter_pool", + sizeof(*pool) + + sizeof(struct ice_fdir_counter) * len, + 0); + if (!pool) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir counter pool"); + return -ENOMEM; + } + + TAILQ_INIT(&pool->counter_list); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + + for (i = 0; i < len; i++) { + struct ice_fdir_counter *counter = &pool->counters[i]; + + counter->hw_index = index_start + i; + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } + + if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) { + PMD_INIT_LOG(ERR, "FDIR counter pool is full"); + ret = -EINVAL; + goto free_pool; + } + + container->pools[container->index_free++] = pool; + return 0; + +free_pool: + rte_free(pool); + return ret; +} + +static int +ice_fdir_counter_init(struct ice_pf *pf) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint32_t cnt_index, len; + int ret; + + TAILQ_INIT(&container->pool_list); + + cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base); + len = ICE_FDIR_COUNTERS_PER_BLOCK; + + ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to add fdir pool to container"); + return ret; + } + + return 0; +} + +static int +ice_fdir_counter_release(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint8_t i; + + for (i = 0; i < container->index_free; i++) { + rte_free(container->pools[i]); + container->pools[i] = NULL; + } + + TAILQ_INIT(&container->pool_list); + container->index_free = 0; + + return 0; +} + 
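/* Illustration (not from the commit): the counter code above keeps hardware
 * counter slots on per-pool TAILQ free lists. Allocation pops the head of the
 * first pool that has a free entry; freeing pushes the slot back once its
 * reference count drops to zero. A minimal self-contained sketch of that
 * pattern, with made-up names and no DPDK dependencies:
 */
#include <stdio.h>
#include <sys/queue.h>

struct demo_counter {
	TAILQ_ENTRY(demo_counter) next;
	unsigned int hw_index;	/* stands in for the GLSTAT_FD_CNT0x index */
	unsigned int ref_cnt;
};

TAILQ_HEAD(demo_counter_list, demo_counter);

int main(void)
{
	struct demo_counter slots[4];
	struct demo_counter_list free_list;
	struct demo_counter *c;
	unsigned int i;

	/* seed the free list, as ice_fdir_counter_pool_add() does */
	TAILQ_INIT(&free_list);
	for (i = 0; i < 4; i++) {
		slots[i].hw_index = i;
		slots[i].ref_cnt = 0;
		TAILQ_INSERT_TAIL(&free_list, &slots[i], next);
	}

	/* allocate: pop the first free slot (cf. ice_fdir_counter_alloc) */
	c = TAILQ_FIRST(&free_list);
	if (c != NULL) {
		TAILQ_REMOVE(&free_list, c, next);
		c->ref_cnt = 1;
		printf("allocated hw_index %u\n", c->hw_index);
	}

	/* free: return the slot when the last reference goes away
	 * (cf. ice_fdir_counter_free)
	 */
	if (c != NULL && --c->ref_cnt == 0)
		TAILQ_INSERT_TAIL(&free_list, c, next);

	return 0;
}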
+static struct ice_fdir_counter * +ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container + *container, + uint32_t id) +{ + struct ice_fdir_counter_pool *pool; + struct ice_fdir_counter *counter; + int i; + + TAILQ_FOREACH(pool, &container->pool_list, next) { + for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) { + counter = &pool->counters[i]; + + if (counter->shared && + counter->ref_cnt && + counter->id == id) + return counter; + } + } + + return NULL; +} + +static struct ice_fdir_counter * +ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + struct ice_fdir_counter_pool *pool = NULL; + struct ice_fdir_counter *counter_free = NULL; + + if (shared) { + counter_free = ice_fdir_counter_shared_search(container, id); + if (counter_free) { + if (counter_free->ref_cnt + 1 == 0) { + rte_errno = E2BIG; + return NULL; + } + counter_free->ref_cnt++; + return counter_free; + } + } + + TAILQ_FOREACH(pool, &container->pool_list, next) { + counter_free = TAILQ_FIRST(&pool->counter_list); + if (counter_free) + break; + counter_free = NULL; + } + + if (!counter_free) { + PMD_DRV_LOG(ERR, "No free counter found\n"); + return NULL; + } + + counter_free->shared = shared; + counter_free->id = id; + counter_free->ref_cnt = 1; + counter_free->pool = pool; + + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0); + + TAILQ_REMOVE(&pool->counter_list, counter_free, next); + if (TAILQ_EMPTY(&pool->counter_list)) { + TAILQ_REMOVE(&container->pool_list, pool, next); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + } + + return counter_free; +} + +static void +ice_fdir_counter_free(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter *counter) +{ + if (!counter) + return; + + if (--counter->ref_cnt == 0) { + struct ice_fdir_counter_pool *pool = counter->pool; + + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } +} + +static int +ice_fdir_init_filter_list(struct ice_pf *pf) +{ + struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct ice_fdir_info *fdir_info = &pf->fdir; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = ICE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct ice_fdir_fltr_pattern), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE, + }; + + /* Initialize hash */ + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", dev->device->name); + fdir_info->hash_table = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map", + sizeof(*fdir_info->hash_map) * + ICE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + ret = -ENOMEM; + goto err_fdir_hash_map_alloc; + } + return 0; + +err_fdir_hash_map_alloc: + rte_hash_free(fdir_info->hash_table); + + return ret; +} + +static void +ice_fdir_release_filter_list(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_table) + 
rte_hash_free(fdir_info->hash_table);
+
+	fdir_info->hash_map = NULL;
+	fdir_info->hash_table = NULL;
+}
+
 /*
  * ice_fdir_setup - reserve and initialize the Flow Director resources
  * @pf: board private structure
@@ -138,6 +446,18 @@ ice_fdir_setup(struct ice_pf *pf)
 	}
 	pf->fdir.fdir_vsi = vsi;
 
+	err = ice_fdir_init_filter_list(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
+		return -EINVAL;
+	}
+
+	err = ice_fdir_counter_init(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
+		return -EINVAL;
+	}
+
 	/*Fdir tx queue setup*/
 	err = ice_fdir_setup_tx_resources(pf);
 	if (err) {
@@ -164,6 +484,11 @@ ice_fdir_setup(struct ice_pf *pf)
 		goto fail_mem;
 	}
 
+	/* Enable FDIR MSIX interrupt */
+	vsi->nb_used_qps = 1;
+	ice_vsi_queues_bind_intr(vsi);
+	ice_vsi_enable_queues_intr(vsi);
+
 	/* reserve memory for the fdir programming packet */
 	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
 		 ICE_FDIR_MZ_NAME,
@@ -177,19 +502,23 @@ ice_fdir_setup(struct ice_pf *pf)
 	}
 	pf->fdir.prg_pkt = mz->addr;
 	pf->fdir.dma_addr = mz->iova;
+	pf->fdir.mz = mz;
 
 	err = ice_fdir_prof_alloc(hw);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
 			    "flow director profile.");
 		err = -ENOMEM;
-		goto fail_mem;
+		goto fail_prof;
 	}
 
 	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
 		    vsi->base_queue);
 	return ICE_SUCCESS;
 
+fail_prof:
+	rte_memzone_free(pf->fdir.mz);
+	pf->fdir.mz = NULL;
 fail_mem:
 	ice_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
@@ -207,12 +536,15 @@ ice_fdir_prof_free(struct ice_hw *hw)
 {
 	enum ice_fltr_ptype ptype;
 
-	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
 	     ptype < ICE_FLTR_PTYPE_MAX;
-	     ptype++)
+	     ptype++) {
 		rte_free(hw->fdir_prof[ptype]);
+		hw->fdir_prof[ptype] = NULL;
+	}
 
 	rte_free(hw->fdir_prof);
+	hw->fdir_prof = NULL;
 }
 
 /* Remove a profile for some filter type */
@@ -237,7 +569,7 @@ ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
 					      hw_prof->vsi_h[i]);
 			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 					     vsi_num, ptype);
-			ice_flow_rem_entry(hw,
+			ice_flow_rem_entry(hw, ICE_BLK_FD,
 					   hw_prof->entry_h[i][is_tunnel]);
 			hw_prof->entry_h[i][is_tunnel] = 0;
 		}
@@ -257,7 +589,7 @@ ice_fdir_prof_rm_all(struct ice_pf *pf)
 {
 	enum ice_fltr_ptype ptype;
 
-	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
+	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
 	     ptype < ICE_FLTR_PTYPE_MAX;
 	     ptype++) {
 		ice_fdir_prof_rm(pf, ptype, false);
@@ -281,6 +613,8 @@ ice_fdir_teardown(struct ice_pf *pf)
 	if (!vsi)
 		return;
 
+	ice_vsi_disable_queues_intr(vsi);
+
 	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
@@ -289,6 +623,12 @@ ice_fdir_teardown(struct ice_pf *pf)
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
 
+	err = ice_fdir_counter_release(pf);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
+
+	ice_fdir_release_filter_list(pf);
+
 	ice_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
 	ice_rx_queue_release(pf->fdir.rxq);
@@ -297,6 +637,163 @@ ice_fdir_teardown(struct ice_pf *pf)
 	ice_fdir_prof_free(hw);
 	ice_release_vsi(vsi);
 	pf->fdir.fdir_vsi = NULL;
+
+	if (pf->fdir.mz) {
+		err = rte_memzone_free(pf->fdir.mz);
+		pf->fdir.mz = NULL;
+		if (err)
+			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
+	}
+}
+
+static int
+ice_fdir_cur_prof_conflict(struct ice_pf *pf,
+			   enum ice_fltr_ptype ptype,
+			   struct ice_flow_seg_info *seg,
+			   bool is_tunnel)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_seg_info *ori_seg;
+	struct 
ice_fd_hw_prof *hw_prof; + + hw_prof = hw->fdir_prof[ptype]; + ori_seg = hw_prof->fdir_seg[is_tunnel]; + + /* profile does not exist */ + if (!ori_seg) + return 0; + + /* if no input set conflict, return -EEXIST */ + if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) || + (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) { + PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.", + ptype); + return -EEXIST; + } + + /* a rule with input set conflict already exist, so give up */ + if (pf->fdir_fltr_cnt[ptype][is_tunnel]) { + PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.", + ptype); + return -EINVAL; + } + + /* it's safe to delete an empty profile */ + ice_fdir_prof_rm(pf, ptype, is_tunnel); + return 0; +} + +static bool +ice_fdir_prof_resolve_conflict(struct ice_pf *pf, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fd_hw_prof *hw_prof; + struct ice_flow_seg_info *seg; + + hw_prof = hw->fdir_prof[ptype]; + seg = hw_prof->fdir_seg[is_tunnel]; + + /* profile does not exist */ + if (!seg) + return true; + + /* profile exists and rule exists, fail to resolve the conflict */ + if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0) + return false; + + /* it's safe to delete an empty profile */ + ice_fdir_prof_rm(pf, ptype, is_tunnel); + + return true; +} + +static int +ice_fdir_cross_prof_conflict(struct ice_pf *pf, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + enum ice_fltr_ptype cflct_ptype; + + switch (ptype) { + /* IPv4 */ + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + /* IPv4 GTPU */ + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + /* IPv6 */ + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + if (!ice_fdir_prof_resolve_conflict + 
(pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + default: + break; + } + return 0; +err: + PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.", + ptype, cflct_ptype); + return -EINVAL; } static int @@ -308,7 +805,6 @@ ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, { struct ice_hw *hw = ICE_PF_TO_HW(pf); enum ice_flow_dir dir = ICE_FLOW_RX; - struct ice_flow_seg_info *ori_seg; struct ice_fd_hw_prof *hw_prof; struct ice_flow_prof *prof; uint64_t entry_1 = 0; @@ -317,22 +813,15 @@ ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, int ret; uint64_t prof_id; - hw_prof = hw->fdir_prof[ptype]; - ori_seg = hw_prof->fdir_seg[is_tunnel]; - if (ori_seg) { - if (!is_tunnel) { - if (!memcmp(ori_seg, seg, sizeof(*seg))) - return -EAGAIN; - } else { - if (!memcmp(ori_seg, &seg[1], sizeof(*seg))) - return -EAGAIN; - } - - if (pf->fdir_fltr_cnt[ptype][is_tunnel]) - return -EINVAL; + /* check if have input set conflict on current profile. */ + ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel); + if (ret) + return ret; - ice_fdir_prof_rm(pf, ptype, is_tunnel); - } + /* check if the profile is conflict with other profile. */ + ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel); + if (ret) + return ret; prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX; ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg, @@ -356,6 +845,7 @@ ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, goto err_add_entry; } + hw_prof = hw->fdir_prof[ptype]; pf->hw_prof_cnt[ptype][is_tunnel] = 0; hw_prof->cnt = 0; hw_prof->fdir_seg[is_tunnel] = seg; @@ -371,7 +861,7 @@ ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, err_add_entry: vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); - ice_flow_rem_entry(hw, entry_1); + ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1); err_add_prof: ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); @@ -389,6 +879,7 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) }; static const struct ice_inset_map ice_inset_map[] = { {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA}, + {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE}, {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP}, @@ -405,6 +896,16 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, + {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, + {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT}, + {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT}, + {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT}, + {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, + {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, + {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID}, + {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI}, }; for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) { @@ -414,84 +915,131 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) } } 
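/* Illustration (not from the commit): ice_fdir_input_set_parse() above turns
 * an input-set bitmask into the list of flow fields to extract by walking a
 * static map, leaving unused slots at the MAX sentinel. The same technique,
 * reduced to a standalone sketch with made-up names:
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_INSET_IPV4_SRC	(1ULL << 0)
#define DEMO_INSET_IPV4_DST	(1ULL << 1)
#define DEMO_INSET_UDP_SRC	(1ULL << 2)

enum demo_field {
	DEMO_FLD_IPV4_SA,
	DEMO_FLD_IPV4_DA,
	DEMO_FLD_UDP_SRC_PORT,
	DEMO_FLD_MAX	/* doubles as the "unset" sentinel */
};

static const struct {
	uint64_t inset;
	enum demo_field fld;
} demo_map[] = {
	{DEMO_INSET_IPV4_SRC, DEMO_FLD_IPV4_SA},
	{DEMO_INSET_IPV4_DST, DEMO_FLD_IPV4_DA},
	{DEMO_INSET_UDP_SRC, DEMO_FLD_UDP_SRC_PORT},
};

int main(void)
{
	uint64_t input_set = DEMO_INSET_IPV4_SRC | DEMO_INSET_UDP_SRC;
	enum demo_field field[DEMO_FLD_MAX];
	size_t i, j;

	for (i = 0; i < DEMO_FLD_MAX; i++)
		field[i] = DEMO_FLD_MAX;

	/* collect the field index for every bit present in the input set */
	for (i = 0, j = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++) {
		if (input_set & demo_map[i].inset)
			field[j++] = demo_map[i].fld;
	}

	for (i = 0; i < DEMO_FLD_MAX && field[i] != DEMO_FLD_MAX; i++)
		printf("extract field %d\n", (int)field[i]);

	return 0;
}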
-static int -ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, - uint64_t input_set, bool is_tunnel) +static void +ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg) { - struct ice_flow_seg_info *seg; - struct ice_flow_seg_info *seg_tun = NULL; - enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX]; - int i, ret; - - if (!input_set) - return -EINVAL; - - seg = (struct ice_flow_seg_info *) - ice_malloc(hw, sizeof(*seg)); - if (!seg) { - PMD_DRV_LOG(ERR, "No memory can be allocated"); - return -ENOMEM; - } - - for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++) - field[i] = ICE_FLOW_FIELD_IDX_MAX; - ice_fdir_input_set_parse(input_set, field); - switch (flow) { case ICE_FLTR_PTYPE_NONF_IPV4_UDP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | - ICE_FLOW_SEG_HDR_IPV4); + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | - ICE_FLOW_SEG_HDR_IPV4); + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | - ICE_FLOW_SEG_HDR_IPV4); + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: - ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4); + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_UDP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | - ICE_FLOW_SEG_HDR_IPV6); + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | - ICE_FLOW_SEG_HDR_IPV6); + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | - ICE_FLOW_SEG_HDR_IPV6); + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: - ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6); + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER); + break; + case ICE_FLTR_PTYPE_NON_IP_L2: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP); break; default: PMD_DRV_LOG(ERR, "not supported filter type."); break; } +} + +static int +ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, + uint64_t inner_input_set, uint64_t outer_input_set, + enum ice_fdir_tunnel_type ttype) +{ + struct ice_flow_seg_info *seg; + struct ice_flow_seg_info *seg_tun = NULL; + enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX]; + uint64_t input_set; + bool is_tunnel; + int k, i, ret = 0; + + if (!(inner_input_set | outer_input_set)) + return -EINVAL; + 
+ seg_tun = (struct ice_flow_seg_info *) + ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX); + if (!seg_tun) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } - for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) { - ice_flow_set_fld(seg, field[i], - ICE_FLOW_FLD_OFF_INVAL, - ICE_FLOW_FLD_OFF_INVAL, - ICE_FLOW_FLD_OFF_INVAL, false); + /* use seg_tun[1] to record tunnel inner part or non-tunnel */ + for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) { + seg = &seg_tun[k]; + input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set; + if (input_set == 0) + continue; + + for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++) + field[i] = ICE_FLOW_FIELD_IDX_MAX; + + ice_fdir_input_set_parse(input_set, field); + + ice_fdir_input_set_hdrs(flow, seg); + + for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) { + ice_flow_set_fld(seg, field[i], + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } } + is_tunnel = ice_fdir_is_tunnel_profile(ttype); if (!is_tunnel) { ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, - seg, flow, false); + seg_tun + 1, flow, false); } else { - seg_tun = (struct ice_flow_seg_info *) - ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX); - if (!seg_tun) { - PMD_DRV_LOG(ERR, "No memory can be allocated"); - rte_free(seg); - return -ENOMEM; - } - rte_memcpy(&seg_tun[1], seg, sizeof(*seg)); ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, seg_tun, flow, true); } @@ -499,10 +1047,8 @@ ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, if (!ret) { return ret; } else if (ret < 0) { - rte_free(seg); - if (is_tunnel) - rte_free(seg_tun); - return (ret == -EAGAIN) ? 0 : ret; + rte_free(seg_tun); + return (ret == -EEXIST) ? 0 : ret; } else { return ret; } @@ -527,25 +1073,46 @@ static int ice_fdir_init(struct ice_adapter *ad) { struct ice_pf *pf = &ad->pf; + struct ice_flow_parser *parser; int ret; + if (ad->hw.dcf_enabled) + return 0; + ret = ice_fdir_setup(pf); if (ret) return ret; - return ice_register_parser(&ice_fdir_parser, ad); + parser = &ice_fdir_parser; + + return ice_register_parser(parser, ad); } static void ice_fdir_uninit(struct ice_adapter *ad) { + struct ice_flow_parser *parser; struct ice_pf *pf = &ad->pf; - ice_unregister_parser(&ice_fdir_parser, ad); + if (ad->hw.dcf_enabled) + return; + + parser = &ice_fdir_parser; + + ice_unregister_parser(parser, ad); ice_fdir_teardown(pf); } +static int +ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type) +{ + if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) + return 1; + else + return 0; +} + static int ice_fdir_add_del_filter(struct ice_pf *pf, struct ice_fdir_filter_conf *filter, @@ -554,15 +1121,19 @@ ice_fdir_add_del_filter(struct ice_pf *pf, struct ice_fltr_desc desc; struct ice_hw *hw = ICE_PF_TO_HW(pf); unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + bool is_tun; int ret; filter->input.dest_vsi = pf->main_vsi->idx; memset(&desc, 0, sizeof(desc)); + filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW; ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add); + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + memset(pkt, 0, ICE_FDIR_PKT_LEN); - ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false); + ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun); if (ret) { PMD_DRV_LOG(ERR, "Generate dummy packet failed"); return -EINVAL; @@ -571,6 +1142,79 @@ ice_fdir_add_del_filter(struct ice_pf *pf, return ice_fdir_programming(pf, &desc); } +static void 
+ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key, + struct ice_fdir_filter_conf *filter) +{ + struct ice_fdir_fltr *input = &filter->input; + memset(key, 0, sizeof(*key)); + + key->flow_type = input->flow_type; + rte_memcpy(&key->ip, &input->ip, sizeof(key->ip)); + rte_memcpy(&key->mask, &input->mask, sizeof(key->mask)); + rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data)); + rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask)); + + rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data)); + rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask)); + + key->tunnel_type = filter->tunnel_type; +} + +/* Check if there exists the flow director filter */ +static struct ice_fdir_filter_conf * +ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info, + const struct ice_fdir_fltr_pattern *key) +{ + int ret; + + ret = rte_hash_lookup(fdir_info->hash_table, key); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +/* Add a flow director entry into the SW list */ +static int +ice_fdir_entry_insert(struct ice_pf *pf, + struct ice_fdir_filter_conf *entry, + struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_add_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir entry to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = entry; + + return 0; +} + +/* Delete a flow director entry from the SW list */ +static int +ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_del_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete fdir filter to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = NULL; + + return 0; +} + static int ice_fdir_create_filter(struct ice_adapter *ad, struct rte_flow *flow, @@ -579,19 +1223,34 @@ ice_fdir_create_filter(struct ice_adapter *ad, { struct ice_pf *pf = &ad->pf; struct ice_fdir_filter_conf *filter = meta; - struct ice_fdir_filter_conf *rule; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *entry, *node; + struct ice_fdir_fltr_pattern key; + bool is_tun; int ret; - rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); - if (!rule) { + ice_fdir_extract_fltr_key(&key, filter); + node = ice_fdir_entry_lookup(fdir_info, &key); + if (node) { + rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Rule already exists!"); + return -rte_errno; + } + + entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0); + if (!entry) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to allocate memory"); return -rte_errno; } + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + ret = ice_fdir_input_set_conf(pf, filter->input.flow_type, - filter->input_set, false); + filter->input_set, filter->outer_input_set, + filter->tunnel_type); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -599,21 +1258,55 @@ ice_fdir_create_filter(struct ice_adapter *ad, goto free_entry; } + /* alloc counter for FDIR */ + if (filter->input.cnt_ena) { + struct rte_flow_action_count *act_count = &filter->act_count; + + filter->counter = ice_fdir_counter_alloc(pf, + act_count->shared, + act_count->id); + if (!filter->counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Failed to alloc FDIR counter."); + goto 
free_entry; + } + filter->input.cnt_index = filter->counter->hw_index; + } + ret = ice_fdir_add_del_filter(pf, filter, true); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Add filter rule failed."); + goto free_counter; + } + + if (filter->mark_flag == 1) + ice_fdir_rx_parsing_enable(ad, 1); + + rte_memcpy(entry, filter, sizeof(*entry)); + ret = ice_fdir_entry_insert(pf, entry, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Insert entry to table failed."); goto free_entry; } - rte_memcpy(rule, filter, sizeof(*rule)); - flow->rule = rule; - ice_fdir_cnt_update(pf, filter->input.flow_type, false, true); + flow->rule = entry; + ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true); + return 0; +free_counter: + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + free_entry: - rte_free(rule); + rte_free(entry); return -rte_errno; } @@ -623,11 +1316,30 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, struct rte_flow_error *error) { struct ice_pf *pf = &ad->pf; - struct ice_fdir_filter_conf *filter; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *filter, *entry; + struct ice_fdir_fltr_pattern key; + bool is_tun; int ret; filter = (struct ice_fdir_filter_conf *)flow->rule; + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + + ice_fdir_extract_fltr_key(&key, filter); + entry = ice_fdir_entry_lookup(fdir_info, &key); + if (!entry) { + rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Can't find entry."); + return -rte_errno; + } + ret = ice_fdir_add_del_filter(pf, filter, false); if (ret) { rte_flow_error_set(error, -ret, @@ -636,7 +1348,19 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, return -rte_errno; } - ice_fdir_cnt_update(pf, filter->input.flow_type, false, false); + ret = ice_fdir_entry_del(pf, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Remove entry from table failed."); + return -rte_errno; + } + + ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false); + + if (filter->mark_flag == 1) + ice_fdir_rx_parsing_enable(ad, 0); + flow->rule = NULL; rte_free(filter); @@ -644,11 +1368,54 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, return 0; } +static int +ice_fdir_query_count(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *flow_stats, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_filter_conf *filter = flow->rule; + struct ice_fdir_counter *counter = filter->counter; + uint64_t hits_lo, hits_hi; + + if (!counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "FDIR counters not available"); + return -rte_errno; + } + + /* + * Reading the low 32-bits latches the high 32-bits into a shadow + * register. Reading the high 32-bit returns the value in the + * shadow register. 
+ */ + hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index)); + hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index)); + + flow_stats->hits_set = 1; + flow_stats->hits = hits_lo | (hits_hi << 32); + flow_stats->bytes_set = 0; + flow_stats->bytes = 0; + + if (flow_stats->reset) { + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0); + } + + return 0; +} + static struct ice_flow_engine ice_fdir_engine = { .init = ice_fdir_init, .uninit = ice_fdir_uninit, .create = ice_fdir_create_filter, .destroy = ice_fdir_destroy_filter, + .query_count = ice_fdir_query_count, .type = ICE_FLOW_ENGINE_FDIR, }; @@ -718,8 +1485,10 @@ ice_fdir_parse_action(struct ice_adapter *ad, struct ice_pf *pf = &ad->pf; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_mark *mark_spec = NULL; + const struct rte_flow_action_count *act_count; uint32_t dest_num = 0; uint32_t mark_num = 0; + uint32_t counter_num = 0; int ret; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -752,8 +1521,7 @@ ice_fdir_parse_action(struct ice_adapter *ad, dest_num++; filter->input.dest_ctl = - ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; - filter->input.q_index = 0; + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; break; case RTE_FLOW_ACTION_TYPE_RSS: dest_num++; @@ -765,9 +1533,19 @@ ice_fdir_parse_action(struct ice_adapter *ad, break; case RTE_FLOW_ACTION_TYPE_MARK: mark_num++; - + filter->mark_flag = 1; mark_spec = actions->conf; filter->input.fltr_id = mark_spec->id; + filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + counter_num++; + + act_count = actions->conf; + filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + rte_memcpy(&filter->act_count, act_count, + sizeof(filter->act_count)); + break; default: rte_flow_error_set(error, EINVAL, @@ -777,7 +1555,7 @@ ice_fdir_parse_action(struct ice_adapter *ad, } } - if (dest_num == 0 || dest_num >= 2) { + if (dest_num >= 2) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "Unsupported action combination"); @@ -791,6 +1569,25 @@ ice_fdir_parse_action(struct ice_adapter *ad, return -rte_errno; } + if (counter_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many count actions"); + return -rte_errno; + } + + if (dest_num + mark_num + counter_num == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Empty action"); + return -rte_errno; + } + + /* set default action to PASSTHRU mode, in "mark/count only" case. 
*/ + if (dest_num == 0) + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; + return 0; } @@ -803,12 +1600,16 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, const struct rte_flow_item *item = pattern; enum rte_flow_item_type item_type; enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE; const struct rte_flow_item_eth *eth_spec, *eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; uint64_t input_set = ICE_INSET_NONE; uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE; uint8_t ipv6_addr_mask[16] = { @@ -816,7 +1617,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; uint32_t vtc_flow_cpu; - + uint16_t ether_type; + enum rte_flow_item_type next_type; for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -832,29 +1634,43 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, case RTE_FLOW_ITEM_TYPE_ETH: eth_spec = item->spec; eth_mask = item->mask; + next_type = (item + 1)->type; if (eth_spec && eth_mask) { - if (!rte_is_zero_ether_addr(ð_spec->src) || - !rte_is_zero_ether_addr(ð_mask->src)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Src mac not support"); - return -rte_errno; + if (!rte_is_zero_ether_addr(ð_mask->dst)) { + input_set |= ICE_INSET_DMAC; + rte_memcpy(&filter->input.ext_data.dst_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); } - if (!rte_is_broadcast_ether_addr(ð_mask->dst)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid mac addr mask"); - return -rte_errno; + if (!rte_is_zero_ether_addr(ð_mask->src)) { + input_set |= ICE_INSET_SMAC; + rte_memcpy(&filter->input.ext_data.src_mac, + ð_spec->src, + RTE_ETHER_ADDR_LEN); } - input_set |= ICE_INSET_DMAC; - rte_memcpy(&filter->input.ext_data.dst_mac, - ð_spec->dst, - RTE_ETHER_ADDR_LEN); + /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */ + if (eth_mask->type == RTE_BE16(0xffff) && + next_type == RTE_FLOW_ITEM_TYPE_END) { + input_set |= ICE_INSET_ETHERTYPE; + ether_type = rte_be_to_cpu_16(eth_spec->type); + + if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } + + rte_memcpy(&filter->input.ext_data.ether_type, + ð_spec->type, + sizeof(eth_spec->type)); + flow_type = ICE_FLTR_PTYPE_NON_IP_L2; + } } break; case RTE_FLOW_ITEM_TYPE_IPV4: @@ -876,9 +1692,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, return -rte_errno; } if (ipv4_mask->hdr.src_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_SRC; + input_set |= tunnel_type ? + ICE_INSET_TUN_IPV4_SRC : + ICE_INSET_IPV4_SRC; if (ipv4_mask->hdr.dst_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_DST; + input_set |= tunnel_type ? 
+ ICE_INSET_TUN_IPV4_DST : + ICE_INSET_IPV4_DST; if (ipv4_mask->hdr.type_of_service == UINT8_MAX) input_set |= ICE_INSET_IPV4_TOS; if (ipv4_mask->hdr.time_to_live == UINT8_MAX) @@ -887,9 +1707,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, input_set |= ICE_INSET_IPV4_PROTO; filter->input.ip.v4.dst_ip = - ipv4_spec->hdr.src_addr; - filter->input.ip.v4.src_ip = ipv4_spec->hdr.dst_addr; + filter->input.ip.v4.src_ip = + ipv4_spec->hdr.src_addr; filter->input.ip.v4.tos = ipv4_spec->hdr.type_of_service; filter->input.ip.v4.ttl = @@ -934,9 +1754,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, input_set |= ICE_INSET_IPV6_HOP_LIMIT; rte_memcpy(filter->input.ip.v6.dst_ip, - ipv6_spec->hdr.src_addr, 16); - rte_memcpy(filter->input.ip.v6.src_ip, ipv6_spec->hdr.dst_addr, 16); + rte_memcpy(filter->input.ip.v6.src_ip, + ipv6_spec->hdr.src_addr, 16); vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow); @@ -955,6 +1775,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, tcp_spec = item->spec; tcp_mask = item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + if (tcp_spec && tcp_mask) { /* Check TCP mask and update input set */ if (tcp_mask->hdr.sent_seq || @@ -972,25 +1797,25 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (tcp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_TCP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_TCP_SRC_PORT : + ICE_INSET_TCP_SRC_PORT; if (tcp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_TCP_DST_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_TCP_DST_PORT : + ICE_INSET_TCP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.ip.v4.dst_port = - tcp_spec->hdr.src_port; - filter->input.ip.v4.src_port = tcp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_TCP; + filter->input.ip.v4.src_port = + tcp_spec->hdr.src_port; } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { filter->input.ip.v6.dst_port = - tcp_spec->hdr.src_port; - filter->input.ip.v6.src_port = tcp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_TCP; + filter->input.ip.v6.src_port = + tcp_spec->hdr.src_port; } } break; @@ -998,6 +1823,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, udp_spec = item->spec; udp_mask = item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + if (udp_spec && udp_mask) { /* Check UDP mask and update input set*/ if (udp_mask->hdr.dgram_len || @@ -1010,25 +1840,25 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (udp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_UDP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_UDP_SRC_PORT : + ICE_INSET_UDP_SRC_PORT; if (udp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_UDP_DST_PORT; + input_set |= tunnel_type ? 
+ ICE_INSET_TUN_UDP_DST_PORT : + ICE_INSET_UDP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.ip.v4.dst_port = - udp_spec->hdr.src_port; - filter->input.ip.v4.src_port = udp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_UDP; + filter->input.ip.v4.src_port = + udp_spec->hdr.src_port; } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { filter->input.ip.v6.src_port = udp_spec->hdr.src_port; filter->input.ip.v6.dst_port = udp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_UDP; } } break; @@ -1036,6 +1866,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, sctp_spec = item->spec; sctp_mask = item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + if (sctp_spec && sctp_mask) { /* Check SCTP mask and update input set */ if (sctp_mask->hdr.cksum) { @@ -1047,30 +1882,82 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (sctp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_SCTP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_SCTP_SRC_PORT : + ICE_INSET_SCTP_SRC_PORT; if (sctp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_SCTP_DST_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_SCTP_DST_PORT : + ICE_INSET_SCTP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.ip.v4.dst_port = - sctp_spec->hdr.src_port; - filter->input.ip.v4.src_port = sctp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + filter->input.ip.v4.src_port = + sctp_spec->hdr.src_port; } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { filter->input.ip.v6.dst_port = - sctp_spec->hdr.src_port; - filter->input.ip.v6.src_port = sctp_spec->hdr.dst_port; - flow_type = - ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + filter->input.ip.v6.src_port = + sctp_spec->hdr.src_port; } } break; case RTE_FLOW_ITEM_TYPE_VOID: break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + l3 = RTE_FLOW_ITEM_TYPE_END; + vxlan_spec = item->spec; + vxlan_mask = item->mask; + + if (vxlan_spec || vxlan_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vxlan field"); + return -rte_errno; + } + + tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_GTPU: + l3 = RTE_FLOW_ITEM_TYPE_END; + gtp_spec = item->spec; + gtp_mask = item->mask; + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + if (gtp_mask->teid == UINT32_MAX) + input_set |= ICE_INSET_GTPU_TEID; + + filter->input.gtpu_data.teid = gtp_spec->teid; + } + + tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU; + break; + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + gtp_psc_spec = item->spec; + gtp_psc_mask = item->mask; + + if (gtp_psc_spec && gtp_psc_mask) { + if (gtp_psc_mask->qfi == UINT8_MAX) + input_set |= ICE_INSET_GTPU_QFI; + + filter->input.gtpu_data.qfi = + gtp_psc_spec->qfi; + } + tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + break; default: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1080,6 +1967,20 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } } + if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU && + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH && + flow_type == 
ICE_FLTR_PTYPE_NONF_IPV4_UDP) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER; + else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU && + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER; + else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH && + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER; + + filter->tunnel_type = tunnel_type; filter->input.flow_type = flow_type; filter->input_set = input_set; @@ -1102,35 +2003,39 @@ ice_fdir_parse(struct ice_adapter *ad, int ret; memset(filter, 0, sizeof(*filter)); - item = ice_search_pattern_match_item(pattern, array, array_len, error); + item = ice_search_pattern_match_item(ad, pattern, array, array_len, + error); if (!item) return -rte_errno; ret = ice_fdir_parse_pattern(ad, pattern, error, filter); if (ret) - return ret; - input_set = filter->input_set; + goto error; + input_set = filter->input_set | filter->outer_input_set; if (!input_set || input_set & ~item->input_set_mask) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern, "Invalid input set"); - return -rte_errno; + ret = -rte_errno; + goto error; } ret = ice_fdir_parse_action(ad, actions, error, filter); if (ret) - return ret; + goto error; - *meta = filter; - - return 0; + if (meta) + *meta = filter; +error: + rte_free(item); + return ret; } static struct ice_flow_parser ice_fdir_parser = { .engine = &ice_fdir_engine, - .array = ice_fdir_pattern, - .array_len = RTE_DIM(ice_fdir_pattern), + .array = ice_fdir_pattern_list, + .array_len = RTE_DIM(ice_fdir_pattern_list), .parse_pattern_action = ice_fdir_parse, .stage = ICE_FLOW_STAGE_DISTRIBUTOR, };
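/* Usage illustration (not from the commit): applications reach this parser
 * through the generic rte_flow API. The sketch below, assuming an already
 * configured and started port, installs the kind of rule the patch enables:
 * match IPv4/UDP, steer to a queue, tag with a MARK id (propagated via FDID)
 * and attach a COUNT action whose hits are later read back through
 * ice_fdir_query_count(). Port, queue and id values are examples.
 */
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

static struct rte_flow *
demo_install_fdir_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX), /* full mask -> ICE_INSET_IPV4_DST */
	};
	struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(4000) };
	struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(UINT16_MAX) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action_count count = { .id = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* validate first, then create; both paths end up in ice_fdir_parse() */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}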