#include <stdio.h>
#include <rte_flow.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
+#define ICE_FDIR_IPV6_TC_OFFSET 20
+#define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
+
+#define ICE_FDIR_MAX_QREGION_SIZE 128
+
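+/*
+ * Input-set bitmaps for each supported FDIR pattern: the L4 variants
+ * extend the corresponding L3 set with port fields, and the VXLAN
+ * variants use the tunnel (inner) versions of the same fields.
+ */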
+#define ICE_FDIR_INSET_ETH_IPV4 (\
+ ICE_INSET_DMAC | \
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
+ ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
+
+#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
+ ICE_FDIR_INSET_ETH_IPV4 | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
+ ICE_FDIR_INSET_ETH_IPV4 | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
+ ICE_FDIR_INSET_ETH_IPV4 | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6 (\
+ ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
+ ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
+
+#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
+ ICE_FDIR_INSET_ETH_IPV6 | \
+ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
+ ICE_FDIR_INSET_ETH_IPV6 | \
+ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
+ ICE_FDIR_INSET_ETH_IPV6 | \
+ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4 (\
+ ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
+ ICE_FDIR_INSET_VXLAN_IPV4 | \
+ ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
+ ICE_FDIR_INSET_VXLAN_IPV4 | \
+ ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
+ ICE_FDIR_INSET_VXLAN_IPV4 | \
+ ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
+
+static struct ice_pattern_match_item ice_fdir_pattern[] = {
+ {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+ {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_ipv4,
+ ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
+ ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
+ ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
+ ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
+ ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
+ ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
+ ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
+ ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+static struct ice_flow_parser ice_fdir_parser;
+
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
return -ENOMEM;
}
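+
+/*
+ * ice_fdir_counter_pool_add - add a block of hardware counters to a container
+ * @pf: board private structure
+ * @container: counter pool container to extend
+ * @index_start: first hardware counter index covered by the new pool
+ * @len: number of counters in the new pool
+ */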
+static int
+ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
+ struct ice_fdir_counter_pool_container *container,
+ uint32_t index_start,
+ uint32_t len)
+{
+ struct ice_fdir_counter_pool *pool;
+ uint32_t i;
+ int ret = 0;
+
+ pool = rte_zmalloc("ice_fdir_counter_pool",
+ sizeof(*pool) +
+ sizeof(struct ice_fdir_counter) * len,
+ 0);
+ if (!pool) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir counter pool");
+ return -ENOMEM;
+ }
+
+ TAILQ_INIT(&pool->counter_list);
+ TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
+
+ for (i = 0; i < len; i++) {
+ struct ice_fdir_counter *counter = &pool->counters[i];
+
+ counter->hw_index = index_start + i;
+ TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
+ }
+
+ if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
+ PMD_INIT_LOG(ERR, "FDIR counter pool is full");
+ ret = -EINVAL;
+ goto free_pool;
+ }
+
+ container->pools[container->index_free++] = pool;
+ return 0;
+
+free_pool:
+	TAILQ_REMOVE(&container->pool_list, pool, next);
+	rte_free(pool);
+ return ret;
+}
+
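+/*
+ * ice_fdir_counter_init - set up the FDIR counter pool container
+ * @pf: board private structure
+ *
+ * Reserves one block of ICE_FDIR_COUNTERS_PER_BLOCK counters starting at
+ * the index derived from the device's FD counter base.
+ */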
+static int
+ice_fdir_counter_init(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ struct ice_fdir_counter_pool_container *container =
+ &fdir_info->counter;
+ uint32_t cnt_index, len;
+ int ret;
+
+ TAILQ_INIT(&container->pool_list);
+
+ cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
+ len = ICE_FDIR_COUNTERS_PER_BLOCK;
+
+ ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ice_fdir_counter_release(struct ice_pf *pf)
+{
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ struct ice_fdir_counter_pool_container *container =
+ &fdir_info->counter;
+ uint8_t i;
+
+ for (i = 0; i < container->index_free; i++)
+ rte_free(container->pools[i]);
+
+ return 0;
+}
+
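+/*
+ * ice_fdir_counter_shared_search - look up a shared counter by ID
+ * @container: counter pool container to search
+ * @id: rte_flow COUNT action ID
+ *
+ * Returns the in-use shared counter with a matching ID, or NULL.
+ */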
+static struct ice_fdir_counter *
+ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
+ *container,
+ uint32_t id)
+{
+ struct ice_fdir_counter_pool *pool;
+ struct ice_fdir_counter *counter;
+ int i;
+
+ TAILQ_FOREACH(pool, &container->pool_list, next) {
+ for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
+ counter = &pool->counters[i];
+
+ if (counter->shared &&
+ counter->ref_cnt &&
+ counter->id == id)
+ return counter;
+ }
+ }
+
+ return NULL;
+}
+
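+/*
+ * ice_fdir_counter_alloc - take a counter from the free list
+ * @pf: board private structure
+ * @shared: whether the COUNT action is shared across rules
+ * @id: rte_flow COUNT action ID
+ *
+ * A shared request first tries to reuse an existing counter with the
+ * same ID; otherwise the first free counter is claimed and its
+ * statistics registers are cleared.
+ */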
+static struct ice_fdir_counter *
+ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ struct ice_fdir_counter_pool_container *container =
+ &fdir_info->counter;
+ struct ice_fdir_counter_pool *pool = NULL;
+ struct ice_fdir_counter *counter_free = NULL;
+
+ if (shared) {
+ counter_free = ice_fdir_counter_shared_search(container, id);
+ if (counter_free) {
+ if (counter_free->ref_cnt + 1 == 0) {
+ rte_errno = E2BIG;
+ return NULL;
+ }
+ counter_free->ref_cnt++;
+ return counter_free;
+ }
+ }
+
+ TAILQ_FOREACH(pool, &container->pool_list, next) {
+ counter_free = TAILQ_FIRST(&pool->counter_list);
+ if (counter_free)
+ break;
+ counter_free = NULL;
+ }
+
+ if (!counter_free) {
+ PMD_DRV_LOG(ERR, "No free counter found\n");
+ return NULL;
+ }
+
+ counter_free->shared = shared;
+ counter_free->id = id;
+ counter_free->ref_cnt = 1;
+ counter_free->pool = pool;
+
+ /* reset statistic counter value */
+ ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
+ ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
+
+ TAILQ_REMOVE(&pool->counter_list, counter_free, next);
+ if (TAILQ_EMPTY(&pool->counter_list)) {
+ TAILQ_REMOVE(&container->pool_list, pool, next);
+ TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
+ }
+
+ return counter_free;
+}
+
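+/*
+ * ice_fdir_counter_free - drop one reference to a counter
+ * @pf: board private structure
+ * @counter: counter to release; it is returned to its pool's free list
+ * when the reference count reaches zero
+ */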
+static void
+ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
+ struct ice_fdir_counter *counter)
+{
+ if (!counter)
+ return;
+
+ if (--counter->ref_cnt == 0) {
+ struct ice_fdir_counter_pool *pool = counter->pool;
+
+ TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
+ }
+}
+
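+/*
+ * ice_fdir_init_filter_list - create the FDIR software filter list
+ * @pf: board private structure
+ *
+ * Creates a CRC-hashed rte_hash table keyed by struct
+ * ice_fdir_fltr_pattern, plus a flat map from hash position to
+ * filter entry.
+ */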
+static int
+ice_fdir_init_filter_list(struct ice_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = ICE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct ice_fdir_fltr_pattern),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize hash */
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->device->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
+ sizeof(*fdir_info->hash_map) *
+ ICE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
+static void
+ice_fdir_release_filter_list(struct ice_pf *pf)
+{
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+}
+
/*
* ice_fdir_setup - reserve and initialize the Flow Director resources
* @pf: board private structure
}
pf->fdir.fdir_vsi = vsi;
+ err = ice_fdir_init_filter_list(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
+ return -EINVAL;
+ }
+
+ err = ice_fdir_counter_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
+ return -EINVAL;
+ }
+
/*Fdir tx queue setup*/
err = ice_fdir_setup_tx_resources(pf);
if (err) {
if (err)
PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
+ err = ice_fdir_counter_release(pf);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
+
+ ice_fdir_release_filter_list(pf);
+
ice_tx_queue_release(pf->fdir.txq);
pf->fdir.txq = NULL;
ice_rx_queue_release(pf->fdir.rxq);
{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
+ {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
+ {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
+ {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
+ {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
+ {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
+ {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
+ {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
+ {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
};
for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
}
}
-static int __rte_unused
+static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
uint64_t input_set, bool is_tunnel)
{
}
}
-static void __rte_unused
+static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
bool is_tunnel, bool add)
{
ice_fdir_init(struct ice_adapter *ad)
{
struct ice_pf *pf = &ad->pf;
+ int ret;
- return ice_fdir_setup(pf);
+ ret = ice_fdir_setup(pf);
+ if (ret)
+ return ret;
+
+ return ice_register_parser(&ice_fdir_parser, ad);
}
static void
{
struct ice_pf *pf = &ad->pf;
+ ice_unregister_parser(&ice_fdir_parser, ad);
+
ice_fdir_teardown(pf);
}
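+
+/*
+ * ice_fdir_add_del_filter - program or remove one FDIR rule in hardware
+ * @pf: board private structure
+ * @filter: rule to program
+ * @add: true to add the rule, false to delete it
+ *
+ * Builds a programming descriptor and a dummy training packet, then
+ * hands both to the FDIR programming queue.
+ */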
+static int
+ice_fdir_add_del_filter(struct ice_pf *pf,
+ struct ice_fdir_filter_conf *filter,
+ bool add)
+{
+ struct ice_fltr_desc desc;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ bool is_tun;
+ int ret;
+
+ filter->input.dest_vsi = pf->main_vsi->idx;
+
+ memset(&desc, 0, sizeof(desc));
+ ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+ is_tun = filter->tunnel_type ? true : false;
+
+ memset(pkt, 0, ICE_FDIR_PKT_LEN);
+ ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Generate dummy packet failed");
+ return -EINVAL;
+ }
+
+ return ice_fdir_programming(pf, &desc);
+}
+
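+/*
+ * ice_fdir_extract_fltr_key - build the hash key for a filter
+ * @key: output pattern key
+ * @filter: filter to extract the key fields from
+ */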
+static void
+ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
+ struct ice_fdir_filter_conf *filter)
+{
+ struct ice_fdir_fltr *input = &filter->input;
+ memset(key, 0, sizeof(*key));
+
+ key->flow_type = input->flow_type;
+ rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
+ rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
+ rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
+ rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
+
+ key->tunnel_type = filter->tunnel_type;
+}
+
+/* Check if the flow director filter already exists */
+static struct ice_fdir_filter_conf *
+ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
+ const struct ice_fdir_fltr_pattern *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director entry into the SW list */
+static int
+ice_fdir_entry_insert(struct ice_pf *pf,
+ struct ice_fdir_filter_conf *entry,
+ struct ice_fdir_fltr_pattern *key)
+{
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table, key);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir entry to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = entry;
+
+ return 0;
+}
+
+/* Delete a flow director entry from the SW list */
+static int
+ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
+{
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, key);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = NULL;
+
+ return 0;
+}
+
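+/*
+ * ice_fdir_create_filter - rte_flow create hook of the FDIR engine
+ * @ad: ice adapter
+ * @flow: rte_flow handle to attach the new rule to
+ * @meta: parsed struct ice_fdir_filter_conf from ice_fdir_parse
+ * @error: rte_flow error reporting
+ *
+ * Rejects duplicates via the software hash table, configures the input
+ * set profile, optionally allocates a counter, programs the rule in
+ * hardware and records it in the software list.
+ */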
+static int
+ice_fdir_create_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_filter_conf *filter = meta;
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ struct ice_fdir_filter_conf *entry, *node;
+ struct ice_fdir_fltr_pattern key;
+ bool is_tun;
+ int ret;
+
+ ice_fdir_extract_fltr_key(&key, filter);
+ node = ice_fdir_entry_lookup(fdir_info, &key);
+ if (node) {
+ rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Rule already exists!");
+ return -rte_errno;
+ }
+
+ entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+ if (!entry) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return -rte_errno;
+ }
+
+ is_tun = filter->tunnel_type ? true : false;
+
+ ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
+ filter->input_set, is_tun);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Profile configure failed.");
+ goto free_entry;
+ }
+
+ /* alloc counter for FDIR */
+ if (filter->input.cnt_ena) {
+ struct rte_flow_action_count *act_count = &filter->act_count;
+
+ filter->counter = ice_fdir_counter_alloc(pf,
+ act_count->shared,
+ act_count->id);
+ if (!filter->counter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to alloc FDIR counter.");
+ goto free_entry;
+ }
+ filter->input.cnt_index = filter->counter->hw_index;
+ }
+
+ ret = ice_fdir_add_del_filter(pf, filter, true);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Add filter rule failed.");
+ goto free_counter;
+ }
+
+ rte_memcpy(entry, filter, sizeof(*entry));
+ ret = ice_fdir_entry_insert(pf, entry, &key);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Insert entry to table failed.");
+ goto free_counter;
+ }
+
+ flow->rule = entry;
+ ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
+
+ return 0;
+
+free_counter:
+ if (filter->counter) {
+ ice_fdir_counter_free(pf, filter->counter);
+ filter->counter = NULL;
+ }
+
+free_entry:
+ rte_free(entry);
+ return -rte_errno;
+}
+
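+/*
+ * ice_fdir_destroy_filter - rte_flow destroy hook of the FDIR engine
+ * @ad: ice adapter
+ * @flow: flow whose rule is removed from hardware and the software list
+ * @error: rte_flow error reporting
+ */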
+static int
+ice_fdir_destroy_filter(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_info *fdir_info = &pf->fdir;
+ struct ice_fdir_filter_conf *filter, *entry;
+ struct ice_fdir_fltr_pattern key;
+ bool is_tun;
+ int ret;
+
+ filter = (struct ice_fdir_filter_conf *)flow->rule;
+
+ is_tun = filter->tunnel_type ? true : false;
+
+ if (filter->counter) {
+ ice_fdir_counter_free(pf, filter->counter);
+ filter->counter = NULL;
+ }
+
+ ice_fdir_extract_fltr_key(&key, filter);
+ entry = ice_fdir_entry_lookup(fdir_info, &key);
+ if (!entry) {
+ rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Can't find entry.");
+ return -rte_errno;
+ }
+
+ ret = ice_fdir_add_del_filter(pf, filter, false);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Del filter rule failed.");
+ return -rte_errno;
+ }
+
+ ret = ice_fdir_entry_del(pf, &key);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Remove entry from table failed.");
+ return -rte_errno;
+ }
+
+ ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
+ flow->rule = NULL;
+
+ rte_free(filter);
+
+ return 0;
+}
+
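+/*
+ * ice_fdir_query_count - rte_flow COUNT query hook of the FDIR engine
+ * @ad: ice adapter
+ * @flow: flow whose counter is queried
+ * @flow_stats: output packet-hit statistics (byte counts are not kept)
+ * @error: rte_flow error reporting
+ */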
+static int
+ice_fdir_query_count(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_query_count *flow_stats,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_fdir_filter_conf *filter = flow->rule;
+ struct ice_fdir_counter *counter = filter->counter;
+ uint64_t hits_lo, hits_hi;
+
+ if (!counter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "FDIR counters not available");
+ return -rte_errno;
+ }
+
+ /*
+ * Reading the low 32-bits latches the high 32-bits into a shadow
+ * register. Reading the high 32-bits returns the value in the
+ * shadow register.
+ */
+ hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
+ hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
+
+ flow_stats->hits_set = 1;
+ flow_stats->hits = hits_lo | (hits_hi << 32);
+ flow_stats->bytes_set = 0;
+ flow_stats->bytes = 0;
+
+ if (flow_stats->reset) {
+ /* reset statistic counter value */
+ ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
+ ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
+ }
+
+ return 0;
+}
+
static struct ice_flow_engine ice_fdir_engine = {
.init = ice_fdir_init,
.uninit = ice_fdir_uninit,
+ .create = ice_fdir_create_filter,
+ .destroy = ice_fdir_destroy_filter,
+ .query_count = ice_fdir_query_count,
.type = ICE_FLOW_ENGINE_FDIR,
};
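+
+/*
+ * ice_fdir_parse_action_qregion - validate an RSS action as a queue region
+ * @pf: board private structure
+ * @error: rte_flow error reporting
+ * @act: the RSS action to validate
+ * @filter: filter to fill with the queue region base and size
+ *
+ * The queue region must be a contiguous, power-of-two sized range of
+ * valid Rx queues; its size is stored as a log2 value in q_region.
+ */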
+static int
+ice_fdir_parse_action_qregion(struct ice_pf *pf,
+ struct rte_flow_error *error,
+ const struct rte_flow_action *act,
+ struct ice_fdir_filter_conf *filter)
+{
+ const struct rte_flow_action_rss *rss = act->conf;
+ uint32_t i;
+
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (rss->queue_num <= 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Queue region size can't be 0 or 1.");
+ return -rte_errno;
+ }
+
+ /* check that the queue indexes of the region are contiguous */
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Discontinuous queue region");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue region indexes.");
+ return -rte_errno;
+ }
+
+ if (!(rte_is_power_of_2(rss->queue_num) &&
+ (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "The region size should be any of the following values:"
+ "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+ "of queues do not exceed the VSI allocation.");
+ return -rte_errno;
+ }
+
+ filter->input.q_index = rss->queue[0];
+ filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
+ filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
+
+ return 0;
+}
+
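+/*
+ * ice_fdir_parse_action - translate rte_flow actions into FDIR fields
+ * @ad: ice adapter
+ * @actions: action list; must contain exactly one destination action
+ * (QUEUE, DROP, PASSTHRU or RSS) and at most one MARK and one COUNT
+ * @error: rte_flow error reporting
+ * @filter: filter to fill
+ */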
+static int
+ice_fdir_parse_action(struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_fdir_filter_conf *filter)
+{
+ struct ice_pf *pf = &ad->pf;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec = NULL;
+ const struct rte_flow_action_count *act_count;
+ uint32_t dest_num = 0;
+ uint32_t mark_num = 0;
+ uint32_t counter_num = 0;
+ int ret;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_num++;
+
+ act_q = actions->conf;
+ filter->input.q_index = act_q->index;
+ if (filter->input.q_index >=
+ pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid queue for FDIR.");
+ return -rte_errno;
+ }
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ filter->input.q_index = 0;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ dest_num++;
+
+ ret = ice_fdir_parse_action_qregion(pf,
+ error, actions, filter);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_num++;
+
+ mark_spec = actions->conf;
+ filter->input.fltr_id = mark_spec->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ counter_num++;
+
+ act_count = actions->conf;
+ filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ rte_memcpy(&filter->act_count, act_count,
+ sizeof(filter->act_count));
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (dest_num == 0 || dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ if (mark_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many mark actions");
+ return -rte_errno;
+ }
+
+ if (counter_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many count actions");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
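+/*
+ * ice_fdir_parse_pattern - translate rte_flow pattern items into an FDIR
+ * input set
+ * @ad: ice adapter (unused)
+ * @pattern: pattern items; only fully-masked fields are accepted into
+ * the input set, partially-masked fields are rejected
+ * @error: rte_flow error reporting
+ * @filter: filter to fill
+ *
+ * Addresses and ports are deliberately stored swapped (src into dst and
+ * vice versa) to match the viewpoint the base code uses when it builds
+ * the FDIR training packet.
+ */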
+static int
+ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_fdir_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ uint64_t input_set = ICE_INSET_NONE;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+ uint32_t vtc_flow_cpu;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (eth_spec && eth_mask) {
+ if (!rte_is_zero_ether_addr(&eth_spec->src) ||
+ !rte_is_zero_ether_addr(&eth_mask->src)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Src mac not supported");
+ return -rte_errno;
+ }
+
+ if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid mac addr mask");
+ return -rte_errno;
+ }
+
+ input_set |= ICE_INSET_DMAC;
+ rte_memcpy(&filter->input.ext_data.dst_mac,
+ &eth_spec->dst,
+ RTE_ETHER_ADDR_LEN);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_IPV4_SRC :
+ ICE_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_IPV4_DST :
+ ICE_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= ICE_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= ICE_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= ICE_INSET_IPV4_PROTO;
+
+ filter->input.ip.v4.dst_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.ip.v4.src_ip =
+ ipv4_spec->hdr.dst_addr;
+ filter->input.ip.v4.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.ip.v4.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.ip.v4.proto =
+ ipv4_spec->hdr.next_proto_id;
+ }
+
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= ICE_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= ICE_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+ input_set |= ICE_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= ICE_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+
+ rte_memcpy(filter->input.ip.v6.dst_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.ip.v6.src_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ vtc_flow_cpu =
+ rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ filter->input.ip.v6.tc =
+ (uint8_t)(vtc_flow_cpu >>
+ ICE_FDIR_IPV6_TC_OFFSET);
+ filter->input.ip.v6.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.ip.v6.hlim =
+ ipv6_spec->hdr.hop_limits;
+ }
+
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_TCP_SRC_PORT :
+ ICE_INSET_TCP_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_TCP_DST_PORT :
+ ICE_INSET_TCP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.ip.v4.dst_port =
+ tcp_spec->hdr.src_port;
+ filter->input.ip.v4.src_port =
+ tcp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.ip.v6.dst_port =
+ tcp_spec->hdr.src_port;
+ filter->input.ip.v6.src_port =
+ tcp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_UDP_SRC_PORT :
+ ICE_INSET_UDP_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_UDP_DST_PORT :
+ ICE_INSET_UDP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.ip.v4.dst_port =
+ udp_spec->hdr.src_port;
+ filter->input.ip.v4.src_port =
+ udp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.ip.v6.dst_port =
+ udp_spec->hdr.src_port;
+ filter->input.ip.v6.src_port =
+ udp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (sctp_spec && sctp_mask) {
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_SCTP_SRC_PORT :
+ ICE_INSET_SCTP_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= tunnel_type ?
+ ICE_INSET_TUN_SCTP_DST_PORT :
+ ICE_INSET_SCTP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.ip.v4.dst_port =
+ sctp_spec->hdr.src_port;
+ filter->input.ip.v4.src_port =
+ sctp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.ip.v6.dst_port =
+ sctp_spec->hdr.src_port;
+ filter->input.ip.v6.src_port =
+ sctp_spec->hdr.dst_port;
+ flow_type =
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ l3 = RTE_FLOW_ITEM_TYPE_END;
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+
+ if (vxlan_spec || vxlan_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vxlan field");
+ return -rte_errno;
+ }
+
+ tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ filter->tunnel_type = tunnel_type;
+ filter->input.flow_type = flow_type;
+ filter->input_set = input_set;
+
+ return 0;
+}
+
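+/*
+ * ice_fdir_parse - parse_pattern_action hook of the FDIR flow parser
+ * @ad: ice adapter
+ * @array: supported pattern table (ice_fdir_pattern)
+ * @array_len: number of entries in @array
+ * @pattern: rte_flow pattern to parse
+ * @actions: rte_flow actions to parse
+ * @meta: on success, points to the parsed filter in pf->fdir.conf
+ * @error: rte_flow error reporting
+ *
+ * The parsed input set must be non-empty and a subset of the matched
+ * pattern's input_set_mask.
+ */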
+static int
+ice_fdir_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+ item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+ return -rte_errno;
+
+ ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ return ret;
+ input_set = filter->input_set;
+ if (!input_set || input_set & ~item->input_set_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pattern,
+ "Invalid input set");
+ return -rte_errno;
+ }
+
+ ret = ice_fdir_parse_action(ad, actions, error, filter);
+ if (ret)
+ return ret;
+
+ *meta = filter;
+
+ return 0;
+}
+
+static struct ice_flow_parser ice_fdir_parser = {
+ .engine = &ice_fdir_engine,
+ .array = ice_fdir_pattern,
+ .array_len = RTE_DIM(ice_fdir_pattern),
+ .parse_pattern_action = ice_fdir_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
RTE_INIT(ice_fdir_engine_register)
{
ice_register_flow_engine(&ice_fdir_engine);