#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
+#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
/* Fetch variable byte size mask from the array. */
mask = flow_dv_fetch_field((const uint8_t *)item->mask +
field->offset, field->size);
- MLX5_ASSERT(mask);
if (!mask) {
++field;
continue;
return 0;
}
+/**
+ * Validate VLAN item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Ethernet device flow is being created on.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(UINT16_MAX),
+ .inner_type = RTE_BE16(UINT16_MAX),
+ };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+ const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
+ const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple VLAN layers not supported");
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (ret)
+ return ret;
+ if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->vmwa_context) {
+ /*
+ * A non-NULL context means we run in a virtual machine
+ * with SR-IOV enabled, so a VLAN interface has to be
+ * created to make the hypervisor set up the E-Switch
+ * vport context correctly. We avoid creating multiple
+ * VLAN interfaces, so a VLAN tag mask cannot be
+ * supported.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN tag mask is not"
+ " supported in virtual"
+ " environment");
+ }
+ }
+ return 0;
+}
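
/*
 * Illustrative, application-side sketch (not part of this patch; the
 * example_* names are hypothetical): a VLAN pattern that this validator
 * accepts even when priv->vmwa_context is set, because the TCI mask is
 * exactly the 12-bit VID mask 0x0fff.
 */
static const struct rte_flow_item_vlan example_vlan_spec = {
	.tci = RTE_BE16(100),		/* match VLAN ID 100 */
};
static const struct rte_flow_item_vlan example_vlan_mask = {
	.tci = RTE_BE16(0x0fff),	/* VID bits only, no PCP/DEI */
};
static const struct rte_flow_item example_vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &example_vlan_spec, .mask = &example_vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};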
+
+/*
+ * GTP flags are contained in 1 byte of the format:
+ * -------------------------------------------
+ * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
+ * |-----------------------------------------|
+ * | value | Version | PT | Res | E | S | PN |
+ * -------------------------------------------
+ *
+ * Matching is supported only for GTP flags E, S, PN.
+ */
+#define MLX5_GTP_FLAGS_MASK 0x07
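
/*
 * Illustrative, application-side sketch (not part of this patch; the
 * example_* names are hypothetical): match GTP packets with the S
 * (sequence number present) flag set. Per the mask above, only the low
 * three bits (E/S/PN) of v_pt_rsv_flags may carry a match.
 */
static const struct rte_flow_item_gtp example_gtp_spec = {
	.v_pt_rsv_flags = 0x02,	/* S flag */
	.teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp example_gtp_mask = {
	.v_pt_rsv_flags = 0x02,	/* subset of MLX5_GTP_FLAGS_MASK (0x07) */
	.teid = RTE_BE32(UINT32_MAX),
};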
+
/**
* Validate GTP item.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_gtp *spec = item->spec;
const struct rte_flow_item_gtp *mask = item->mask;
const struct rte_flow_item_gtp nic_mask = {
+ .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
.msg_type = 0xff,
.teid = RTE_BE32(0xffffffff),
};
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_gtp_mask;
+ if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Match is supported for GTP"
+ " flags only");
return mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
+ /* If VLAN item in pattern doesn't contain data, return here. */
+ if (!vlan_v)
+ return;
if (!vlan_m)
vlan_m = &nic_mask;
/* Only full match values are accepted */
const struct rte_flow_action *action = actions;
const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
- if (conf->vlan_vid > RTE_BE16(0xFFE))
+ if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"VLAN VID value is too big");
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
uint32_t idx = 0;
*cache_resource = *resource;
/*
* Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port)
* or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
*/
cache_resource->action =
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
* @return
* Max number of modify header actions device can support.
*/
-static unsigned int
-flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev, uint64_t flags)
+static inline unsigned int
+flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
+ uint64_t flags)
{
/*
- * There's no way to directly query the max cap. Although it has to be
- * acquried by iterative trial, it is a safe assumption that more
- * actions are supported by FW if extensive metadata register is
- * supported. (Only in the root table)
+ * There's no way to directly query the max capacity from FW.
+ * Assume the maximum value on the root table is supported.
*/
if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
return MLX5_MAX_MODIFY_NUM;
else
- return mlx5_flow_ext_mreg_supported(dev) ?
- MLX5_ROOT_TBL_MODIFY_NUM :
- MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG;
+ return MLX5_ROOT_TBL_MODIFY_NUM;
}
/**
return 0;
}
+/**
+ * Validate the age action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the age action.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_age(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_age *age = action->conf;
+
+ if (!priv->config.devx || priv->counter_fallback)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "age action not supported");
+ if (!(action->conf))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be null");
+ if (age->timeout >= UINT16_MAX / 2 / 10)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Max age time: 3275 seconds");
+ if (action_flags & MLX5_FLOW_ACTION_AGE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Duplicate age ctions set");
+ return 0;
+}
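
/*
 * Application-side sketch (standard rte_flow API assumed, not part of this
 * patch; the example_* names are hypothetical): attach an AGE action with a
 * 30 second timeout. The timeout is tracked internally in 0.1 second units,
 * which is why it must stay below UINT16_MAX / 2 / 10 (~3275 seconds).
 */
static const struct rte_flow_action_age example_age_conf = {
	.timeout = 30,		/* seconds */
	.context = NULL,	/* if NULL, the PMD falls back to the flow index */
};
static const struct rte_flow_action_queue example_queue_conf = { .index = 0 };
static const struct rte_flow_action example_age_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &example_age_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};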
+
/**
* Validate the modify-header IPv4 DSCP actions.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
- uint32_t batch = 0;
+ uint32_t batch = 0, age = 0;
idx--;
+ age = MLX_CNT_IS_AGE(idx);
+ idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
if (idx >= MLX5_CNT_BATCH_OFFSET) {
idx -= MLX5_CNT_BATCH_OFFSET;
batch = 1;
}
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
MLX5_ASSERT(pool);
if (ppool)
*ppool = pool;
- return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
+ return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
/**
static struct mlx5_counter_stats_mem_mng *
flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
{
- struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
- (dev->data->dev_private))->sh;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
}
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
- mkey_attr.umem_id = mem_mng->umem->umem_id;
+ mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
mkey_attr.pd = sh->pdn;
mkey_attr.log_entity_size = 0;
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 1;
+ if (priv->config.hca_attr.relaxed_ordering_write &&
+ priv->config.hca_attr.relaxed_ordering_read &&
+ !haswell_broadwell_cpu)
+ mkey_attr.relaxed_ordering = 1;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
* Pointer to the Ethernet device structure.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the pool is for aging counters.
*
* @return
- * The new container pointer on success, otherwise NULL and rte_errno is set.
+ * 0 on success, otherwise negative errno value and rte_errno is set.
*/
-static struct mlx5_pools_container *
-flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
+static int
+flow_dv_container_resize(struct rte_eth_dev *dev,
+ uint32_t batch, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont =
- MLX5_CNT_CONTAINER(priv->sh, batch, 0);
- struct mlx5_pools_container *new_cont =
- MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
+ age);
struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
+ void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
- int i;
+ void *pools = rte_calloc(__func__, 1, mem_size, 0);
- /* Fallback mode has no background thread. Skip the check. */
- if (!priv->counter_fallback &&
- cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
- /* The last resize still hasn't detected by the host thread. */
- rte_errno = EAGAIN;
- return NULL;
- }
- new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
- if (!new_cont->pools) {
+ if (!pools) {
rte_errno = ENOMEM;
- return NULL;
+ return -ENOMEM;
}
- if (cont->n)
- memcpy(new_cont->pools, cont->pools, cont->n *
- sizeof(struct mlx5_flow_counter_pool *));
+ if (old_pools)
+ memcpy(pools, old_pools, cont->n *
+ sizeof(struct mlx5_flow_counter_pool *));
/*
* Fallback mode query the counter directly, no background query
* resources are needed.
*/
if (!priv->counter_fallback) {
+ int i;
+
mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
- MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+ MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
if (!mem_mng) {
- rte_free(new_cont->pools);
- return NULL;
+ rte_free(pools);
+ return -ENOMEM;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
mem_mng->raws +
MLX5_CNT_CONTAINER_RESIZE +
i, next);
- } else {
- /*
- * Release the old container pools directly as no background
- * thread helps that.
- */
- rte_free(cont->pools);
}
- new_cont->n = resize;
- rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
- TAILQ_INIT(&new_cont->pool_list);
- TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
- new_cont->init_mem_mng = mem_mng;
- rte_cio_wmb();
- /* Flip the master container. */
- priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
- return new_cont;
+ rte_spinlock_lock(&cont->resize_sl);
+ cont->n = resize;
+ cont->mem_mng = mem_mng;
+ cont->pools = pools;
+ rte_spinlock_unlock(&cont->resize_sl);
+ if (old_pools)
+ rte_free(old_pools);
+ return 0;
}
/**
*pkts = 0;
*bytes = 0;
} else {
- offset = cnt - &pool->counters_raw[0];
+ offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
}
* The devX counter handle.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the pool is for counter that was allocated for aging.
* @param[in/out] cont_cur
* Pointer to the container pointer, it is updated on pool resize.
*
* @return
* The counter pool pointer on success, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
- uint32_t batch)
+ uint32_t batch, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0);
+ age);
int16_t n_valid = rte_atomic16_read(&cont->n_valid);
- uint32_t size;
+ uint32_t size = sizeof(*pool);
- if (cont->n == n_valid) {
- cont = flow_dv_container_resize(dev, batch);
- if (!cont)
- return NULL;
- }
- size = sizeof(*pool);
- if (!batch)
- size += MLX5_COUNTERS_PER_POOL *
- sizeof(struct mlx5_flow_counter_ext);
+ if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
+ return NULL;
+ size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
+ size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
+ size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
pool = rte_calloc(__func__, 1, size, 0);
if (!pool) {
rte_errno = ENOMEM;
}
pool->min_dcs = dcs;
if (!priv->counter_fallback)
- pool->raw = cont->init_mem_mng->raws + n_valid %
- MLX5_CNT_CONTAINER_RESIZE;
+ pool->raw = cont->mem_mng->raws + n_valid %
+ MLX5_CNT_CONTAINER_RESIZE;
pool->raw_hw = NULL;
+ pool->type = 0;
+ pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
+ pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
rte_spinlock_init(&pool->sl);
/*
* The generation of the new allocated counters in this pool is 0, 2 in
/* Pool initialization must be updated before host thread access. */
rte_cio_wmb();
rte_atomic16_add(&cont->n_valid, 1);
- return cont;
+ return pool;
}
+/**
+ * Update the minimum dcs-id for aged or non-aged counter pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] pool
+ * Current counter pool.
+ * @param[in] batch
+ * Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the counter is for aging.
+ */
+static void
+flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter_pool *pool,
+ uint32_t batch, uint32_t age)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter_pool *other;
+ struct mlx5_pools_container *cont;
+
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
+ other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
+ if (!other)
+ return;
+ if (pool->min_dcs->id < other->min_dcs->id) {
+ rte_atomic64_set(&other->a64_dcs,
+ rte_atomic64_read(&pool->a64_dcs));
+ } else {
+ rte_atomic64_set(&pool->a64_dcs,
+ rte_atomic64_read(&other->a64_dcs));
+ }
+}
/**
* Prepare a new counter and/or a new counter pool.
*
* Where to put the pointer of a new counter.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the pool is for counter that was allocated for aging.
*
* @return
- * The counter container pointer and @p cnt_free is set on success,
+ * The counter pool pointer and @p cnt_free is set on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_counter **cnt_free,
- uint32_t batch)
+ uint32_t batch, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter *cnt;
uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
if (!batch) {
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
return NULL;
pool = flow_dv_find_pool_by_id(cont, dcs->id);
if (!pool) {
- cont = flow_dv_pool_create(dev, dcs, batch);
- if (!cont) {
+ pool = flow_dv_pool_create(dev, dcs, batch, age);
+ if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- pool = TAILQ_FIRST(&cont->pool_list);
} else if (dcs->id < pool->min_dcs->id) {
rte_atomic64_set(&pool->a64_dcs,
(int64_t)(uintptr_t)dcs);
}
+ flow_dv_counter_update_min_dcs(dev,
+ pool, batch, age);
i = dcs->id % MLX5_COUNTERS_PER_POOL;
- cnt = &pool->counters_raw[i];
+ cnt = MLX5_POOL_GET_CNT(pool, i);
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
*cnt_free = cnt;
- return cont;
+ return pool;
}
/* bulk_bitmap is in 128 counters units. */
if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
rte_errno = ENODATA;
return NULL;
}
- cont = flow_dv_pool_create(dev, dcs, batch);
- if (!cont) {
+ pool = flow_dv_pool_create(dev, dcs, batch, age);
+ if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- pool = TAILQ_FIRST(&cont->pool_list);
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
+ cnt = MLX5_POOL_GET_CNT(pool, i);
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
}
- *cnt_free = &pool->counters_raw[0];
- return cont;
+ *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
+ return pool;
}
/**
flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
struct mlx5_flow_counter_pool **ppool)
{
- static struct mlx5_flow_counter_ext *cnt;
+ struct mlx5_flow_counter_ext *cnt;
struct mlx5_flow_counter_pool *pool;
- uint32_t i;
+ uint32_t i, j;
uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
for (i = 0; i < n_valid; i++) {
pool = cont->pools[i];
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
+ for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
+ cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
if (ppool)
*ppool = cont->pools[i];
* Counter identifier.
* @param[in] group
* Counter flow group.
+ * @param[in] age
+ * Whether the counter was allocated for aging.
*
* @return
* Index to flow counter on success, 0 otherwise and rte_errno is set.
*/
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
- uint16_t group)
+ uint16_t group, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
*/
uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0);
+ age);
uint32_t cnt_idx;
if (!priv->config.devx) {
cnt_free = NULL;
}
if (!cnt_free) {
- cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
- if (!cont)
+ pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
+ if (!pool)
return 0;
- pool = TAILQ_FIRST(&cont->pool_list);
}
if (!batch)
cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
struct mlx5_devx_obj *dcs;
if (batch) {
- offset = cnt_free - &pool->counters_raw[0];
+ offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
dcs = pool->min_dcs;
} else {
offset = 0;
}
}
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
- (cnt_free - pool->counters_raw));
+ MLX5_CNT_ARRAY_IDX(pool, cnt_free));
cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
+ cnt_idx += age * MLX5_CNT_AGE_OFFSET;
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
return cnt_idx;
}
+/**
+ * Get age param from counter index.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] counter
+ * Index to the counter handler.
+ *
+ * @return
+ * The aging parameter specified for the counter index.
+ */
+static struct mlx5_age_param*
+flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
+ uint32_t counter)
+{
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_pool *pool = NULL;
+
+ flow_dv_counter_get_by_idx(dev, counter, &pool);
+ counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
+ cnt = MLX5_POOL_GET_CNT(pool, counter);
+ return MLX5_CNT_TO_AGE(cnt);
+}
+
+/**
+ * Remove a flow counter from aged counter list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] counter
+ * Index to the counter handler.
+ * @param[in] cnt
+ * Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
+ uint32_t counter, struct mlx5_flow_counter *cnt)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ age_info = GET_PORT_AGE_INFO(priv);
+ age_param = flow_dv_counter_idx_get_age(dev, counter);
+ if (rte_atomic16_cmpset((volatile uint16_t *)
+ &age_param->state,
+ AGE_CANDIDATE, AGE_FREE)
+ != AGE_CANDIDATE) {
+ /*
+ * The lock is needed even if the age timed out,
+ * since the counter may still be under processing.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ }
+ rte_atomic16_set(&age_param->state, AGE_FREE);
+}
/**
* Release a flow counter.
*
if (cnt_ext && --cnt_ext->ref_cnt)
return;
}
+ if (IS_AGE_POOL(pool))
+ flow_dv_counter_remove_from_age(dev, counter, cnt);
/* Put the counter in the end - the last updated one. */
TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
/*
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_validate_item_vlan(items, item_flags,
- dev, error);
+ ret = flow_dv_validate_item_vlan(items, item_flags,
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
/* Meter action will add one more TAG action. */
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_validate_action_age(action_flags,
+ actions, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
ret = flow_dv_validate_action_modify_ipv4_dscp
(action_flags,
dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev))
rw_act_num += MLX5_ACT_NUM_SET_TAG;
- if ((uint32_t)rw_act_num >=
+ if ((uint32_t)rw_act_num >
flow_dv_modify_hdr_action_max(dev, is_root)) {
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
}
#endif
+/**
+ * Add match of ip_version.
+ *
+ * @param[in] group
+ * Flow group.
+ * @param[in] headers_v
+ * Values header pointer.
+ * @param[in] headers_m
+ * Masks header pointer.
+ * @param[in] ip_version
+ * The IP version to set.
+ */
+static inline void
+flow_dv_set_match_ip_version(uint32_t group,
+ void *headers_v,
+ void *headers_m,
+ uint8_t ip_version)
+{
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
+ ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
+}
+
/**
* Add Ethernet item to matcher and to the value.
*
*/
static void
flow_dv_translate_item_eth(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
{
const struct rte_flow_item_eth *eth_m = item->mask;
const struct rte_flow_item_eth *eth_v = item->spec;
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
* Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
*/
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ }
}
/**
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
uint16_t tci_m;
uint16_t tci_v;
- if (!vlan_v)
- return;
- if (!vlan_m)
- vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->handle->vf_vlan.tag =
- rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ if (vlan_v)
+ dev_flow->handle->vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
- tci_m = rte_be_to_cpu_16(vlan_m->tci);
- tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+ /*
+ * When VLAN item exists in flow, mark packet as tagged,
+ * even if TCI is not specified.
+ */
MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &rte_flow_item_vlan_mask;
+ tci_m = rte_be_to_cpu_16(vlan_m->tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type &
+ vlan_v->inner_type));
+ }
}
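
/*
 * Illustrative pattern (application side, not part of this patch): with the
 * change above, a VLAN item with no spec still sets cvlan_tag in the matcher,
 * so this matches any VLAN-tagged IPv4 packet instead of ignoring the item.
 */
static const struct rte_flow_item example_any_tagged_ipv4[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },	/* no spec/mask: any TCI */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
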
/**
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
/*
* On outer header (which must contains L2), or inner header with L2,
* set cvlan_tag mask bit to mark this packet as untagged.
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
/*
* On outer header (which must contains L2), or inner header with L2,
* set cvlan_tag mask bit to mark this packet as untagged.
return;
if (!gtp_m)
gtp_m = &rte_flow_item_gtp_mask;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
+ gtp_m->v_pt_rsv_flags);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
+ gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
gtp_v->msg_type & gtp_m->msg_type);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
union mlx5_flow_tbl_key table_key = {
{
struct mlx5_flow_tbl_resource *tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
uint32_t tag_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *tag;
tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
#else
/*
* Legacy mode, no LAG configurations is supported.
return 0;
}
+/**
+ * Create a counter with aging configuration.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow structure.
+ * @param[in] count
+ * Pointer to the counter action configuration.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ *
+ * @return
+ * Index to flow counter on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_counter(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_action_count *count,
+ const struct rte_flow_action_age *age)
+{
+ uint32_t counter;
+ struct mlx5_age_param *age_param;
+
+ counter = flow_dv_counter_alloc(dev,
+ count ? count->shared : 0,
+ count ? count->id : 0,
+ dev_flow->dv.group, !!age);
+ if (!counter || age == NULL)
+ return counter;
+ age_param = flow_dv_counter_idx_get_age(dev, counter);
+ age_param->context = age->context ? age->context :
+ (void *)(uintptr_t)(dev_flow->flow_idx);
+ /*
+ * The counter age accuracy may have a bit of delay. Apply a 3/4
+ * second bias to the timeout in order to let it age in time.
+ */
+ age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
+ /* Set expire time in unit of 0.1 sec. */
+ age_param->port_id = dev->data->port_id;
+ age_param->expire = age_param->timeout +
+ rte_rdtsc() / (rte_get_tsc_hz() / 10);
+ rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
+ return counter;
+}
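
/*
 * Helper sketch (hypothetical, mirrors the arithmetic above): the current
 * time expressed in the 0.1 second units used by age_param->timeout and
 * age_param->expire.
 */
static inline uint64_t
example_age_deciseconds(void)
{
	/* rte_get_tsc_hz() returns cycles per second; /10 gives 0.1s ticks. */
	return rte_rdtsc() / (rte_get_tsc_hz() / 10);
}
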
/**
* Add Tx queue matcher
*
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *age = NULL;
union flow_dv_attr flow_attr = { .attr = 0 };
uint32_t tag_be;
union mlx5_flow_tbl_key tbl_key;
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
- const struct rte_flow_action_count *count = action->conf;
const uint8_t *rss_key;
const struct rte_flow_action_jump *jump_data;
const struct rte_flow_action_meter *mtr;
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
- memset(&port_id_resource, 0, sizeof(port_id_resource));
port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- MLX5_ASSERT(!handle->rix_port_id_action);
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
action_flags |= MLX5_FLOW_ACTION_RSS;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
- rte_errno = ENOTSUP;
- goto cnt_err;
- }
- flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->dv.group);
- if (!flow->counter)
- goto cnt_err;
- dev_flow->dv.actions[actions_n++] =
- (flow_dv_counter_get_by_idx(dev,
- flow->counter, NULL))->action;
- action_flags |= MLX5_FLOW_ACTION_COUNT;
- break;
-cnt_err:
- if (rte_errno == ENOTSUP)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"count action not supported");
+ }
+ /* Save information first, will apply later. */
+ if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
+ count = action->conf;
else
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "cannot create counter"
- " object.");
+ age = action->conf;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
dev_flow->dv.actions[actions_n++] =
NULL,
"meter not found "
"or invalid parameters");
- flow->meter = fm->meter_id;
+ flow->meter = fm->idx;
}
/* Set the meter action. */
if (!fm) {
- fm = mlx5_flow_meter_find(priv, flow->meter);
+ fm = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MTR], flow->meter);
if (!fm)
return rte_flow_error_set(error,
rte_errno,
dev_flow->dv.actions[modify_action_position] =
handle->dvh.modify_hdr->verbs_action;
}
+ if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+ flow->counter =
+ flow_dv_translate_create_counter(dev,
+ dev_flow, count, age);
+
+ if (!flow->counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ dev_flow->dv.actions[actions_n++] =
+ (flow_dv_counter_get_by_idx(dev,
+ flow->counter, NULL))->action;
+ }
break;
default:
break;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(dev_flow,
match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
if (flow->meter) {
struct mlx5_flow_meter *fm;
- fm = mlx5_flow_meter_find(priv, flow->meter);
+ fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
+ flow->meter);
if (fm)
mlx5_flow_meter_detach(fm);
flow->meter = 0;
uint32_t color_reg_c_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_match_params mask = {
.size = sizeof(mask.buf),
};
return 0;
}
+/**
+ * Get aged-out flows.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] context
+ * The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ * The length of the context array.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * the number of aged-out flows on success, otherwise a negative errno value.
+ * If nb_contexts is 0, the total number of aged-out flows is returned.
+ * If nb_contexts is not 0, the number of aged-out flows reported in the
+ * context array is returned.
+ * @note: only stub for now
+ */
+static int
+flow_get_aged_flows(struct rte_eth_dev *dev,
+ void **context,
+ uint32_t nb_contexts,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param;
+ struct mlx5_flow_counter *counter;
+ int nb_flows = 0;
+
+ if (nb_contexts && !context)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Should assign at least one flow or"
+ " context to get if nb_contexts != 0");
+ age_info = GET_PORT_AGE_INFO(priv);
+ rte_spinlock_lock(&age_info->aged_sl);
+ TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
+ nb_flows++;
+ if (nb_contexts) {
+ age_param = MLX5_CNT_TO_AGE(counter);
+ context[nb_flows - 1] = age_param->context;
+ if (!(--nb_contexts))
+ break;
+ }
+ }
+ rte_spinlock_unlock(&age_info->aged_sl);
+ MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
+ return nb_flows;
+}
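
/*
 * Application-side usage sketch (standard rte_flow API assumed, not part of
 * this patch; the example_* name is hypothetical): typically called from a
 * RTE_ETH_EVENT_FLOW_AGED callback to collect the aged-out flow contexts.
 */
static void
example_drain_aged_flows(uint16_t port_id)
{
	void *contexts[64];
	struct rte_flow_error error;
	int i, n;

	n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts),
				    &error);
	for (i = 0; i < n; i++) {
		/* contexts[i] is the age->context set at flow creation. */
	}
}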
+
/*
* Mutex-protected thunk to lock-free __flow_dv_translate().
*/
uint32_t cnt;
flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
+ cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
flow_dv_shared_unlock(dev);
return cnt;
}
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
+ .get_aged_flows = flow_get_aged_flows,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */