*/
#define MLX5_MAKE_CNT_IDX(pi, offset) \
((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
+#define MLX5_CNT_TO_CNT_EXT(pool, cnt) (&((struct mlx5_flow_counter_ext *) \
+ ((pool) + 1))[((cnt) - (pool)->counters_raw)])
+#define MLX5_GET_POOL_CNT_EXT(pool, offset) \
+ (&((struct mlx5_flow_counter_ext *) \
+ ((pool) + 1))[offset])
struct mlx5_flow_counter_pool;
uint64_t bytes;
};
-/* Counters information. */
+/* Generic counters information. */
struct mlx5_flow_counter {
TAILQ_ENTRY(mlx5_flow_counter) next;
/**< Pointer to the next flow counter structure. */
+ union {
+ uint64_t hits; /**< Reset value of hits packets. */
+ int64_t query_gen; /**< Generation of the last release. */
+ };
+ uint64_t bytes; /**< Reset value of bytes. */
+ void *action; /**< Pointer to the dv action. */
+};
+
+/* Extended counters information for non-batch counters. */
+struct mlx5_flow_counter_ext {
uint32_t shared:1; /**< Share counter ID with other flow rules. */
uint32_t batch: 1;
/**< Whether the counter was allocated by batch command. */
uint32_t ref_cnt:30; /**< Reference counter. */
- uint32_t id; /**< Counter ID. */
+ uint32_t id; /**< User counter ID. */
union { /**< Holds the counters for the rule. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct ibv_counter_set *cs;
struct ibv_counters *cs;
#endif
struct mlx5_devx_obj *dcs; /**< Counter Devx object. */
- struct mlx5_flow_counter_pool *pool; /**< The counter pool. */
};
- union {
- uint64_t hits; /**< Reset value of hits packets. */
- int64_t query_gen; /**< Generation of the last release. */
- };
- uint64_t bytes; /**< Reset value of bytes. */
- void *action; /**< Pointer to the dv action. */
};
+
TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
-/* Counter pool structure - query is in pool resolution. */
+/* Generic counter pool structure - query is in pool resolution. */
struct mlx5_flow_counter_pool {
TAILQ_ENTRY(mlx5_flow_counter_pool) next;
struct mlx5_counters counters; /* Free counter list. */
rte_spinlock_t sl; /* The pool lock. */
struct mlx5_counter_stats_raw *raw;
struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
- struct mlx5_flow_counter counters_raw[]; /* The pool counters memory. */
+ struct mlx5_flow_counter counters_raw[MLX5_COUNTERS_PER_POOL];
+ /* The pool counters memory. */
};
struct mlx5_counter_stats_raw;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
struct mlx5_flow_counter_pool *pool;
- struct mlx5_flow_counter *cnt = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext;
struct mlx5_devx_obj *dcs = NULL;
uint32_t offset;
pool = TAILQ_FIRST(&cont->pool_list);
}
offset = dcs->id % MLX5_COUNTERS_PER_POOL;
- cnt = &pool->counters_raw[offset];
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .ref_cnt = 1,
- .id = id,
- .dcs = dcs,
- };
- tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
- if (!tmpl.action) {
- claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, offset);
+ cnt_ext->shared = shared;
+ cnt_ext->ref_cnt = 1;
+ cnt_ext->id = id;
+ cnt_ext->dcs = dcs;
+ pool->counters_raw[offset].action =
+ mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+ if (!pool->counters_raw[offset].action) {
+ claim_zero(mlx5_devx_cmd_destroy(dcs));
rte_errno = errno;
return 0;
}
- *cnt = tmpl;
return MLX5_MAKE_CNT_IDX(pool->index, offset);
}
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] counter
- * Index to the counter handler.
+ *   Pointer to the extended counter handler.
*/
static void
flow_dv_counter_release_fallback(struct rte_eth_dev *dev __rte_unused,
- struct mlx5_flow_counter *counter)
+ struct mlx5_flow_counter_ext *counter)
{
if (!counter)
return;
- if (--counter->ref_cnt == 0) {
- claim_zero(mlx5_glue->destroy_flow_action(counter->action));
- claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
- counter->action = NULL;
- counter->dcs = NULL;
- }
+ claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
+ counter->dcs = NULL;
}
/**
*/
static inline int
_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
- struct mlx5_flow_counter *cnt, uint64_t *pkts,
+ struct mlx5_flow_counter_ext *cnt, uint64_t *pkts,
uint64_t *bytes)
{
return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
0, NULL, NULL, 0);
}
-/**
- * Get a pool by a counter.
- *
- * @param[in] cnt
- * Pointer to the counter.
- *
- * @return
- * The counter pool.
- */
-static struct mlx5_flow_counter_pool *
-flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
-{
- if (!cnt->batch) {
- cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
- return (struct mlx5_flow_counter_pool *)cnt - 1;
- }
- return cnt->pool;
-}
-
/**
* Get DV flow counter by index.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] cnt
- * Pointer to the flow counter.
+ * Index to the flow counter.
* @param[out] pkts
* The statistics value of packets.
* @param[out] bytes
* 0 on success, otherwise a negative errno value and rte_errno is set.
*/
static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *cnt, uint64_t *pkts,
+_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
uint64_t *bytes)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *pool =
- flow_dv_counter_pool_get(cnt);
- int offset = cnt - &pool->counters_raw[0];
+ struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
+ int offset;
- if (priv->counter_fallback)
- return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+ cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+ MLX5_ASSERT(pool);
+ if (counter < MLX5_CNT_BATCH_OFFSET) {
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (priv->counter_fallback)
+ return _flow_dv_query_count_fallback(dev, cnt_ext,
+ pkts, bytes);
+ }
rte_spinlock_lock(&pool->sl);
/*
* current allocated in parallel to the host reading.
* In this case the new counter values must be reported as 0.
*/
- if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
+ if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
*pkts = 0;
*bytes = 0;
} else {
+ offset = cnt - &pool->counters_raw[0];
*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
}
if (!cont)
return NULL;
}
- size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
- sizeof(struct mlx5_flow_counter);
+ size = sizeof(*pool);
+ if (!batch)
+ size += MLX5_COUNTERS_PER_POOL *
+ sizeof(struct mlx5_flow_counter_ext);
pool = rte_calloc(__func__, 1, size, 0);
if (!pool) {
rte_errno = ENOMEM;
rte_atomic64_set(&pool->a64_dcs,
(int64_t)(uintptr_t)dcs);
}
- cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
+ i = dcs->id % MLX5_COUNTERS_PER_POOL;
+ cnt = &pool->counters_raw[i];
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
- cnt->dcs = dcs;
+ MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
*cnt_free = cnt;
return cont;
}
pool = TAILQ_FIRST(&cont->pool_list);
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = &pool->counters_raw[i];
- cnt->pool = pool;
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
}
*cnt_free = &pool->counters_raw[0];
* mlx5 flow counter pool in the container,
*
* @return
- * NULL if not existed, otherwise pointer to the shared counter.
+ *   NULL if it does not exist, otherwise pointer to the shared extended counter.
*/
-static struct mlx5_flow_counter *
+static struct mlx5_flow_counter_ext *
flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
struct mlx5_flow_counter_pool **ppool)
{
- static struct mlx5_flow_counter *cnt;
+ static struct mlx5_flow_counter_ext *cnt;
struct mlx5_flow_counter_pool *pool;
uint32_t i;
uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
for (i = 0; i < n_valid; i++) {
pool = cont->pools[i];
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
+ cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
if (ppool)
- *ppool = pool;
+ *ppool = cont->pools[i];
return cnt;
}
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
/*
* Currently group 0 flow counter cannot be assigned to a flow if it is
* not the first one in the batch counter allocation, so it is better
return 0;
}
if (shared) {
- cnt_free = flow_dv_counter_shared_search(cont, id, &pool);
- if (cnt_free) {
- if (cnt_free->ref_cnt + 1 == 0) {
+ cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+ if (cnt_ext) {
+ if (cnt_ext->ref_cnt + 1 == 0) {
rte_errno = E2BIG;
return 0;
}
- cnt_free->ref_cnt++;
+ cnt_ext->ref_cnt++;
cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
- (cnt_free - pool->counters_raw) + 1;
+ (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
+ + 1;
return cnt_idx;
}
}
return 0;
pool = TAILQ_FIRST(&cont->pool_list);
}
- cnt_free->batch = batch;
+ if (!batch)
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
dcs = pool->min_dcs;
} else {
offset = 0;
- dcs = cnt_free->dcs;
+ dcs = cnt_ext->dcs;
}
cnt_free->action = mlx5_glue->dv_create_flow_action_counter
(dcs->obj, offset);
return 0;
}
}
+ cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
+ (cnt_free - pool->counters_raw));
+ cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
/* Update the counter reset values. */
- if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
+ if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
return 0;
- cnt_free->shared = shared;
- cnt_free->ref_cnt = 1;
- cnt_free->id = id;
+ if (cnt_ext) {
+ cnt_ext->shared = shared;
+ cnt_ext->ref_cnt = 1;
+ cnt_ext->id = id;
+ }
if (!priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
TAILQ_REMOVE(&cont->pool_list, pool, next);
TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
}
- cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
- (cnt_free - pool->counters_raw));
- cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
return cnt_idx;
}
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *pool;
+ struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
if (!counter)
return;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+ MLX5_ASSERT(pool);
+ if (counter < MLX5_CNT_BATCH_OFFSET)
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (cnt_ext && --cnt_ext->ref_cnt)
+ return;
if (priv->counter_fallback) {
- flow_dv_counter_release_fallback(dev, cnt);
+ claim_zero(mlx5_glue->destroy_flow_action(cnt->action));
+ flow_dv_counter_release_fallback(dev, cnt_ext);
+ cnt->action = NULL;
return;
}
- if (--cnt->ref_cnt == 0) {
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
- /*
- * Counters released between query trigger and handler need
- * to wait the next round of query. Since the packets arrive
- * in the gap period will not be taken into account to the
- * old counter.
- */
- cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
- }
+ /* Put the counter in the end - the last updated one. */
+ TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ /*
+ * Counters released between query trigger and handler need
+ * to wait the next round of query. Since the packets arrive
+ * in the gap period will not be taken into account to the
+ * old counter.
+ */
+ cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
}
/**
cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
NULL);
- int err = _flow_dv_query_count(dev, cnt, &pkts,
+ int err = _flow_dv_query_count(dev, flow->counter, &pkts,
&bytes);
if (err)
if (!priv->config.devx)
return -1;
- cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
- ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
+ ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
if (ret)
return -1;
+ cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
*pkts = inn_pkts - cnt->hits;
*bytes = inn_bytes - cnt->bytes;
if (clear) {
*/
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *counter)
+ struct mlx5_flow_counter_ext *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
struct mlx5_flow_counter *cnt = NULL;
uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
uint32_t pool_idx;
for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
pool = cont->pools[pool_idx];
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
- if (cnt->shared && cnt->id == id) {
- cnt->ref_cnt++;
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ if (cnt_ext->shared && cnt_ext->id == id) {
+ cnt_ext->ref_cnt++;
return MLX5_MAKE_CNT_IDX(pool_idx, i);
}
}
cont->n += MLX5_CNT_CONTAINER_RESIZE;
}
/* Allocate memory for new pool*/
- size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
+ size = sizeof(*pool) + sizeof(*cnt_ext) *
+ MLX5_COUNTERS_PER_POOL;
pool = rte_calloc(__func__, 1, size, 0);
if (!pool)
return 0;
rte_atomic16_add(&cont->n_valid, 1);
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
}
- cnt->id = id;
- cnt->shared = shared;
- cnt->ref_cnt = 1;
+ i = cnt - pool->counters_raw;
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ cnt_ext->id = id;
+ cnt_ext->shared = shared;
+ cnt_ext->ref_cnt = 1;
cnt->hits = 0;
cnt->bytes = 0;
/* Create counter with Verbs. */
- ret = flow_verbs_counter_create(dev, cnt);
+ ret = flow_verbs_counter_create(dev, cnt_ext);
if (!ret) {
TAILQ_REMOVE(&pool->counters, cnt, next);
- return MLX5_MAKE_CNT_IDX(pool_idx, (cnt - pool->counters_raw));
+ return MLX5_MAKE_CNT_IDX(pool_idx, i);
}
/* Some error occurred in Verbs library. */
rte_errno = -ret;
{
struct mlx5_flow_counter_pool *pool;
struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext;
cnt = flow_verbs_counter_get_by_idx(dev, counter,
&pool);
- if (--cnt->ref_cnt == 0) {
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (--cnt_ext->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- claim_zero(mlx5_glue->destroy_counter_set(cnt->cs));
- cnt->cs = NULL;
+ claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
+ cnt_ext->cs = NULL;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- claim_zero(mlx5_glue->destroy_counters(cnt->cs));
- cnt->cs = NULL;
+ claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs));
+ cnt_ext->cs = NULL;
#endif
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
if (flow->counter) {
+ struct mlx5_flow_counter_pool *pool;
struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
- (dev, flow->counter, NULL);
+ (dev, flow->counter, &pool);
+ struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
+ (pool, cnt);
struct rte_flow_query_count *qc = data;
uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = cnt->cs,
+ .cs = cnt_ext->cs,
.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
};
struct ibv_counter_set_data query_out = {
&query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
int err = mlx5_glue->query_counters
- (cnt->cs, counters,
+ (cnt_ext->cs, counters,
RTE_DIM(counters),
IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
{
const struct rte_flow_action_count *count = action->conf;
struct rte_flow *flow = dev_flow->flow;
- struct mlx5_flow_counter *cnt = NULL;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ struct mlx5_flow_counter_pool *pool;
+ struct mlx5_flow_counter *cnt = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext;
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
struct ibv_flow_spec_counter_action counter = {
.type = IBV_FLOW_SPEC_ACTION_COUNT,
"cannot get counter"
" context.");
}
- cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, NULL);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- counter.counter_set_handle = cnt->cs->handle;
+ cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ counter.counter_set_handle = cnt_ext->cs->handle;
flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- counter.counters = cnt->cs;
+ cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ counter.counters = cnt_ext->cs;
flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
return 0;