}
}
-/**
- * Acquire the synchronizing object to protect multithreaded access
- * to shared dv context. Lock occurs only if context is actually
- * shared, i.e. we have multiport IB device and representors are
- * created.
- *
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- */
-static void
-flow_dv_shared_lock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
-
- if (sh->refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_lock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
-static void
-flow_dv_shared_unlock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
-
- if (sh->refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_unlock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
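/*
 * Illustrative sketch, not part of the patch: the conditional-locking
 * scheme implemented by the helpers removed above, shown standalone.
 * The mutex is taken only while the shared context is referenced by
 * more than one port, so the single-port case pays no locking cost.
 * The struct and function names below are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>

struct shared_ctx {
	pthread_mutex_t mutex; /* guards resources shared across ports */
	uint32_t refcnt;       /* number of ports attached to the context */
};

static void
shared_ctx_lock(struct shared_ctx *ctx)
{
	/* Lock only if the context is actually shared. */
	if (ctx->refcnt > 1)
		pthread_mutex_lock(&ctx->mutex);
}

static void
shared_ctx_unlock(struct shared_ctx *ctx)
{
	if (ctx->refcnt > 1)
		pthread_mutex_unlock(&ctx->mutex);
}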
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
-flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
+flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
act_res->rix_tag = 0;
}
if (act_res->cnt) {
- flow_dv_counter_release(dev, act_res->cnt);
+ flow_dv_counter_free(dev, act_res->cnt);
act_res->cnt = 0;
}
}
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-__flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
{
struct mlx5_flow_dv_workspace *dv;
struct mlx5_flow_handle *dh;
* Pointer to flow structure.
*/
static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dh;
uint32_t handle_idx;
* Pointer to flow structure.
*/
static void
-__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct rte_flow_shared_action *shared;
struct mlx5_flow_handle *dev_handle;
if (!flow)
return;
- __flow_dv_remove(dev, flow);
+ flow_dv_remove(dev, flow);
shared = mlx5_flow_get_shared_rss(flow);
if (shared)
__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
if (flow->counter) {
- flow_dv_counter_release(dev, flow->counter);
+ flow_dv_counter_free(dev, flow->counter);
flow->counter = 0;
}
if (flow->meter) {
* rte_errno is set.
*/
static struct rte_flow_shared_action *
-__flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
- const struct rte_flow_action *action,
- struct rte_flow_error *error)
+flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
{
struct rte_flow_shared_action *shared_action = NULL;
struct mlx5_priv *priv = dev->data->dev_private;
* 0 on success, otherwise negative errno value.
*/
static int
-__flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- struct rte_flow_error *error)
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
int ret;
* 0 on success, otherwise negative errno value.
*/
static int
-__flow_dv_action_update(struct rte_eth_dev *dev,
+flow_dv_action_update(struct rte_eth_dev *dev,
struct rte_flow_shared_action *action,
const void *action_conf,
struct rte_flow_error *error)
return nb_flows;
}
-/*
- * Mutex-protected thunk to lock-free __flow_dv_translate().
- */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_apply().
- */
-static int
-flow_dv_apply(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_apply(dev, flow, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_remove().
- */
-static void
-flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_remove(dev, flow);
- flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_destroy().
- */
-static void
-flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_destroy(dev, flow);
- flow_dv_shared_unlock(dev);
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
- */
+/**
+ * Allocate a new counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   Index to flow counter on success, 0 otherwise.
+ */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
- uint32_t cnt;
-
- flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0);
- flow_dv_shared_unlock(dev);
- return cnt;
-}
-
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_release().
- */
-static void
-flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
-{
- flow_dv_shared_lock(dev);
- flow_dv_counter_release(dev, cnt);
- flow_dv_shared_unlock(dev);
+ return flow_dv_counter_alloc(dev, 0);
}
/**
}
}
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_create().
- */
-static struct rte_flow_shared_action *
-flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
- const struct rte_flow_action *action,
- struct rte_flow_error *error)
-{
- struct rte_flow_shared_action *shared_action = NULL;
-
- flow_dv_shared_lock(dev);
- shared_action = __flow_dv_action_create(dev, conf, action, error);
- flow_dv_shared_unlock(dev);
- return shared_action;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
- */
-static int
-flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_action_destroy(dev, action, error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
-/*
- * Mutex-protected thunk to lock-free __flow_dv_action_update().
- */
-static int
-flow_dv_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- const void *action_conf,
- struct rte_flow_error *error)
-{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_action_update(dev, action, action_conf,
- error);
- flow_dv_shared_unlock(dev);
- return ret;
-}
-
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{