#include <stdalign.h>
#include <stdint.h>
#include <string.h>
+#include <unistd.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
+/* VLAN header definitions */
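+/*
+ * The 802.1Q TCI layout is PCP (3 bits) | DEI (1 bit) | VID (12 bits);
+ * e.g. a TCI of 0x6005 carries PCP 3, DEI 0 and VID 5.
+ */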
+#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
+#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
+#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
+#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
+#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
+
union flow_dv_attr {
struct {
uint32_t valid:1;
{0, 0, 0},
};
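+/**
+ * Detect IP-in-IP and IPv6-in-IP tunnels from the previous layer's next
+ * protocol value and update the item flags accordingly. For example, in
+ * the pattern eth / ipv4 proto is 4 / ipv4, the second IPv4 item sees
+ * next_protocol == IPPROTO_IPIP and is handled as the inner header.
+ *
+ * @param[in] item
+ *   Pointer to the IPv4/IPv6 item being processed.
+ * @param[in] next_protocol
+ *   The next protocol value taken from the preceding layer.
+ * @param[in, out] item_flags
+ *   Item flags to update with the detected tunnel layer.
+ * @param[in, out] tunnel
+ *   Set to 1 when a tunnel is detected.
+ */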
+static void
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
+ uint8_t next_protocol, uint64_t *item_flags,
+ int *tunnel)
+{
+ assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ if (next_protocol == IPPROTO_IPIP) {
+ *item_flags |= MLX5_FLOW_LAYER_IPIP;
+ *tunnel = 1;
+ }
+ if (next_protocol == IPPROTO_IPV6) {
+ *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+ *tunnel = 1;
+ }
+}
+
/**
* Acquire the synchronizing object to protect multithreaded access
* to shared dv context. Lock occurs only if context is actually
return 0;
}
+/**
+ * Validate the pop VLAN action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the pop vlan action.
+ * @param[in] item_flags
+ * The items found in this flow rule.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_action *action,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ (void)action;
+ (void)attr;
+ if (!priv->sh->pop_vlan_action)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "pop vlan action is not supported");
+	/*
+	 * Check for inconsistencies:
+	 * fail strip_vlan in a flow that matches packets without VLAN tags;
+	 * fail strip_vlan in a flow that does not explicitly match on the
+	 * VLAN tag.
+	 */
+ if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no support for multiple vlan pop "
+ "actions");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan without a "
+ "match on (outer) vlan in the flow");
+ return 0;
+}
+
+/**
+ * Get VLAN default info from the VLAN match info in the item list.
+ *
+ * @param[in] items
+ *   The list of item specifications.
+ * @param[out] vlan
+ *   Pointer to the VLAN info to fill.
+ */
+static void
+flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
+ struct rte_vlan_hdr *vlan)
+{
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
+ MLX5DV_FLOW_VLAN_VID_MASK),
+ .inner_type = RTE_BE16(0xffff),
+ };
+
+ if (items == NULL)
+ return;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
+ items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
+ ;
+ if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ const struct rte_flow_item_vlan *vlan_m = items->mask;
+ const struct rte_flow_item_vlan *vlan_v = items->spec;
+
+ if (!vlan_m)
+ vlan_m = &nic_mask;
+ /* Only full match values are accepted */
+ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
+ MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
+			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci |=
+ rte_be_to_cpu_16(vlan_v->tci &
+ MLX5DV_FLOW_VLAN_PCP_MASK_BE);
+ }
+ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
+ MLX5DV_FLOW_VLAN_VID_MASK_BE) {
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
+ vlan->vlan_tci |=
+ rte_be_to_cpu_16(vlan_v->tci &
+ MLX5DV_FLOW_VLAN_VID_MASK_BE);
+ }
+ if (vlan_m->inner_type == nic_mask.inner_type)
+ vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
+ vlan_m->inner_type);
+ }
+}
+
+/**
+ * Validate the push VLAN action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the push VLAN action.
+ * @param[in] attr
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_push_vlan(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
+
+ if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
+ push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "invalid vlan ethertype");
+ if (action_flags &
+ (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no support for multiple VLAN "
+ "actions");
+ (void)attr;
+ return 0;
+}
+
+/**
+ * Validate the set VLAN PCP.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] actions
+ * Pointer to the list of actions remaining in the flow rule.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action = actions;
+ const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
+
+ if (conf->vlan_pcp > 7)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "VLAN PCP value is too big");
+ if (mlx5_flow_find_action(actions,
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) == NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "set VLAN PCP can only be used "
+ "with push VLAN action");
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "set VLAN PCP action must precede "
+ "the push VLAN action");
+ return 0;
+}
+
+/**
+ * Validate the set VLAN VID.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] actions
+ * Pointer to the list of actions remaining in the flow rule.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_set_vlan_vid(uint64_t action_flags,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action = actions;
+ const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
+
+	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "VLAN VID value is too big");
+ if (mlx5_flow_find_action(actions,
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) == NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "set VLAN VID can only be used "
+ "with push VLAN action");
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "set VLAN VID action must precede "
+ "the push VLAN action");
+ return 0;
+}
+
/**
* Validate count action.
*
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct rte_flow_action_raw_encap *raw_encap =
+ (const struct rte_flow_action_raw_encap *)action->conf;
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
+ if (!raw_encap->size || !raw_encap->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "raw encap data cannot be empty");
return 0;
}
return 0;
}
+/**
+ * Find existing push vlan resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ *   Pointer to push VLAN action resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_push_vlan_action_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_push_vlan_action_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
+ struct mlx5dv_dr_domain *domain;
+
+ /* Lookup a matching resource from cache. */
+ LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
+ if (resource->vlan_tag == cache_resource->vlan_tag &&
+ resource->ft_type == cache_resource->ft_type) {
+			DRV_LOG(DEBUG, "push VLAN action resource %p: "
+ "refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.push_vlan_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new push_vlan action resource. */
+ cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_push_vlan(domain,
+ resource->vlan_tag);
+ if (!cache_resource->action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
+ dev_flow->dv.push_vlan_res = cache_resource;
+ DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
/**
* Get the size of specific rte_flow_item_type
*
/* VLAN skipping */
while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
- next_hdr += sizeof(struct rte_vlan_hdr);
vlan = (struct rte_vlan_hdr *)next_hdr;
proto = RTE_BE16(vlan->eth_proto);
+ next_hdr += sizeof(struct rte_vlan_hdr);
}
/* HW calculates IPv4 csum. no need to proceed */
return 0;
}
+/**
+ * Create action push VLAN.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] vlan
+ *   Pointer to the VLAN header to push to the packet.
+ * @param[in, out] dev_flow
+ *   Pointer to the mlx5_flow.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_vlan_hdr *vlan,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_push_vlan_action_resource res;
+
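+	/*
+	 * The 32-bit push data is { ethertype | TCI } in network order,
+	 * e.g. ethertype 0x8100 with TCI 0x2005 (PCP 1, VID 5) yields the
+	 * big-endian word 0x81002005.
+	 */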
+ res.vlan_tag =
+ rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
+ vlan->vlan_tci);
+ if (attr->transfer)
+ res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ else
+ res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ return flow_dv_push_vlan_action_resource_register
+ (dev, &res, dev_flow, error);
+}
+
/**
* Validate the modify-header actions.
*
* Validate jump action.
*
* @param[in] action
- * Pointer to the modify action.
+ * Pointer to the jump action.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
* @param[in] group
* The group of the current flow.
* @param[out] error
*/
static int
flow_dv_validate_action_jump(const struct rte_flow_action *action,
+ uint64_t action_flags,
uint32_t group,
struct rte_flow_error *error)
{
- if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "action configuration not set");
}
/**
- * Find existing modify-header resource or create and register a new one.
+ * Find existing modify-header resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ *   Pointer to modify-header resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_modify_hdr_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_domain *ns;
+
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ ns = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ ns = sh->tx_domain;
+ else
+ ns = sh->rx_domain;
+ resource->flags =
+ dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ /* Lookup a matching resource from cache. */
+ LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
+ if (resource->ft_type == cache_resource->ft_type &&
+ resource->actions_num == cache_resource->actions_num &&
+ resource->flags == cache_resource->flags &&
+ !memcmp((const void *)resource->actions,
+ (const void *)cache_resource->actions,
+ (resource->actions_num *
+ sizeof(resource->actions[0])))) {
+ DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.modify_hdr = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new modify-header resource. */
+ cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ cache_resource->verbs_action =
+ mlx5_glue->dv_create_flow_action_modify_header
+ (sh->ctx, cache_resource->ft_type,
+ ns, cache_resource->flags,
+ cache_resource->actions_num *
+ sizeof(cache_resource->actions[0]),
+ (uint64_t *)cache_resource->actions);
+ if (!cache_resource->verbs_action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
+ dev_flow->dv.modify_hdr = cache_resource;
+ DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
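+/* Number of pool slots a counter container grows by on each resize. */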
+#define MLX5_CNT_CONTAINER_RESIZE 64
+
+/**
+ * Get or create a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
+ uint32_t id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt = NULL;
+ struct mlx5_devx_obj *dcs = NULL;
+
+ if (!priv->config.devx) {
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+ if (shared) {
+ TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
+ if (cnt->shared && cnt->id == id) {
+ cnt->ref_cnt++;
+ return cnt;
+ }
+ }
+ }
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+ if (!dcs)
+ return NULL;
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+		claim_zero(mlx5_devx_cmd_destroy(dcs));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .ref_cnt = 1,
+ .id = id,
+ .dcs = dcs,
+ };
+ tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+ if (!tmpl.action) {
+		claim_zero(mlx5_devx_cmd_destroy(dcs));
+ rte_errno = errno;
+ rte_free(cnt);
+ return NULL;
+ }
+ *cnt = tmpl;
+ TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
+ return cnt;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *counter)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!counter)
+ return;
+ if (--counter->ref_cnt == 0) {
+ TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
+ claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
+ rte_free(counter);
+ }
+}
+
+/**
+ * Query a devx flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ * Pointer to the flow counter.
+ * @param[out] pkts
+ * The statistics value of packets.
+ * @param[out] bytes
+ * The statistics value of bytes.
+ *
+ * @return
+ * 0 on success, otherwise a negative errno value and rte_errno is set.
+ */
+static inline int
+_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow_counter *cnt, uint64_t *pkts,
+ uint64_t *bytes)
+{
+ return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
+ 0, NULL, NULL, 0);
+}
+
+/**
+ * Get a pool by a counter.
+ *
+ * @param[in] cnt
+ * Pointer to the counter.
+ *
+ * @return
+ * The counter pool.
+ */
+static struct mlx5_flow_counter_pool *
+flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
+{
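+	/*
+	 * Single (non-batch) counters are laid out so that the slot index
+	 * equals dcs->id % MLX5_COUNTERS_PER_POOL and the pool structure
+	 * sits immediately before counters_raw[0]; walk back accordingly.
+	 */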
+ if (!cnt->batch) {
+ cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
+ return (struct mlx5_flow_counter_pool *)cnt - 1;
+ }
+ return cnt->pool;
+}
+
+/**
+ * Get a pool by devx counter ID.
+ *
+ * @param[in] cont
+ * Pointer to the counter container.
+ * @param[in] id
+ * The counter devx ID.
+ *
+ * @return
+ *   The counter pool pointer if it exists, NULL otherwise.
+ */
+static struct mlx5_flow_counter_pool *
+flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
+{
+ struct mlx5_flow_counter_pool *pool;
+
+ TAILQ_FOREACH(pool, &cont->pool_list, next) {
+ int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+ MLX5_COUNTERS_PER_POOL;
+
+ if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+ return pool;
+	}
+ return NULL;
+}
+
+/**
+ * Allocate new memory for the counter values, wrapped by all the needed
+ * management structures.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] raws_n
+ * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
+ *
+ * @return
+ * The new memory management pointer on success, otherwise NULL and rte_errno
+ * is set.
+ */
+static struct mlx5_counter_stats_mem_mng *
+flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
+{
+ struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
+ (dev->data->dev_private))->sh;
+ struct mlx5_devx_mkey_attr mkey_attr;
+ struct mlx5_counter_stats_mem_mng *mem_mng;
+ volatile struct flow_counter_stats *raw_data;
+ int size = (sizeof(struct flow_counter_stats) *
+ MLX5_COUNTERS_PER_POOL +
+ sizeof(struct mlx5_counter_stats_raw)) * raws_n +
+ sizeof(struct mlx5_counter_stats_mem_mng);
+ uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
+ int i;
+
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
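+	/*
+	 * Single allocation layout: the counter statistics area
+	 * (raws_n * MLX5_COUNTERS_PER_POOL entries) comes first, then the
+	 * raws_n raw descriptors, with the management structure carved
+	 * from the very end of the buffer.
+	 */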
+ mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
+ size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
+ mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (!mem_mng->umem) {
+ rte_errno = errno;
+ rte_free(mem);
+ return NULL;
+ }
+ mkey_attr.addr = (uintptr_t)mem;
+ mkey_attr.size = size;
+ mkey_attr.umem_id = mem_mng->umem->umem_id;
+ mkey_attr.pd = sh->pdn;
+ mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+ if (!mem_mng->dm) {
+ mlx5_glue->devx_umem_dereg(mem_mng->umem);
+ rte_errno = errno;
+ rte_free(mem);
+ return NULL;
+ }
+ mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
+ raw_data = (volatile struct flow_counter_stats *)mem;
+ for (i = 0; i < raws_n; ++i) {
+ mem_mng->raws[i].mem_mng = mem_mng;
+ mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+ }
+ LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
+ return mem_mng;
+}
+
+/**
+ * Resize a counter container.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] batch
+ * Whether the pool is for counter that was allocated by batch command.
+ *
+ * @return
+ * The new container pointer on success, otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_pools_container *
+flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_pools_container *cont =
+ MLX5_CNT_CONTAINER(priv->sh, batch, 0);
+ struct mlx5_pools_container *new_cont =
+ MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
+ struct mlx5_counter_stats_mem_mng *mem_mng;
+ uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
+ uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
+ int i;
+
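+	/*
+	 * Containers are double-buffered per batch type: the resize fills
+	 * the currently unused copy and then flips the master index, so
+	 * the host query thread keeps a consistent view.
+	 */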
+ if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
+		/* The last resize was not yet detected by the host thread. */
+ rte_errno = EAGAIN;
+ return NULL;
+ }
+ new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
+ if (!new_cont->pools) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ if (cont->n)
+ memcpy(new_cont->pools, cont->pools, cont->n *
+ sizeof(struct mlx5_flow_counter_pool *));
+ mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
+ MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+ if (!mem_mng) {
+ rte_free(new_cont->pools);
+ return NULL;
+ }
+ for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+ LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
+ mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
+ i, next);
+ new_cont->n = resize;
+ rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
+ TAILQ_INIT(&new_cont->pool_list);
+ TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
+ new_cont->init_mem_mng = mem_mng;
+ rte_cio_wmb();
+ /* Flip the master container. */
+ priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
+ return new_cont;
+}
+
+/**
+ * Query a devx flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ * Pointer to the flow counter.
+ * @param[out] pkts
+ * The statistics value of packets.
+ * @param[out] bytes
+ * The statistics value of bytes.
+ *
+ * @return
+ * 0 on success, otherwise a negative errno value and rte_errno is set.
+ */
+static inline int
+_flow_dv_query_count(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *cnt, uint64_t *pkts,
+ uint64_t *bytes)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter_pool *pool =
+ flow_dv_counter_pool_get(cnt);
+ int offset = cnt - &pool->counters_raw[0];
+
+ if (priv->counter_fallback)
+ return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+
+ rte_spinlock_lock(&pool->sl);
+	/*
+	 * The single-counter allocation may allocate a smaller ID than the
+	 * ones currently allocated, in parallel with the host thread reading.
+	 * In this case the new counter values must be reported as 0.
+	 */
+ if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
+ *pkts = 0;
+ *bytes = 0;
+ } else {
+ *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
+ *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
+ }
+ rte_spinlock_unlock(&pool->sl);
+ return 0;
+}
+
+/**
+ * Create and initialize a new counter pool.
*
- * @param dev[in, out]
- * Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to modify-header resource.
- * @parm[in, out] dev_flow
- * Pointer to the dev_flow.
- * @param[out] error
- * pointer to error structure.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] dcs
+ * The devX counter handle.
+ * @param[in] batch
+ * Whether the pool is for counter that was allocated by batch command.
*
* @return
- * 0 on success otherwise -errno and errno is set.
+ * A new pool pointer on success, NULL otherwise and rte_errno is set.
*/
-static int
-flow_dv_modify_hdr_resource_register
- (struct rte_eth_dev *dev,
- struct mlx5_flow_dv_modify_hdr_resource *resource,
- struct mlx5_flow *dev_flow,
- struct rte_flow_error *error)
+static struct mlx5_flow_counter_pool *
+flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
+ uint32_t batch)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
- struct mlx5dv_dr_domain *ns;
+ struct mlx5_flow_counter_pool *pool;
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
+ 0);
+ int16_t n_valid = rte_atomic16_read(&cont->n_valid);
+ uint32_t size;
+
+ if (cont->n == n_valid) {
+ cont = flow_dv_container_resize(dev, batch);
+ if (!cont)
+ return NULL;
+ }
+ size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
+ sizeof(struct mlx5_flow_counter);
+ pool = rte_calloc(__func__, 1, size, 0);
+ if (!pool) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ pool->min_dcs = dcs;
+ pool->raw = cont->init_mem_mng->raws + n_valid %
+ MLX5_CNT_CONTAINER_RESIZE;
+ pool->raw_hw = NULL;
+ rte_spinlock_init(&pool->sl);
+	/*
+	 * The newly allocated counters in this pool have generation 0;
+	 * setting the pool query generation to 2 makes all of them valid
+	 * for allocation.
+	 */
+ rte_atomic64_set(&pool->query_gen, 0x2);
+ TAILQ_INIT(&pool->counters);
+ TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
+ cont->pools[n_valid] = pool;
+	/* Pool initialization must be visible before host thread access. */
+ rte_cio_wmb();
+ rte_atomic16_add(&cont->n_valid, 1);
+ return pool;
+}
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
- ns = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
- ns = sh->tx_domain;
- else
- ns = sh->rx_domain;
- resource->flags =
- dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
- /* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
- if (resource->ft_type == cache_resource->ft_type &&
- resource->actions_num == cache_resource->actions_num &&
- resource->flags == cache_resource->flags &&
- !memcmp((const void *)resource->actions,
- (const void *)cache_resource->actions,
- (resource->actions_num *
- sizeof(resource->actions[0])))) {
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
- return 0;
+/**
+ * Prepare a new counter and/or a new counter pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] cnt_free
+ * Where to put the pointer of a new counter.
+ * @param[in] batch
+ * Whether the pool is for counter that was allocated by batch command.
+ *
+ * @return
+ * The free counter pool pointer and @p cnt_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter_pool *
+flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter **cnt_free,
+ uint32_t batch)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter_pool *pool;
+ struct mlx5_devx_obj *dcs = NULL;
+ struct mlx5_flow_counter *cnt;
+ uint32_t i;
+
+ if (!batch) {
+ /* bulk_bitmap must be 0 for single counter allocation. */
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+ if (!dcs)
+ return NULL;
+ pool = flow_dv_find_pool_by_id
+ (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
+ if (!pool) {
+ pool = flow_dv_pool_create(dev, dcs, batch);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
+ }
+ } else if (dcs->id < pool->min_dcs->id) {
+ rte_atomic64_set(&pool->a64_dcs,
+ (int64_t)(uintptr_t)dcs);
}
+ cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
+ TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->dcs = dcs;
+ *cnt_free = cnt;
+ return pool;
}
- /* Register new modify-header resource. */
- cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_modify_header
- (sh->ctx, cache_resource->ft_type,
- ns, cache_resource->flags,
- cache_resource->actions_num *
- sizeof(cache_resource->actions[0]),
- (uint64_t *)cache_resource->actions);
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
+	/* bulk_bitmap is in units of 128 counters. */
+ if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ if (!dcs) {
+ rte_errno = ENODATA;
+ return NULL;
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
- DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- return 0;
+ pool = flow_dv_pool_create(dev, dcs, batch);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
+ }
+ for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ cnt = &pool->counters_raw[i];
+ cnt->pool = pool;
+ TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ }
+ *cnt_free = &pool->counters_raw[0];
+ return pool;
}
/**
- * Get or create a flow counter.
+ * Search for an existing shared counter.
+ *
+ * @param[in] cont
+ * Pointer to the relevant counter pool container.
+ * @param[in] id
+ * The shared counter ID to search.
+ *
+ * @return
+ *   NULL if it does not exist, otherwise pointer to the shared counter.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
+ uint32_t id)
+{
+	struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_pool *pool;
+ int i;
+
+ TAILQ_FOREACH(pool, &cont->pool_list, next) {
+ for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ cnt = &pool->counters_raw[i];
+ if (cnt->ref_cnt && cnt->shared && cnt->id == id)
+ return cnt;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Allocate a flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* Indicate if this counter is shared with other flows.
* @param[in] id
* Counter identifier.
+ * @param[in] group
+ * Counter flow group.
*
* @return
* pointer to flow counter on success, NULL otherwise and rte_errno is set.
*/
static struct mlx5_flow_counter *
-flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
+ uint16_t group)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter *cnt = NULL;
- struct mlx5_devx_counter_set *dcs = NULL;
- int ret;
+ struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter *cnt_free = NULL;
+	/*
+	 * Currently a group 0 flow counter cannot be assigned to a flow if it
+	 * is not the first one in the batch counter allocation, so it is
+	 * better to allocate counters one by one for these flows in a
+	 * separate container.
+	 * A counter can be shared between different groups, so the shared
+	 * counters need to be taken from the single-counter container.
+	 */
+ uint32_t batch = (group && !shared) ? 1 : 0;
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
+ 0);
+ if (priv->counter_fallback)
+ return flow_dv_counter_alloc_fallback(dev, shared, id);
if (!priv->config.devx) {
- ret = -ENOTSUP;
- goto error_exit;
+ rte_errno = ENOTSUP;
+ return NULL;
}
if (shared) {
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
- if (cnt->shared && cnt->id == id) {
- cnt->ref_cnt++;
- return cnt;
+ cnt_free = flow_dv_counter_shared_search(cont, id);
+ if (cnt_free) {
+ if (cnt_free->ref_cnt + 1 == 0) {
+ rte_errno = E2BIG;
+ return NULL;
}
+ cnt_free->ref_cnt++;
+ return cnt_free;
}
}
- cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
- dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
- if (!dcs || !cnt) {
- ret = -ENOMEM;
- goto error_exit;
+	/* Pools which have free counters are at the start. */
+ TAILQ_FOREACH(pool, &cont->pool_list, next) {
+		/*
+		 * The free counter reset values must be updated between the
+		 * counter release and the counter allocation, so at least one
+		 * query must be done in this period. Ensure it by saving the
+		 * query generation at release time.
+		 * The free list is sorted according to the generation, so if
+		 * the first one is not updated, all the others are not
+		 * updated too.
+		 */
+ cnt_free = TAILQ_FIRST(&pool->counters);
+ if (cnt_free && cnt_free->query_gen + 1 <
+ rte_atomic64_read(&pool->query_gen))
+ break;
+ cnt_free = NULL;
}
- ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
- if (ret)
- goto error_exit;
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .ref_cnt = 1,
- .id = id,
- .dcs = dcs,
- };
- tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
- if (!tmpl.action) {
- ret = errno;
- goto error_exit;
+ if (!cnt_free) {
+ pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
+ if (!pool)
+ return NULL;
}
- *cnt = tmpl;
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
- return cnt;
-error_exit:
- rte_free(cnt);
- rte_free(dcs);
- rte_errno = -ret;
- return NULL;
+ cnt_free->batch = batch;
+ /* Create a DV counter action only in the first time usage. */
+ if (!cnt_free->action) {
+ uint16_t offset;
+ struct mlx5_devx_obj *dcs;
+
+ if (batch) {
+ offset = cnt_free - &pool->counters_raw[0];
+ dcs = pool->min_dcs;
+ } else {
+ offset = 0;
+ dcs = cnt_free->dcs;
+ }
+ cnt_free->action = mlx5_glue->dv_create_flow_action_counter
+ (dcs->obj, offset);
+ if (!cnt_free->action) {
+ rte_errno = errno;
+ return NULL;
+ }
+ }
+ /* Update the counter reset values. */
+ if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
+ &cnt_free->bytes))
+ return NULL;
+ cnt_free->shared = shared;
+ cnt_free->ref_cnt = 1;
+ cnt_free->id = id;
+ if (!priv->sh->cmng.query_thread_on)
+ /* Start the asynchronous batch query by the host thread. */
+ mlx5_set_query_alarm(priv->sh);
+ TAILQ_REMOVE(&pool->counters, cnt_free, next);
+ if (TAILQ_EMPTY(&pool->counters)) {
+ /* Move the pool to the end of the container pool list. */
+ TAILQ_REMOVE(&cont->pool_list, pool, next);
+ TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
+ }
+ return cnt_free;
}
/**
* Release a flow counter.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] counter
* Pointer to the counter handler.
*/
static void
-flow_dv_counter_release(struct mlx5_flow_counter *counter)
+flow_dv_counter_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *counter)
{
- int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!counter)
return;
+ if (priv->counter_fallback) {
+ flow_dv_counter_release_fallback(dev, counter);
+ return;
+ }
if (--counter->ref_cnt == 0) {
- ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
- if (ret)
- DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
- LIST_REMOVE(counter, next);
- rte_free(counter->dcs);
- rte_free(counter);
+ struct mlx5_flow_counter_pool *pool =
+ flow_dv_counter_pool_get(counter);
+
+ /* Put the counter in the end - the last updated one. */
+ TAILQ_INSERT_TAIL(&pool->counters, counter, next);
+ counter->query_gen = rte_atomic64_read(&pool->query_gen);
}
}
if (!(priv->representor || priv->master))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "E-Switch configurationd can only be"
+ NULL, "E-Switch configuration can only be"
" done by a master or a representor device");
if (attributes->egress)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
NULL, "group must be smaller than "
- RTE_STR(MLX5_MAX_FDB_TABLES));
+ RTE_STR(MLX5_MAX_TABLES_FDB));
}
if (!(attributes->egress ^ attributes->ingress))
return rte_flow_error_set(error, ENOTSUP,
(dev, items, attr, item_flags, error);
if (ret < 0)
return ret;
- last_item |= MLX5_FLOW_ITEM_PORT_ID;
+ last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
- error);
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
MLX5_FLOW_LAYER_OUTER_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
NULL, error);
if (ret < 0)
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
NULL, error);
if (ret < 0)
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_gre(items, item_flags,
next_protocol, error);
if (ret < 0)
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_NVGRE;
+ break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
ret = mlx5_flow_validate_item_gre_key
(items, item_flags, gre_item, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
+ last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_ICMP;
+ last_item = MLX5_FLOW_LAYER_ICMP;
break;
case RTE_FLOW_ITEM_TYPE_ICMP6:
ret = mlx5_flow_validate_item_icmp6(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_ICMP6;
+ last_item = MLX5_FLOW_LAYER_ICMP6;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ if (flow_dv_validate_action_pop_vlan(dev,
+ action_flags,
+ actions,
+ item_flags, attr,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ ret = flow_dv_validate_action_push_vlan(action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ ret = flow_dv_validate_action_set_vlan_pcp
+ (action_flags, actions, error);
+ if (ret < 0)
+ return ret;
+ /* Count PCP with push_vlan command. */
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ ret = flow_dv_validate_action_set_vlan_vid
+ (action_flags, actions, error);
+ if (ret < 0)
+ return ret;
+ /* Count VID with push_vlan command. */
+ break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = flow_dv_validate_action_l2_encap(action_flags,
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
ret = flow_dv_validate_action_jump(actions,
+ action_flags,
attr->group, error);
if (ret)
return ret;
"action not supported");
}
}
+	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+	    (action_flags & MLX5_FLOW_VLAN_ACTIONS))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "can't have vxlan and vlan"
+ " actions in the same rule");
/* Eswitch has few restrictions on using items and actions */
if (attr->transfer) {
if (action_flags & MLX5_FLOW_ACTION_FLAG)
/**
* Add VLAN item to matcher and to the value.
*
+ * @param[in, out] dev_flow
+ * Flow descriptor.
* @param[in, out] matcher
* Flow matcher.
* @param[in, out] key
* Item is inner pattern.
*/
static void
-flow_dv_translate_item_vlan(void *matcher, void *key,
+flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
+ void *matcher, void *key,
const struct rte_flow_item *item,
int inner)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
- const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
- };
void *headers_m;
void *headers_v;
uint16_t tci_m;
if (!vlan_v)
return;
if (!vlan_m)
- vlan_m = &nic_mask;
+ vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		/*
+		 * This is a workaround; masks are not supported here and
+		 * have been pre-validated.
+		 */
+ dev_flow->dv.vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
/**
int size;
int i;
- flow_dv_translate_item_gre(matcher, key, item, inner);
+ /* For NVGRE, GRE header fields must be set with defined values. */
+ const struct rte_flow_item_gre gre_spec = {
+ .c_rsvd0_ver = RTE_BE16(0x2000),
+ .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
+ };
+ const struct rte_flow_item_gre gre_mask = {
+ .c_rsvd0_ver = RTE_BE16(0xB000),
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+ const struct rte_flow_item gre_item = {
+ .spec = &gre_spec,
+ .mask = &gre_mask,
+ .last = NULL,
+ };
+ flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
if (!nvgre_v)
return;
if (!nvgre_m)
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-#ifdef HAVE_MLX5DV_DR
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
-#endif
return match_criteria_enable;
}
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate matcher memory");
- tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
+ tbl = flow_dv_tbl_resource_get(dev, matcher->group,
matcher->egress, matcher->transfer,
error);
if (!tbl) {
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
+ struct rte_vlan_hdr vlan = { 0 };
+ bool vlan_inherited = false;
+ uint16_t vlan_tci;
flow->group = attr->group;
if (attr->transfer)
rte_errno = ENOTSUP;
goto cnt_err;
}
- flow->counter = flow_dv_counter_new(dev, count->shared,
- count->id);
+ flow->counter = flow_dv_counter_alloc(dev,
+ count->shared,
+ count->id,
+ attr->group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
action,
"cannot create counter"
" object.");
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ dev_flow->dv.actions[actions_n++] =
+ priv->sh->pop_vlan_action;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ if (!vlan_inherited) {
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan_inherited = true;
+ }
+ vlan.eth_proto = rte_be_to_cpu_16
+ ((((const struct rte_flow_action_of_push_vlan *)
+ actions->conf)->ethertype));
+ if (flow_dv_create_action_push_vlan
+ (dev, attr, &vlan, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.push_vlan_res->action;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ if (!vlan_inherited) {
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan_inherited = true;
+ }
+ vlan_tci =
+ ((const struct rte_flow_action_of_set_vlan_pcp *)
+ actions->conf)->vlan_pcp;
+ vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
+ vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan.vlan_tci |= vlan_tci;
+ /* Push VLAN command will use this value */
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (!vlan_inherited) {
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan_inherited = true;
+ }
+ vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
+ vlan.vlan_tci |= rte_be_to_cpu_16
+ (((const struct rte_flow_action_of_set_vlan_vid *)
+ actions->conf)->vlan_vid);
+ /* Push VLAN command will use this value */
+ break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
if (flow_dv_create_action_l2_encap(dev, actions,
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
- tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
- MLX5_GROUP_FACTOR,
+ tbl = flow_dv_tbl_resource_get(dev, jump_data->group,
attr->egress,
attr->transfer, error);
if (!tbl)
MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(match_mask, match_value,
+ flow_dv_translate_item_vlan(dev_flow,
+ match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
match_value, items);
- item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
+ last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
case RTE_FLOW_ITEM_TYPE_ICMP:
flow_dv_translate_item_icmp(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_ICMP;
+ last_item = MLX5_FLOW_LAYER_ICMP;
break;
case RTE_FLOW_ITEM_TYPE_ICMP6:
flow_dv_translate_item_icmp6(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_ICMP6;
+ last_item = MLX5_FLOW_LAYER_ICMP6;
break;
default:
break;
dv->hash_fields,
(*flow->queue),
flow->rss.queue_num);
- if (!hrxq)
+ if (!hrxq) {
hrxq = mlx5_hrxq_new
(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields, (*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
+ }
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"hardware refuses to create flow");
goto error;
}
+ if (priv->vmwa_context &&
+ dev_flow->dv.vf_vlan.tag &&
+ !dev_flow->dv.vf_vlan.created) {
+ /*
+ * The rule contains the VLAN pattern.
+ * For VF we are going to create VLAN
+ * interface to make hypervisor set correct
+ * e-Switch vport context.
+ */
+ mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ }
}
return 0;
error:
mlx5_hrxq_release(dev, dv->hrxq);
dv->hrxq = NULL;
}
+ if (dev_flow->dv.vf_vlan.tag &&
+ dev_flow->dv.vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
return 1;
}
+/**
+ * Release push vlan action resource.
+ *
+ * @param flow
+ * Pointer to mlx5_flow.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+{
+ struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
+ flow->dv.push_vlan_res;
+
+ assert(cache_resource->action);
+ DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
+ LIST_REMOVE(cache_resource, next);
+ rte_free(cache_resource);
+ DRV_LOG(DEBUG, "push vlan action resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
*
mlx5_hrxq_release(dev, dv->hrxq);
dv->hrxq = NULL;
}
+ if (dev_flow->dv.vf_vlan.tag &&
+ dev_flow->dv.vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
}
}
return;
flow_dv_remove(dev, flow);
if (flow->counter) {
- flow_dv_counter_release(flow->counter);
+ flow_dv_counter_release(dev, flow->counter);
flow->counter = NULL;
}
if (flow->tag_resource) {
flow_dv_jump_tbl_resource_release(dev_flow);
if (dev_flow->dv.port_id_action)
flow_dv_port_id_action_resource_release(dev_flow);
+ if (dev_flow->dv.push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dev_flow);
rte_free(dev_flow);
}
}
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_query_count *qc = data;
- uint64_t pkts = 0;
- uint64_t bytes = 0;
- int err;
if (!priv->config.devx)
return rte_flow_error_set(error, ENOTSUP,
NULL,
"counters are not supported");
if (flow->counter) {
- err = mlx5_devx_cmd_flow_counter_query
- (flow->counter->dcs,
- qc->reset, &pkts, &bytes);
+ uint64_t pkts, bytes;
+ int err = _flow_dv_query_count(dev, flow->counter, &pkts,
+ &bytes);
+
if (err)
- return rte_flow_error_set
- (error, err,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counters");
+ return rte_flow_error_set(error, -err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot read counters");
qc->hits_set = 1;
qc->bytes_set = 1;
qc->hits = pkts - flow->counter->hits;