/* DV flows structure. */
struct mlx5_flow_dv {
- uint64_t hash_fields; /**< Fields that participate in the hash. */
struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
/* Flow DV api: */
struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
struct mlx5_vf_vlan vf_vlan;
/**< Structure for VF VLAN workaround. */
struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
/**< Pointer to push VLAN action resource in cache. */
+ struct mlx5_flow_dv_tag_resource *tag_resource;
+ /**< pointer to the tag action. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
/**< Action list. */
#endif
};
struct ibv_flow *flow; /**< Verbs flow pointer. */
struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
struct mlx5_vf_vlan vf_vlan;
/**< Structure for VF VLAN workaround. */
};
+struct mlx5_flow_rss {
+ uint32_t level;
+ uint32_t queue_num; /**< Number of entries in @p queue. */
+ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
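/*
 * Illustrative sketch, not part of the patch: rss.queue is declared as a
 * pointer to an array of uint16_t, so the queue list can live in storage
 * owned by the flow object while still being addressed as (*rss.queue)[i],
 * as the hunks below do. The names and the storage scheme here are
 * assumptions for illustration only.
 */
#include <stdint.h>
#include <string.h>

struct example_rss {
	uint32_t queue_num;      /* Number of entries in @p queue. */
	uint16_t (*queue)[];     /* Points into caller-owned storage. */
};

int
example_rss_set_queues(struct example_rss *rss, void *storage,
		       const uint16_t *queues, uint32_t n)
{
	rss->queue = storage;    /* e.g. trailing bytes of the flow object. */
	memcpy(*rss->queue, queues, n * sizeof(uint16_t));
	rss->queue_num = n;
	return (*rss->queue)[0]; /* Element access through the array. */
}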
+
/** Device flow structure. */
struct mlx5_flow {
LIST_ENTRY(mlx5_flow) next;
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
uint64_t actions;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+ uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+ uint8_t ingress; /**< 1 if the flow is ingress. */
+ uint32_t group; /**< The group index. */
+ uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_dv dv;
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
enum mlx5_flow_drv_type drv_type; /**< Driver type. */
+ struct mlx5_flow_rss rss; /**< RSS context. */
struct mlx5_flow_counter *counter; /**< Holds flow counter. */
- struct mlx5_flow_dv_tag_resource *tag_resource;
- /**< pointer to the tag action. */
- struct rte_flow_action_rss rss;/**< RSS context. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
/**< Device flows that are part of the flow. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
- uint8_t ingress; /**< 1 if the flow is ingress. */
- uint32_t group; /**< The group index. */
- uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
};
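/*
 * Note: taken together, the hunks above move the per-device-flow attributes
 * (hash_fields, ingress, group, transfer) from rte_flow and from the DV/Verbs
 * sub-structures into the common struct mlx5_flow, group the RSS state
 * (level, types, queue list and hash key) into the new struct mlx5_flow_rss
 * embedded in rte_flow as flow->rss, and move the tag action resource from
 * rte_flow into mlx5_flow_dv (dev_flow->dv.tag_resource).
 */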
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
- struct rte_flow *flow = dev_flow->flow;
struct mlx5dv_dr_domain *domain;
- resource->flags = flow->group ? 0 : 1;
+ resource->flags = dev_flow->group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
domain = sh->rx_domain;
else
domain = sh->tx_domain;
resource->flags =
- dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
if (resource->ft_type == cache_resource->ft_type &&
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- uint32_t size = sizeof(struct mlx5_flow);
- struct mlx5_flow *flow;
+ size_t size = sizeof(struct mlx5_flow);
+ struct mlx5_flow *dev_flow;
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
+ dev_flow = rte_calloc(__func__, 1, size, 0);
+ if (!dev_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not enough memory to create flow");
return NULL;
}
- flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
- return flow;
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ dev_flow->ingress = attr->ingress;
+ dev_flow->transfer = attr->transfer;
+ return dev_flow;
}
#ifndef NDEBUG
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->flow->tag_resource = cache_resource;
+ dev_flow->dv.tag_resource = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
- dev_flow->flow->tag_resource = cache_resource;
+ dev_flow->dv.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
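/*
 * Illustrative sketch, not driver code: the tag (and modify-header) resources
 * above are registered through a lookup-or-create pattern on a
 * reference-counted cache list. A minimal standalone version of that pattern,
 * using <sys/queue.h> and a plain counter instead of rte_atomic32:
 */
#include <stdlib.h>
#include <sys/queue.h>

struct cached_res {
	LIST_ENTRY(cached_res) next;
	unsigned int tag;
	int refcnt;
};

LIST_HEAD(res_list, cached_res);

struct cached_res *
res_register(struct res_list *cache, unsigned int tag)
{
	struct cached_res *res;

	/* Reuse a cached entry when the key matches. */
	LIST_FOREACH(res, cache, next) {
		if (res->tag == tag) {
			res->refcnt++;
			return res;
		}
	}
	/* Otherwise create, take the first reference and cache it. */
	res = calloc(1, sizeof(*res));
	if (!res)
		return NULL;
	res->tag = tag;
	res->refcnt = 1;
	LIST_INSERT_HEAD(cache, res, next);
	return res;
}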
&table, error);
if (ret)
return ret;
- flow->group = table;
+ dev_flow->group = table;
if (attr->transfer)
res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
case RTE_FLOW_ACTION_TYPE_FLAG:
tag_resource.tag =
mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!flow->tag_resource)
+ if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
(dev, &tag_resource, dev_flow, error))
return errno;
dev_flow->dv.actions[actions_n++] =
- flow->tag_resource->action;
+ dev_flow->dv.tag_resource->action;
action_flags |= MLX5_FLOW_ACTION_FLAG;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
tag_resource.tag = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!flow->tag_resource)
+ if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
(dev, &tag_resource, dev_flow, error))
return errno;
dev_flow->dv.actions[actions_n++] =
- flow->tag_resource->action;
+ dev_flow->dv.tag_resource->action;
action_flags |= MLX5_FLOW_ACTION_MARK;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
action_flags |= MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
+ assert(flow->rss.queue);
queue = actions->conf;
flow->rss.queue_num = 1;
- (*flow->queue)[0] = queue->index;
+ (*flow->rss.queue)[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
+ assert(flow->rss.queue);
rss = actions->conf;
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
+ if (flow->rss.queue)
+ memcpy((*flow->rss.queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
- flow->rss.level = rss->level;
+ memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss.types should be set in advance
+ * when expanding items for RSS.
+ */
action_flags |= MLX5_FLOW_ACTION_RSS;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
flow->counter = flow_dv_counter_alloc(dev,
count->shared,
count->id,
- flow->group);
+ dev_flow->group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel, flow->group);
+ items, tunnel,
+ dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel, flow->group);
+ items, tunnel,
+ dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
flow_dv_translate_item_tcp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_TCP,
IBV_RX_HASH_SRC_PORT_TCP |
flow_dv_translate_item_udp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_UDP,
IBV_RX_HASH_SRC_PORT_UDP |
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
matcher.egress = attr->egress;
- matcher.group = flow->group;
+ matcher.group = dev_flow->group;
matcher.transfer = attr->transfer;
if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
return -rte_errno;
dv = &dev_flow->dv;
n = dv->actions_n;
if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (flow->transfer) {
+ if (dev_flow->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
dv->hrxq = mlx5_hrxq_drop_new(dev);
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
- hrxq = mlx5_hrxq_get(dev, flow->key,
+ assert(flow->rss.queue);
+ hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- dv->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num);
if (!hrxq) {
hrxq = mlx5_hrxq_new
- (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
- dv->hash_fields, (*flow->queue),
+ (dev, flow->rss.key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num,
!!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
flow_dv_counter_release(dev, flow->counter);
flow->counter = NULL;
}
- if (flow->tag_resource) {
- flow_dv_tag_release(dev, flow->tag_resource);
- flow->tag_resource = NULL;
- }
while (!LIST_EMPTY(&flow->dev_flows)) {
dev_flow = LIST_FIRST(&flow->dev_flows);
LIST_REMOVE(dev_flow, next);
flow_dv_port_id_action_resource_release(dev_flow);
if (dev_flow->dv.push_vlan_res)
flow_dv_push_vlan_action_resource_release(dev_flow);
+ if (dev_flow->dv.tag_resource)
+ flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
rte_free(dev_flow);
}
}
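/*
 * Note: with tag_resource now stored per device flow, its reference is
 * dropped inside the dev_flows teardown loop above instead of once per
 * rte_flow.
 */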
const struct rte_flow_action_queue *queue = action->conf;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
+ if (flow->rss.queue)
+ (*flow->rss.queue)[0] = queue->index;
flow->rss.queue_num = 1;
}
const uint8_t *rss_key;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
+ if (flow->rss.queue)
+ memcpy((*flow->rss.queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
- flow->rss.level = rss->level;
+ memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss.types should be set in advance when expanding
+ * items for RSS.
+ */
}
/**
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
- struct mlx5_flow *flow;
+ size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ struct mlx5_flow *dev_flow;
size += flow_verbs_get_actions_size(actions);
size += flow_verbs_get_items_size(items);
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
+ dev_flow = rte_calloc(__func__, 1, size, 0);
+ if (!dev_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not enough memory to create flow");
return NULL;
}
- flow->verbs.attr = (void *)(flow + 1);
- flow->verbs.specs =
- (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
- return flow;
+ dev_flow->verbs.attr = (void *)(dev_flow + 1);
+ dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+ dev_flow->ingress = attr->ingress;
+ dev_flow->transfer = attr->transfer;
+ return dev_flow;
}
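/*
 * Illustrative sketch, not driver code: flow_verbs_prepare() above sizes one
 * allocation to hold the device flow, the ibv_flow_attr and the spec bytes,
 * then points verbs.attr and verbs.specs at the trailing storage. The same
 * layout with plain calloc() and hypothetical stand-in types:
 */
#include <stdint.h>
#include <stdlib.h>

struct attr_hdr { uint32_t num_of_specs; }; /* Stand-in for ibv_flow_attr. */

struct dev_obj {
	struct attr_hdr *attr;  /* Points just past the object itself. */
	uint8_t *specs;         /* Points just past the attribute header. */
};

struct dev_obj *
dev_obj_prepare(size_t specs_size)
{
	size_t size = sizeof(struct dev_obj) + sizeof(struct attr_hdr) +
		      specs_size;
	struct dev_obj *obj = calloc(1, size);

	if (!obj)
		return NULL;
	obj->attr = (void *)(obj + 1);
	obj->specs = (uint8_t *)(obj->attr + 1);
	return obj;
}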
/**
flow_verbs_translate_item_ipv4(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
flow_verbs_translate_item_ipv6(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
flow_verbs_translate_item_tcp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_TCP,
(IBV_RX_HASH_SRC_PORT_TCP |
flow_verbs_translate_item_udp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_UDP,
(IBV_RX_HASH_SRC_PORT_UDP |
} else {
struct mlx5_hrxq *hrxq;
- hrxq = mlx5_hrxq_get(dev, flow->key,
+ assert(flow->rss.queue);
+ hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num);
if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
+ hrxq = mlx5_hrxq_new(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num,
!!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));