struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
void *inter_flows; /* Intermediate resources for flow creation. */
+ void *rss_desc; /* Intermediate rss description resources. */
int flow_idx; /* Intermediate device flow index. */
int flow_nested_idx; /* Intermediate device flow index, nested. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
* The hash fields that should be used.
*/
uint64_t
-mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
-	struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	/* level >= 2 means the application requested inner-header RSS. */
-	int rss_request_inner = flow->rss.level >= 2;
+	int rss_request_inner = rss_desc->level >= 2;
	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
-	if (!(flow->rss.types & layer_types))
+	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] flow
- * Pointer to flow structure.
* @param[in] dev_handle
* Pointer to device flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
- flow_drv_rxq_flags_set(dev, flow, dev_handle);
+ flow_drv_rxq_flags_set(dev, dev_handle);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to flow structure.
* @param[in] dev_handle
* Pointer to the device flow handle structure.
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
MLX5_ASSERT(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
- flow_drv_rxq_flags_trim(dev, flow, dev_handle);
+ flow_drv_rxq_flags_trim(dev, dev_handle);
}
/**
uint8_t buffer[2048];
} items_tx;
struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)[!!priv->flow_idx];
const struct rte_flow_action *p_actions_rx = actions;
uint32_t i;
- uint32_t flow_size;
int hairpin_flow = 0;
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
&hairpin_id);
p_actions_rx = actions_rx.actions;
}
- flow_size = sizeof(struct rte_flow);
- rss = flow_get_rss_action(p_actions_rx);
- if (rss)
- flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
- sizeof(void *));
- else
- flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
- flow = rte_calloc(__func__, 1, flow_size, 0);
+ flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
if (!flow) {
rte_errno = ENOMEM;
goto error_before_flow;
flow->hairpin_flow_id = hairpin_id;
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
- flow->rss.queue = (void *)(flow + 1);
+ memset(rss_desc, 0, sizeof(*rss_desc));
+ rss = flow_get_rss_action(p_actions_rx);
if (rss) {
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
*/
- flow->rss.level = rss->level;
+ rss_desc->level = rss->level;
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+ rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
}
flow->dev_handles = 0;
if (rss && rss->types) {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!priv->inter_flows)
- priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
- sizeof(struct mlx5_flow), 0);
+ if (!priv->inter_flows) {
+ priv->inter_flows = rte_calloc(__func__, 1,
+ MLX5_NUM_MAX_DEV_FLOWS *
+ sizeof(struct mlx5_flow) +
+ (sizeof(struct mlx5_flow_rss_desc) +
+ sizeof(uint16_t) * UINT16_MAX) * 2, 0);
+ if (!priv->inter_flows) {
+ DRV_LOG(ERR, "can't allocate intermediate memory.");
+ return;
+ }
+ }
+ priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
+ [MLX5_NUM_MAX_DEV_FLOWS];
/* Reset the index. */
priv->flow_idx = 0;
priv->flow_nested_idx = 0;
uint16_t size;
};
-struct mlx5_flow_rss {
+/* RSS description. */
+struct mlx5_flow_rss_desc {
+	/* RSS encap level; >= 2 requests RSS on inner (tunneled) headers. */
	uint32_t level;
	uint32_t queue_num; /**< Number of entries in @p queue. */
	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	/* Flexible array member: must remain the last field of the struct. */
+	uint16_t queue[]; /**< Destination queues to redirect traffic to. */
};
+
/** Device flow handle structure for DV mode only. */
struct mlx5_flow_handle_dv {
/* Flow DV api: */
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
enum mlx5_flow_drv_type drv_type; /**< Driver type. */
- struct mlx5_flow_rss rss; /**< RSS context. */
uint32_t counter; /**< Holds flow counter. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
bool external, uint32_t group, bool fdb_def_rule,
uint32_t *table, struct rte_flow_error *error);
-uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
- uint64_t layer_types,
+uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
+ int tunnel, uint64_t layer_types,
uint64_t hash_fields);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t subpriority);
*
* @param[in] dev_flow
* Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
*/
static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc)
{
- struct rte_flow *flow = dev_flow->flow;
uint64_t items = dev_flow->handle->layers;
int rss_inner = 0;
- uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+ uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (flow->rss.level >= 2) {
+ if (rss_desc->level >= 2) {
dev_flow->hash_fields |= IBV_RX_HASH_INNER;
rss_inner = 1;
}
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- MLX5_ASSERT(flow->rss.queue);
queue = actions->conf;
- flow->rss.queue_num = 1;
- (*flow->rss.queue)[0] = queue->index;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- MLX5_ASSERT(flow->rss.queue);
rss = actions->conf;
- if (flow->rss.queue)
- memcpy((*flow->rss.queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
+ memcpy(rss_desc->queue, rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
/*
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
- matcher.priority = flow->rss.level >= 2 ?
+ matcher.priority = rss_desc->level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GTP;
break;
*/
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
- flow_dv_hashfields_set(dev_flow);
+ flow_dv_hashfields_set(dev_flow, rss_desc);
/* Register matcher. */
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
+ struct mlx5_flow_rss_desc *rss_desc =
+ &((struct mlx5_flow_rss_desc *)priv->rss_desc)
+ [!!priv->flow_nested_idx];
- MLX5_ASSERT(flow->rss.queue);
- hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+ MLX5_ASSERT(rss_desc->queue_num);
+ hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num);
+ rss_desc->queue,
+ rss_desc->queue_num);
if (!hrxq_idx) {
hrxq_idx = mlx5_hrxq_new
- (dev, flow->rss.key,
+ (dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num,
+ rss_desc->queue,
+ rss_desc->queue_num,
!!(dh->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
* the input is valid and that there is space to insert the requested action
* into the flow.
*
- * @param[in] dev_flow
- * Pointer to mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to mlx5_flow_rss_desc.
* @param[in] action
* Action configuration.
*/
static void
-flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
+flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;
-	struct rte_flow *flow = dev_flow->flow;
-	if (flow->rss.queue)
-		(*flow->rss.queue)[0] = queue->index;
-	flow->rss.queue_num = 1;
+	/* A QUEUE action targets exactly one Rx queue. */
+	rss_desc->queue[0] = queue->index;
+	rss_desc->queue_num = 1;
}
/**
* the input is valid and that there is space to insert the requested action
* into the flow.
*
+ * @param[in] rss_desc
+ * Pointer to mlx5_flow_rss_desc.
* @param[in] action
* Action configuration.
- * @param[in, out] action_flags
- * Pointer to the detected actions.
- * @param[in] dev_flow
- * Pointer to mlx5_flow.
*/
static void
-flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
+flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
const struct rte_flow_action *action)
{
const struct rte_flow_action_rss *rss = action->conf;
const uint8_t *rss_key;
- struct rte_flow *flow = dev_flow->flow;
- if (flow->rss.queue)
- memcpy((*flow->rss.queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
+ memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
/*
* rss->level and rss.types should be set in advance when expanding
* items for RSS.
uint64_t priority = attr->priority;
uint32_t subpriority = 0;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- flow_verbs_translate_action_queue(dev_flow, actions);
+ flow_verbs_translate_action_queue(rss_desc, actions);
action_flags |= MLX5_FLOW_ACTION_QUEUE;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- flow_verbs_translate_action_rss(dev_flow, actions);
+ flow_verbs_translate_action_rss(rss_desc, actions);
action_flags |= MLX5_FLOW_ACTION_RSS;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
subpriority = MLX5_PRIORITY_MAP_L3;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
+ (rss_desc, tunnel,
MLX5_IPV4_LAYER_TYPES,
MLX5_IPV4_IBV_RX_HASH);
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
subpriority = MLX5_PRIORITY_MAP_L3;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
+ (rss_desc, tunnel,
MLX5_IPV6_LAYER_TYPES,
MLX5_IPV6_IBV_RX_HASH);
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
subpriority = MLX5_PRIORITY_MAP_L4;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_TCP,
+ (rss_desc, tunnel, ETH_RSS_TCP,
(IBV_RX_HASH_SRC_PORT_TCP |
IBV_RX_HASH_DST_PORT_TCP));
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
subpriority = MLX5_PRIORITY_MAP_L4;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_UDP,
+ (rss_desc, tunnel, ETH_RSS_UDP,
(IBV_RX_HASH_SRC_PORT_UDP |
IBV_RX_HASH_DST_PORT_UDP));
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
}
} else {
uint32_t hrxq_idx;
+ struct mlx5_flow_rss_desc *rss_desc =
+ &((struct mlx5_flow_rss_desc *)priv->rss_desc)
+ [!!priv->flow_nested_idx];
- MLX5_ASSERT(flow->rss.queue);
- hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+ MLX5_ASSERT(rss_desc->queue_num);
+ hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num);
+ rss_desc->queue,
+ rss_desc->queue_num);
if (!hrxq_idx)
- hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
+ hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num,
+ rss_desc->queue,
+ rss_desc->queue_num,
!!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],