priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
if (!priv->drop_queue.hrxq)
goto error;
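+	/*
+	 * Create the hash Rx queue list and the indirection table lock before
+	 * the HW steering early return below, so both flow engines can use them.
+	 */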
+ priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+ mlx5_hrxq_create_cb,
+ mlx5_hrxq_match_cb,
+ mlx5_hrxq_remove_cb,
+ mlx5_hrxq_clone_cb,
+ mlx5_hrxq_clone_free_cb);
+ if (!priv->hrxqs)
+ goto error;
+ rte_rwlock_init(&priv->ind_tbls_lock);
if (priv->sh->config.dv_flow_en == 2)
return eth_dev;
/* Port representor shares the same max priority with pf port. */
err = ENOTSUP;
goto error;
}
- priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
- mlx5_hrxq_create_cb,
- mlx5_hrxq_match_cb,
- mlx5_hrxq_remove_cb,
- mlx5_hrxq_clone_cb,
- mlx5_hrxq_clone_free_cb);
- if (!priv->hrxqs)
- goto error;
- rte_rwlock_init(&priv->ind_tbls_lock);
/* Query availability of metadata reg_c's. */
if (!priv->sh->metadata_regc_check_flag) {
err = mlx5_flow_discover_mreg_c(eth_dev);
uint64_t hash_fields; /* Verbs Hash fields. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint32_t key_len; /**< RSS hash key len. */
+ uint32_t hws_flags; /**< HW steering action. */
uint32_t tunnel; /**< Queue in tunnel. */
uint32_t shared_rss; /**< Shared RSS index. */
struct mlx5_ind_table_obj *ind_tbl;
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
void *action; /* DV QP action pointer. */
#endif
+	uint32_t hws_flags; /* HW steering flags. */
uint64_t hash_fields; /* Verbs Hash fields. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint32_t idx; /* Hash Rx queue index. */
LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
/* Indirection tables. */
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
+ /* Standalone indirect tables. */
+ LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
goto error;
}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
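+	/*
+	 * With HW steering flags set, create an mlx5dr destination TIR action
+	 * instead of the DV flow action used by SW steering below.
+	 */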
+ if (hrxq->hws_flags) {
+ hrxq->action = mlx5dr_action_create_dest_tir
+ (priv->dr_ctx,
+ (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);
+ if (!hrxq->action)
+ goto error;
+ return 0;
+ }
if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
&hrxq->action)) {
rte_errno = errno;
DRV_LOG(ERR, "Cannot create drop RX queue");
return ret;
}
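+	/*
+	 * In HW steering mode (dv_flow_en == 2) the drop queue does not need
+	 * the indirection table setup below.
+	 */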
+ if (priv->sh->config.dv_flow_en == 2)
+ return 0;
/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
if (ret != 0) {
mlx5_action_handle_attach(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_indexed_pool *ipool =
- priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
- struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
int ret = 0;
- uint32_t idx;
+ struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
const char *message;
uint32_t queue_idx;
}
if (ret != 0)
return ret;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
if (ret != 0) {
DRV_LOG(ERR, "Port %u could not attach "
goto error;
}
}
+
return 0;
error:
- shared_rss_last = shared_rss;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
- if (shared_rss == shared_rss_last)
+ ind_tbl_last = ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+ if (ind_tbl == ind_tbl_last)
break;
if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
DRV_LOG(CRIT, "Port %u could not detach "
mlx5_action_handle_detach(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_indexed_pool *ipool =
- priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
- struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
int ret = 0;
- uint32_t idx;
-
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
if (ret != 0) {
DRV_LOG(ERR, "Port %u could not detach "
}
return 0;
error:
- shared_rss_last = shared_rss;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
- if (shared_rss == shared_rss_last)
+ ind_tbl_last = ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+ if (ind_tbl == ind_tbl_last)
break;
if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
DRV_LOG(CRIT, "Port %u could not attach "
union {
/* Jump action. */
struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq; /* TIR action. */
};
struct rte_flow_template_table *table; /* The table the flow is allocated from. */
struct mlx5dr_rule rule; /* HWS layer data struct. */
/* Dynamic action list. */
LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
struct mlx5_hw_jump_action *jump; /* Jump action. */
+ struct mlx5_hrxq *tir; /* TIR action. */
uint32_t acts_num:4; /* Total action number. */
/* Translated DR action array from action template. */
struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
struct mlx5_list_entry *entry);
+void flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields);
+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field);
struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
/**
* Set the hash fields according to the @p flow information.
*
- * @param[in] dev_flow
- * Pointer to the mlx5_flow.
+ * @param[in] item_flags
+ * The match pattern item flags.
* @param[in] rss_desc
* Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hash_fields
+ * Pointer to the RSS hash fields.
*/
-static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc)
+void
+flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields)
{
- uint64_t items = dev_flow->handle->layers;
+ uint64_t items = item_flags;
+ uint64_t fields = 0;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
- dev_flow->hash_fields = 0;
+ *hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (rss_desc->level >= 2)
rss_inner = 1;
#endif
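+	/*
+	 * With an empty item_flags (no pattern layers detected, e.g. when
+	 * called for a template RSS action), select the hash fields from the
+	 * RSS types alone.
+	 */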
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
+ !items) {
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ fields |= IBV_RX_HASH_SRC_IPV4;
else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ fields |= IBV_RX_HASH_DST_IPV4;
else
- dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ fields |= MLX5_IPV4_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
+ !items) {
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ fields |= IBV_RX_HASH_SRC_IPV6;
else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ fields |= IBV_RX_HASH_DST_IPV6;
else
- dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (dev_flow->hash_fields == 0)
+ if (fields == 0)
/*
* There is no match between the RSS types and the
* L3 protocol (IPv4/IPv6) defined in the flow rule.
*/
return;
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
+ !items) {
if (rss_types & RTE_ETH_RSS_UDP) {
if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_UDP;
+ fields |= IBV_RX_HASH_SRC_PORT_UDP;
else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_UDP;
+ fields |= IBV_RX_HASH_DST_PORT_UDP;
else
- dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ fields |= MLX5_UDP_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
+ !items) {
if (rss_types & RTE_ETH_RSS_TCP) {
if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_TCP;
+ fields |= IBV_RX_HASH_SRC_PORT_TCP;
else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_TCP;
+ fields |= IBV_RX_HASH_DST_PORT_TCP;
else
- dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ fields |= MLX5_TCP_IBV_RX_HASH;
}
}
if (rss_inner)
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ fields |= IBV_RX_HASH_INNER;
+ *hash_fields = fields;
}
/**
struct mlx5_flow_rss_desc *rss_desc,
uint32_t *hrxq_idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *dh = dev_flow->handle;
struct mlx5_hrxq *hrxq;
rss_desc->shared_rss = 0;
if (rss_desc->hash_fields == 0)
rss_desc->queue_num = 1;
- *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
- if (!*hrxq_idx)
- return NULL;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- *hrxq_idx);
+ hrxq = mlx5_hrxq_get(dev, rss_desc);
+ *hrxq_idx = hrxq ? hrxq->idx : 0;
return hrxq;
}
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
*/
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
rss_desc, &hrxq_idx);
if (!hrxq)
*/
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
/* If the sample action contains an RSS action, the Sample/Mirror resource
 * should be registered after the hash field is updated.
 */
* MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
* same slot in mlx5_rss_hash_fields.
*
- * @param[in] rss
- * Pointer to the shared action RSS conf.
+ * @param[in] rss_types
+ *   RSS types.
* @param[in, out] hash_field
* hash_field variable needed to be adjusted.
*
* @return
* void
*/
-static void
-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
- uint64_t *hash_field)
+void
+flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field)
{
- uint64_t rss_types = rss->origin.types;
-
switch (*hash_field & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
- !!dev->data->dev_started)) {
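+	/*
+	 * Allocate a standalone indirection table for the shared RSS action
+	 * and reference its queues if the port is already started.
+	 */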
+ shared_rss->ind_tbl = mlx5_ind_table_obj_new
+ (dev, shared_rss->origin.queue,
+ shared_rss->origin.queue_num,
+ true,
+ !!dev->data->dev_started);
+ if (!shared_rss->ind_tbl)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
- }
memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = shared_rss->origin.queue;
rss_desc.shared_rss = action_idx;
rss_desc.ind_tbl = shared_rss->ind_tbl;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
- uint32_t hrxq_idx;
+ struct mlx5_hrxq *hrxq;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
int tunnel = 0;
- __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
+ flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
+ &hash_fields);
if (shared_rss->origin.level > 1) {
hash_fields |= IBV_RX_HASH_INNER;
tunnel = 1;
}
rss_desc.tunnel = tunnel;
rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
+ hrxq = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
goto error_hrxq_new;
}
err = __flow_dv_action_rss_hrxq_set
- (shared_rss, hash_fields, hrxq_idx);
+ (shared_rss, hash_fields, hrxq->idx);
MLX5_ASSERT(!err);
}
return 0;
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
shared_rss->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss = NULL;
- void *queue = NULL;
struct rte_flow_action_rss *origin;
const uint8_t *rss_key;
- uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
uint32_t idx;
RTE_SET_USED(conf);
- queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
- 0, SOCKET_ID_ANY);
shared_rss = mlx5_ipool_zmalloc
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
- if (!shared_rss || !queue) {
+ if (!shared_rss) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
"rss action number out of range");
goto error_rss_init;
}
- shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*shared_rss->ind_tbl),
- 0, SOCKET_ID_ANY);
- if (!shared_rss->ind_tbl) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- goto error_rss_init;
- }
- memcpy(queue, rss->queue, queue_size);
- shared_rss->ind_tbl->queues = queue;
- shared_rss->ind_tbl->queues_n = rss->queue_num;
origin = &shared_rss->origin;
origin->func = rss->func;
origin->level = rss->level;
memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
origin->key = &shared_rss->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- origin->queue = queue;
+ origin->queue = rss->queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
goto error_rss_init;
+	/* Update the origin queue pointer to the indirection table's queue memory. */
+ origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
error_rss_init:
if (shared_rss) {
if (shared_rss->ind_tbl)
- mlx5_free(shared_rss->ind_tbl);
+ mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
}
- if (queue)
- mlx5_free(queue);
return 0;
}
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
int remaining;
- uint16_t *queue = NULL;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
- queue = shared_rss->ind_tbl->queues;
- remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
!!dev->data->dev_started);
if (remaining)
return rte_flow_error_set(error, EBUSY,
NULL,
"shared rss indirection table has"
" references");
- mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
int ret = 0;
void *queue = NULL;
- uint16_t *queue_old = NULL;
+ void *queue_i = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
bool dev_started = !!dev->data->dev_started;
memcpy(queue, action_conf->queue, queue_size);
MLX5_ASSERT(shared_rss->ind_tbl);
rte_spinlock_lock(&shared_rss->action_rss_sl);
- queue_old = shared_rss->ind_tbl->queues;
+ queue_i = shared_rss->ind_tbl->queues;
ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
queue, action_conf->queue_num,
true /* standalone */,
dev_started /* ref_new_qs */,
dev_started /* deref_old_qs */);
if (ret) {
- mlx5_free(queue);
ret = rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"cannot update indirection table");
} else {
- mlx5_free(queue_old);
- shared_rss->origin.queue = queue;
+		/* Copy the new queues into the indirection table's own queue memory and restore the pointer. */
+ memcpy(queue_i, queue, queue_size);
+ shared_rss->ind_tbl->queues = queue_i;
shared_rss->origin.queue_num = action_conf->queue_num;
}
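+	/* The temporary queue array is no longer referenced, free it. */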
+ mlx5_free(queue);
rte_spinlock_unlock(&shared_rss->action_rss_sl);
return ret;
}
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
continue;
- hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
- if (!hrxq_idx[i]) {
+ hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
+ if (!hrxq) {
rte_spinlock_unlock(&mtr_policy->sl);
return NULL;
}
+ hrxq_idx[i] = hrxq->idx;
}
sub_policy_num = (mtr_policy->sub_policy_num >>
(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"
+#include "mlx5_rx.h"
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
}
+/**
+ * Register queue/RSS action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] hws_flags
+ * DR action flags.
+ * @param[in] action
+ *   Pointer to the rte_flow action (queue or RSS).
+ *
+ * @return
+ *   Pointer to the hash Rx queue on success, NULL otherwise and rte_errno is set.
+ */
+static inline struct mlx5_hrxq*
+flow_hw_tir_action_register(struct rte_eth_dev *dev,
+ uint32_t hws_flags,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_flow_rss_desc rss_desc = {
+ .hws_flags = hws_flags,
+ };
+ struct mlx5_hrxq *hrxq;
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ rss_desc.const_q = &queue->index;
+ rss_desc.queue_num = 1;
+ } else {
+ const struct rte_flow_action_rss *rss = action->conf;
+
+ rss_desc.queue_num = rss->queue_num;
+ rss_desc.const_q = rss->queue;
+ memcpy(rss_desc.key,
+ !rss->key ? rss_hash_default_key : rss->key,
+ MLX5_RSS_HASH_KEY_LEN);
+ rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
+ flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
+ flow_dv_action_rss_l34_hash_adjust(rss->types,
+ &rss_desc.hash_fields);
+ if (rss->level > 1) {
+ rss_desc.hash_fields |= IBV_RX_HASH_INNER;
+ rss_desc.tunnel = 1;
+ }
+ }
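+	/* Get an existing hash Rx queue matching the descriptor or create a new one. */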
+ hrxq = mlx5_hrxq_get(dev, &rss_desc);
+ return hrxq;
+}
+
/**
* Destroy DR actions created by action template.
*
}
i++;
break;
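+		/*
+		 * With a masked QUEUE/RSS action the TIR can be created once at
+		 * template translation time; otherwise the action is appended
+		 * to the construct list and resolved at flow creation.
+		 */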
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ if (masks->conf) {
+ acts->tir = flow_hw_tir_action_register
+ (dev,
+ mlx5_hw_act_flag[!!attr->group][type],
+ actions);
+ if (!acts->tir)
+ goto err;
+ acts->rule_acts[i].action =
+ acts->tir->action;
+ } else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, i)) {
+ goto err;
+ }
+ i++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (masks->conf) {
+ acts->tir = flow_hw_tir_action_register
+ (dev,
+ mlx5_hw_act_flag[!!attr->group][type],
+ actions);
+ if (!acts->tir)
+ goto err;
+ acts->rule_acts[i].action =
+ acts->tir->action;
+ } else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, i)) {
+ goto err;
+ }
+ i++;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
struct rte_flow_attr attr = {
.ingress = 1,
};
+ uint32_t ft_flag;
memcpy(rule_acts, hw_acts->rule_acts,
sizeof(*rule_acts) * hw_acts->acts_num);
if (LIST_EMPTY(&hw_acts->act_list))
return 0;
attr.group = table->grp->group_id;
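+	/* DR action flags for TIR creation depend on the table's group and type. */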
+ ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
if (table->type == MLX5DR_TABLE_TYPE_FDB) {
attr.transfer = 1;
attr.ingress = 1;
LIST_FOREACH(act_data, &hw_acts->act_list, next) {
uint32_t jump_group;
struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq;
action = &actions[act_data->action_src];
MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
job->flow->jump = jump;
job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
break;
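+		/*
+		 * For non-masked QUEUE/RSS actions, register the TIR at flow
+		 * creation time and keep the hash Rx queue as the flow fate so
+		 * it can be released on destroy.
+		 */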
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ hrxq = flow_hw_tir_action_register(dev,
+ ft_flag,
+ action);
+ if (!hrxq)
+ return -1;
+ rule_acts[act_data->action_dst].action = hrxq->action;
+ job->flow->hrxq = hrxq;
+ job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
+ break;
default:
break;
}
if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
flow_hw_jump_release(dev, job->flow->jump);
+ else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
+ mlx5_hrxq_obj_release(dev, job->flow->hrxq);
mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
}
priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
MLX5_ASSERT(priv->drop_queue.hrxq);
hrxq = priv->drop_queue.hrxq;
} else {
- uint32_t hrxq_idx;
struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
MLX5_ASSERT(rss_desc->queue_num);
rss_desc->tunnel = !!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL);
rss_desc->shared_rss = 0;
- hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- hrxq_idx);
+ hrxq = mlx5_hrxq_get(dev, rss_desc);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- handle->rix_hrxq = hrxq_idx;
+ handle->rix_hrxq = hrxq->idx;
}
MLX5_ASSERT(hrxq);
handle->drv_flow = mlx5_glue->create_flow
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
const uint16_t *queues,
uint32_t queues_n);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n,
+ bool standalone,
+ bool ref_qs);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone,
bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry);
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc);
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
- * @param standalone
- * Indirection table for Standalone queue.
* @param deref_rxqs
* If true, then dereference RX queues related to indirection table.
* Otherwise, no additional action will be taken.
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone,
bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- if (!ret && !standalone)
+ if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
if (ret)
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n, bool standalone, bool ref_qs)
{
struct mlx5_ind_table_obj *ind_tbl;
int ret;
+	/*
+	 * Allocate the maximum number of queues for a shared action, as the
+	 * queue number may be modified later.
+	 */
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+ (standalone ? priv->rxqs_n : queues_n) *
+ sizeof(uint16_t), 0, SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
mlx5_free(ind_tbl);
return NULL;
}
- if (!standalone) {
- rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ if (!standalone)
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- rte_rwlock_write_unlock(&priv->ind_tbls_lock);
- }
+ else
+ LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+
return ind_tbl;
}
return (hrxq->rss_key_len != rss_desc->key_len ||
memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hws_flags != rss_desc->hws_flags ||
hrxq->hash_fields != rss_desc->hash_fields ||
hrxq->ind_table->queues_n != rss_desc->queue_num ||
memcmp(hrxq->ind_table->queues, rss_desc->queue,
}
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
- true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
}
rte_errno = err;
return -rte_errno;
struct mlx5_priv *priv = dev->data->dev_private;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
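+	/* Actions created through mlx5dr must be destroyed through mlx5dr as well. */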
+ if (hrxq->hws_flags)
+ mlx5dr_action_destroy(hrxq->action);
+ else
+ mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ hrxq->hws_flags ?
+ (!!dev->data->dev_started) : true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
int ret;
queues_n = rss_desc->hash_fields ? queues_n : 1;
- if (!ind_tbl)
+ if (!ind_tbl && !rss_desc->hws_flags)
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- standalone,
+ standalone ||
+ rss_desc->hws_flags,
!!dev->data->dev_started);
if (!ind_tbl)
return NULL;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = rss_desc->hash_fields;
+ hrxq->hws_flags = rss_desc->hws_flags;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
if (ret < 0)
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
* RSS configuration for the Rx hash queue.
*
* @return
- * An hash Rx queue index on success.
+ *   A hash Rx queue on success, NULL otherwise.
*/
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
+ struct mlx5_hrxq *hrxq = NULL;
struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
} else {
entry = mlx5_list_register(priv->hrxqs, &ctx);
if (!entry)
- return 0;
+ return NULL;
hrxq = container_of(entry, typeof(*hrxq), entry);
}
- if (hrxq)
- return hrxq->idx;
- return 0;
+ return hrxq;
}
/**
* @param dev
* Pointer to Ethernet device.
- * @param hrxq_idx
- *   Index to Hash Rx queue to release.
+ * @param hrxq
+ *   Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (!hrxq)
return 0;
if (!hrxq->standalone)
return 0;
}
+/**
+ * Release the hash Rx queue with index.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ return mlx5_hrxq_obj_release(dev, hrxq);
+}
+
/**
* Create a drop Rx Hash queue.
*