#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
+#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
+#include "mlx5_common_os.h"
+#include "rte_pmd_mlx5.h"
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+ const struct rte_flow_tunnel *app_tunnel,
+ struct mlx5_flow_tunnel **tunnel);
+
/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
},
};
+static struct rte_flow_shared_action *
+mlx5_shared_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_destroy
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *shared_action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_update
+ (struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *shared_action,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error);
+static int mlx5_shared_action_query
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action,
+ void *data,
+ struct rte_flow_error *error);
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *tunnel,
+ const char **err_msg)
+{
+ *err_msg = NULL;
+ if (!is_tunnel_offload_active(dev)) {
+ *err_msg = "tunnel offload was not activated";
+ goto out;
+ } else if (!tunnel) {
+ *err_msg = "no application tunnel";
+ goto out;
+ }
+
+ switch (tunnel->type) {
+ default:
+ *err_msg = "unsupported tunnel type";
+ goto out;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ break;
+ }
+
+out:
+ return !*err_msg;
+}
+
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *app_tunnel,
+ struct rte_flow_action **actions,
+ uint32_t *num_of_actions,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct mlx5_flow_tunnel *tunnel;
+ const char *err_msg = NULL;
+ bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+ if (!verdict)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ err_msg);
+ ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+ if (ret < 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "failed to initialize pmd tunnel");
+ }
+ *actions = &tunnel->action;
+ *num_of_actions = 1;
+ return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *app_tunnel,
+ struct rte_flow_item **items,
+ uint32_t *num_of_items,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct mlx5_flow_tunnel *tunnel;
+ const char *err_msg = NULL;
+ bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+ if (!verdict)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ err_msg);
+ ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+ if (ret < 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to initialize pmd tunnel");
+ }
+ *items = &tunnel->item;
+ *num_of_items = 1;
+ return 0;
+}
+
+static int
+mlx5_flow_item_release(struct rte_eth_dev *dev,
+ struct rte_flow_item *pmd_items,
+ uint32_t num_items, struct rte_flow_error *err)
+{
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_tunnel *tun;
+
+ LIST_FOREACH(tun, &thub->tunnels, chain) {
+ if (&tun->item == pmd_items)
+ break;
+ }
+ if (!tun || num_items != 1)
+ return rte_flow_error_set(err, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "invalid argument");
+ if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+ mlx5_flow_tunnel_free(dev, tun);
+ return 0;
+}
+
+static int
+mlx5_flow_action_release(struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions, struct rte_flow_error *err)
+{
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_tunnel *tun;
+
+ LIST_FOREACH(tun, &thub->tunnels, chain) {
+ if (&tun->action == pmd_actions)
+ break;
+ }
+ if (!tun || num_actions != 1)
+ return rte_flow_error_set(err, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "invalid argument");
+ if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+ mlx5_flow_tunnel_free(dev, tun);
+
+ return 0;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+ struct rte_mbuf *m,
+ struct rte_flow_restore_info *info,
+ struct rte_flow_error *err)
+{
+ uint64_t ol_flags = m->ol_flags;
+ const struct mlx5_flow_tbl_data_entry *tble;
+ const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+ if ((ol_flags & mask) != mask)
+ goto err;
+ tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+ if (!tble) {
+ DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+ dev->data->port_id, m->hash.fdir.hi);
+ goto err;
+ }
+ MLX5_ASSERT(tble->tunnel);
+ memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+ info->group_id = tble->group_id;
+ info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+ RTE_FLOW_RESTORE_INFO_GROUP_ID |
+ RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+ return 0;
+
+err:
+ return rte_flow_error_set(err, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "failed to get restore info");
+}
+
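+/*
+ * Illustrative application-side sequence for the tunnel offload
+ * callbacks above, going through the generic rte_flow API (a sketch;
+ * error handling omitted, port_id/mbuf/err are the application's):
+ *
+ *	struct rte_flow_tunnel tun = { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
+ *	struct rte_flow_action *pmd_actions;
+ *	struct rte_flow_item *pmd_items;
+ *	uint32_t n_actions, n_items;
+ *	struct rte_flow_restore_info info;
+ *
+ *	rte_flow_tunnel_decap_set(port_id, &tun, &pmd_actions,
+ *				  &n_actions, &err);
+ *	rte_flow_tunnel_match(port_id, &tun, &pmd_items, &n_items, &err);
+ *	... create the decap-set and match rules using the PMD actions
+ *	    and items, then on a partially offloaded packet: ...
+ *	rte_flow_get_restore_info(port_id, mbuf, &info, &err);
+ */
+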
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
.query = mlx5_flow_query,
.dev_dump = mlx5_flow_dev_dump,
.get_aged_flows = mlx5_flow_get_aged_flows,
+ .shared_action_create = mlx5_shared_action_create,
+ .shared_action_destroy = mlx5_shared_action_destroy,
+ .shared_action_update = mlx5_shared_action_update,
+ .shared_action_query = mlx5_shared_action_query,
+ .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
+ .tunnel_match = mlx5_flow_tunnel_match,
+ .tunnel_action_decap_release = mlx5_flow_action_release,
+ .tunnel_item_release = mlx5_flow_item_release,
+ .get_restore_info = mlx5_flow_tunnel_get_restore_info,
};
/* Convert FDIR request to Generic flow. */
/*
* Validate the rss action.
*
- * @param[in] action
- * Pointer to the queue action.
- * @param[in] action_flags
- * Bit-fields that holds the actions detected until now.
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] attr
- * Attributes of flow that includes this action.
- * @param[in] item_flags
- * Items that were detected.
+ * @param[in] action
+ * Pointer to the RSS action.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
- uint64_t action_flags,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- uint64_t item_flags,
- struct rte_flow_error *error)
+mlx5_validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- if (action_flags & MLX5_FLOW_FATE_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have 2 fate actions"
- " in same flow");
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
return rte_flow_error_set(error, ENOTSUP,
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue[i], "queue is not configured");
}
+ return 0;
+}
+
+/*
+ * Validate the rss action.
+ *
+ * @param[in] action
+ * Pointer to the RSS action.
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Items that were detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions"
+ " in same flow");
+ ret = mlx5_validate_action_rss(dev, action, error);
+ if (ret)
+ return ret;
if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
+ * @param[in] ext_vlan_sup
+ * Whether extended VLAN features are supported or not.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
- uint64_t item_flags,
+ uint64_t item_flags, bool ext_vlan_sup,
struct rte_flow_error *error)
{
const struct rte_flow_item_eth *mask = item->mask;
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.type = RTE_BE16(0xffff),
+ .has_vlan = ext_vlan_sup ? 1 : 0,
};
int ret;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
}
+static int
+flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
+ uint32_t domains __rte_unused,
+ uint32_t flags __rte_unused)
+{
+ return 0;
+}
+
/* Void driver to protect from null pointer reference. */
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
.validate = flow_null_validate,
.remove = flow_null_remove,
.destroy = flow_null_destroy,
.query = flow_null_query,
+ .sync_domain = flow_null_sync_domain,
};
/**
return NULL;
}
+/* Maps a shared action to its translated, non-shared counterpart in an actions array. */
+struct mlx5_translated_shared_action {
+ struct rte_flow_shared_action *action; /**< Shared action */
+ int index; /**< Index in related array of rte_flow_action */
+};
+
+/**
+ * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to the related
+ * non-shared action if translation is possible.
+ * This allows flow create to run the same execution path for both shared
+ * and non-shared actions. All necessary preparations for shared action
+ * handling should be performed on the *shared* actions list returned
+ * from this call.
+ *
+ * @param[in] actions
+ * List of actions to translate.
+ * @param[out] shared
+ * List to store translated shared actions.
+ * @param[in, out] shared_n
+ * Size of *shared* array. On return should be updated with number of shared
+ * actions retrieved from the *actions* list.
+ * @param[out] translated_actions
+ * List of actions where all shared actions were translated to non shared
+ * if possible. NULL if no translation took place.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_shared_actions_translate(const struct rte_flow_action actions[],
+ struct mlx5_translated_shared_action *shared,
+ int *shared_n,
+ struct rte_flow_action **translated_actions,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_action *translated = NULL;
+ size_t actions_size;
+ int n;
+ int copied_n = 0;
+ struct mlx5_translated_shared_action *shared_end = NULL;
+
+ for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
+ if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
+ continue;
+ if (copied_n == *shared_n) {
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "too many shared actions");
+ }
+ rte_memcpy(&shared[copied_n].action, &actions[n].conf,
+ sizeof(actions[n].conf));
+ shared[copied_n].index = n;
+ copied_n++;
+ }
+ n++;
+ *shared_n = copied_n;
+ if (!copied_n)
+ return 0;
+ actions_size = sizeof(struct rte_flow_action) * n;
+ translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
+ if (!translated) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ memcpy(translated, actions, actions_size);
+ for (shared_end = shared + copied_n; shared < shared_end; shared++) {
+ const struct rte_flow_shared_action *shared_action;
+
+ shared_action = shared->action;
+ switch (shared_action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ translated[shared->index].type =
+ RTE_FLOW_ACTION_TYPE_RSS;
+ translated[shared->index].conf =
+ &shared_action->rss.origin;
+ break;
+ default:
+ mlx5_free(translated);
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "invalid shared action type");
+ }
+ }
+ *translated_actions = translated;
+ return 0;
+}
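+
+/*
+ * Illustrative only: for an application actions array
+ *   { { .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = handle },
+ *     { .type = RTE_FLOW_ACTION_TYPE_END } }
+ * holding a shared RSS handle, the copy returned above has entry 0
+ * rewritten to { RTE_FLOW_ACTION_TYPE_RSS, &handle->rss.origin } and
+ * shared[0] records { .action = handle, .index = 0 }, so flow creation
+ * proceeds along the regular RSS path.
+ */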
+
+/**
+ * Get Shared RSS action from the action list.
+ *
+ * @param[in] shared
+ * Pointer to the list of actions.
+ * @param[in] shared_n
+ * Actions list length.
+ *
+ * @return
+ * Pointer to the MLX5 RSS action if exists, otherwise return NULL.
+ */
+static struct mlx5_shared_action_rss *
+flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
+ int shared_n)
+{
+ struct mlx5_translated_shared_action *shared_end;
+
+ for (shared_end = shared + shared_n; shared < shared_end; shared++) {
+ struct rte_flow_shared_action *shared_action;
+
+ shared_action = shared->action;
+ switch (shared_action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ __atomic_add_fetch(&shared_action->refcnt, 1,
+ __ATOMIC_RELAXED);
+ return &shared_action->rss;
+ default:
+ break;
+ }
+ }
+ return NULL;
+}
+
+struct rte_flow_shared_action *
+mlx5_flow_get_shared_rss(struct rte_flow *flow)
+{
+ if (flow->shared_rss)
+ return container_of(flow->shared_rss,
+ struct rte_flow_shared_action, rss);
+ else
+ return NULL;
+}
+
static unsigned int
find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action_raw_encap *raw_encap;
+ const struct rte_eth_hairpin_conf *conf;
if (!attr->ingress)
return 0;
queue = actions->conf;
if (queue == NULL)
return 0;
- if (mlx5_rxq_get_type(dev, queue->index) !=
- MLX5_RXQ_TYPE_HAIRPIN)
+ conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
+ if (conf != NULL && !!conf->tx_explicit)
return 0;
queue_action = 1;
action_n++;
rss = actions->conf;
if (rss == NULL || rss->queue_num == 0)
return 0;
- if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
- MLX5_RXQ_TYPE_HAIRPIN)
+ conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
+ if (conf != NULL && !!conf->tx_explicit)
return 0;
queue_action = 1;
action_n++;
return 0;
}
+__extension__
+union tunnel_offload_mark {
+ uint32_t val;
+ struct {
+ uint32_t app_reserve:8;
+ uint32_t table_id:15;
+ uint32_t transfer:1;
+ uint32_t _unused_:8;
+ };
+};
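+
+/*
+ * Example encoding (illustrative, assuming a little-endian bitfield
+ * layout): a miss in the tunnel table with id 5 on the FDB domain is
+ * marked with table_id = 5 and transfer = 1, i.e.
+ * val = (5u << 8) | (1u << 23). tunnel_mark_decode() performs the
+ * reverse split to recover the table entry and its tunnel object.
+ */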
+
+struct tunnel_default_miss_ctx {
+ uint16_t *queue;
+ __extension__
+ union {
+ struct rte_flow_action_rss action_rss;
+ struct rte_flow_action_queue miss_queue;
+ struct rte_flow_action_jump miss_jump;
+ uint8_t raw[0];
+ };
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *app_actions,
+ uint32_t flow_idx,
+ struct tunnel_default_miss_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow *dev_flow;
+ struct rte_flow_attr miss_attr = *attr;
+ const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+ const struct rte_flow_item miss_items[2] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL
+ }
+ };
+ union tunnel_offload_mark mark_id;
+ struct rte_flow_action_mark miss_mark;
+ struct rte_flow_action miss_actions[3] = {
+ [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+ [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
+ };
+ const struct rte_flow_action_jump *jump_data;
+ uint32_t i, flow_table = 0; /* prevent compilation warning */
+ struct flow_grp_info grp_info = {
+ .external = 1,
+ .transfer = attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .std_tbl_fix = 0,
+ };
+ int ret;
+
+ if (!attr->transfer) {
+ uint32_t q_size;
+
+ miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+ q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+ ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+ 0, SOCKET_ID_ANY);
+ if (!ctx->queue)
+ return rte_flow_error_set
+ (error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid default miss RSS");
+ ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+ ctx->action_rss.level = 0;
+ ctx->action_rss.types = priv->rss_conf.rss_hf;
+ ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
+ ctx->action_rss.queue_num = priv->reta_idx_n;
+ ctx->action_rss.key = priv->rss_conf.rss_key;
+ ctx->action_rss.queue = ctx->queue;
+ if (!priv->reta_idx_n || !priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid port configuration");
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ ctx->action_rss.types = 0;
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ ctx->queue[i] = (*priv->reta_idx)[i];
+ } else {
+ miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+ ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+ }
+ miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+ for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+ jump_data = app_actions->conf;
+ miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+ miss_attr.group = jump_data->group;
+ ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+ &flow_table, grp_info, error);
+ if (ret)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid tunnel id");
+ mark_id.app_reserve = 0;
+ mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+ mark_id.transfer = !!attr->transfer;
+ mark_id._unused_ = 0;
+ miss_mark.id = mark_id.val;
+ dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+ miss_items, miss_actions, flow_idx, error);
+ if (!dev_flow)
+ return -rte_errno;
+ dev_flow->flow = flow;
+ dev_flow->external = true;
+ dev_flow->tunnel = tunnel;
+ /* Subflow object was created, we must include one in the list. */
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
+ DRV_LOG(DEBUG,
+ "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+ dev->data->port_id, tunnel->app_tunnel.type,
+ tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+ ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+ miss_actions, error);
+ if (!ret)
+ ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+ error);
+
+ return ret;
+}
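+
+/*
+ * Schematically (illustrative), the default miss rule created above is:
+ *   pattern: eth / end
+ *   actions: mark id=<encoded tunnel table> / rss / end   (non-transfer)
+ *   actions: mark id=<encoded tunnel table> / jump / end  (transfer)
+ * so packets missing in the target tunnel group carry the encoded table
+ * id in the mark and are steered to the application Rx queues or to the
+ * dedicated FDB miss group.
+ */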
+
/**
* The last stage of splitting chain, just creates the subflow
* without any modification.
return ret;
}
+static struct mlx5_flow_tunnel *
+flow_tunnel_from_rule(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[])
+{
+ struct mlx5_flow_tunnel *tunnel;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions))
+ tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
+ else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
+ tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
+ else
+ tunnel = NULL;
+#pragma GCC diagnostic pop
+
+ return tunnel;
+}
+
/**
* Create a flow and add it to @p list.
*
flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
+ const struct rte_flow_action original_actions[],
bool external, struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
const struct rte_flow_action_rss *rss;
+ struct mlx5_translated_shared_action
+ shared_actions[MLX5_MAX_SHARED_ACTIONS];
+ int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
uint8_t buffer[2048];
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
priv->rss_desc)[!!priv->flow_idx];
- const struct rte_flow_action *p_actions_rx = actions;
+ const struct rte_flow_action *p_actions_rx;
uint32_t i;
uint32_t idx = 0;
int hairpin_flow;
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
struct rte_flow_attr attr_factor = {0};
- int ret;
-
+ const struct rte_flow_action *actions;
+ struct rte_flow_action *translated_actions = NULL;
+ struct mlx5_flow_tunnel *tunnel;
+ struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
+ int ret = flow_shared_actions_translate(original_actions,
+ shared_actions,
+ &shared_actions_n,
+ &translated_actions, error);
+
+ if (ret < 0) {
+ MLX5_ASSERT(translated_actions == NULL);
+ return 0;
+ }
+ actions = translated_actions ? translated_actions : original_actions;
memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
- if (external)
- attr_factor.group *= MLX5_FLOW_TABLE_FACTOR;
+ p_actions_rx = actions;
hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
external, hairpin_flow, error);
if (ret < 0)
- return 0;
+ goto error_before_hairpin_split;
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
rte_errno = EINVAL;
- return 0;
+ goto error_before_hairpin_split;
}
flow_hairpin_split(dev, actions, actions_rx.actions,
actions_hairpin_tx.actions, items_tx.items,
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ flow->shared_rss = flow_get_shared_rss_action(shared_actions,
+ shared_actions_n);
/*
* Record the start index when there is a nested call. All sub-flows
* need to be translated before another calling.
error);
if (ret < 0)
goto error;
+ if (is_flow_tunnel_steer_rule(dev, attr,
+ buf->entry[i].pattern,
+ p_actions_rx)) {
+ ret = flow_tunnel_add_default_miss(dev, flow, attr,
+ p_actions_rx,
+ idx,
+ &default_miss_ctx,
+ error);
+ if (ret < 0) {
+ mlx5_free(default_miss_ctx.queue);
+ goto error;
+ }
+ }
}
/* Create the tx flow. */
if (hairpin_flow) {
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
flow_rxq_flags_set(dev, flow);
+ mlx5_free(translated_actions);
/* Nested flow creation index recovery. */
priv->flow_idx = priv->flow_nested_idx;
if (priv->flow_nested_idx)
priv->flow_nested_idx = 0;
+ tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+ if (tunnel) {
+ flow->tunnel = 1;
+ flow->tunnel_id = tunnel->tunnel_id;
+ __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ mlx5_free(default_miss_ctx.queue);
+ }
return idx;
error:
MLX5_ASSERT(flow);
priv->flow_idx = priv->flow_nested_idx;
if (priv->flow_nested_idx)
priv->flow_nested_idx = 0;
+error_before_hairpin_split:
+ mlx5_free(translated_actions);
return 0;
}
mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
+ const struct rte_flow_action original_actions[],
struct rte_flow_error *error)
{
int hairpin_flow;
+ struct mlx5_translated_shared_action
+ shared_actions[MLX5_MAX_SHARED_ACTIONS];
+ int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
+ const struct rte_flow_action *actions;
+ struct rte_flow_action *translated_actions = NULL;
+ int ret = flow_shared_actions_translate(original_actions,
+ shared_actions,
+ &shared_actions_n,
+ &translated_actions, error);
+ if (ret)
+ return ret;
+ actions = translated_actions ? translated_actions : original_actions;
hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
- return flow_drv_validate(dev, attr, items, actions,
+ ret = flow_drv_validate(dev, attr, items, actions,
true, hairpin_flow, error);
+ mlx5_free(translated_actions);
+ return ret;
}
/**
"port not started");
return NULL;
}
+
return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
attr, items, actions, true, error);
}
}
}
+ if (flow->tunnel) {
+ struct mlx5_flow_tunnel *tunnel;
+
+ /* Release the tunnel before the flow memory returns to the pool. */
+ tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
+ RTE_VERIFY(tunnel);
+ if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+ mlx5_flow_tunnel_free(dev, tunnel);
+ }
 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
}
/**
return -ENOTSUP;
}
-#define MLX5_POOL_QUERY_FREQ_US 1000000
-
/**
- * Get number of all validate pools.
+ * Allocate memory for the counter values, wrapped by all the needed
+ * management structures.
*
* @param[in] sh
* Pointer to mlx5_dev_ctx_shared object.
*
* @return
- * The number of all validate pools.
+ * 0 on success, a negative errno value otherwise.
*/
-static uint32_t
-mlx5_get_all_valid_pool_count(struct mlx5_dev_ctx_shared *sh)
+static int
+mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
+ struct mlx5_devx_mkey_attr mkey_attr;
+ struct mlx5_counter_stats_mem_mng *mem_mng;
+ volatile struct flow_counter_stats *raw_data;
+ int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
+ int size = (sizeof(struct flow_counter_stats) *
+ MLX5_COUNTERS_PER_POOL +
+ sizeof(struct mlx5_counter_stats_raw)) * raws_n +
+ sizeof(struct mlx5_counter_stats_mem_mng);
+ size_t pgsize = rte_mem_page_size();
+ uint8_t *mem;
int i;
- uint32_t pools_n = 0;
-
- for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i)
- pools_n += rte_atomic16_read(&sh->cmng.ccont[i].n_valid);
- return pools_n;
-}
+
+ if (pgsize == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
+ size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
+ mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (!mem_mng->umem) {
+ rte_errno = errno;
+ mlx5_free(mem);
+ return -rte_errno;
+ }
+ mkey_attr.addr = (uintptr_t)mem;
+ mkey_attr.size = size;
+ mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
+ mkey_attr.pd = sh->pdn;
+ mkey_attr.log_entity_size = 0;
+ mkey_attr.pg_access = 0;
+ mkey_attr.klm_array = NULL;
+ mkey_attr.klm_num = 0;
+ mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+ mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+ if (!mem_mng->dm) {
+ mlx5_glue->devx_umem_dereg(mem_mng->umem);
+ rte_errno = errno;
+ mlx5_free(mem);
+ return -rte_errno;
+ }
+ mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
+ raw_data = (volatile struct flow_counter_stats *)mem;
+ for (i = 0; i < raws_n; ++i) {
+ mem_mng->raws[i].mem_mng = mem_mng;
+ mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+ }
+ for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+ LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
+ mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
+ next);
+ LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
+ sh->cmng.mem_mng = mem_mng;
+ return 0;
+}
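+
+/*
+ * Layout of the allocation made above (illustrative):
+ *
+ *   mem: | raw counter data: raws_n * MLX5_COUNTERS_PER_POOL stats |
+ *        | raws[]: raws_n * struct mlx5_counter_stats_raw          |
+ *        | struct mlx5_counter_stats_mem_mng                       |
+ *
+ * Only the leading raw-data area is registered as a umem and mkey for
+ * DevX counter queries; the trailing structures are host bookkeeping.
+ */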
+
+/**
+ * Set the statistic memory to the new counter pool.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] pool
+ * Pointer to the pool to set the statistic memory.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_flow_counter_pool *pool)
+{
+ struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+ /* Resize statistic memory once used out. */
+ if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
+ mlx5_flow_create_counter_stat_mem_mng(sh)) {
+ DRV_LOG(ERR, "Cannot resize counter stat mem.");
+ return -1;
+ }
+ rte_spinlock_lock(&pool->sl);
+ pool->raw = cmng->mem_mng->raws + pool->index %
+ MLX5_CNT_CONTAINER_RESIZE;
+ rte_spinlock_unlock(&pool->sl);
+ pool->raw_hw = NULL;
+ return 0;
+}
+
+#define MLX5_POOL_QUERY_FREQ_US 1000000
/**
* Set the periodic procedure for triggering asynchronous batch queries for all
{
uint32_t pools_n, us;
- pools_n = mlx5_get_all_valid_pool_count(sh);
+ pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
mlx5_flow_query_alarm(void *arg)
{
struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_devx_obj *dcs;
- uint16_t offset;
int ret;
- uint8_t batch = sh->cmng.batch;
- uint8_t age = sh->cmng.age;
uint16_t pool_index = sh->cmng.pool_index;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_mng *cmng = &sh->cmng;
struct mlx5_flow_counter_pool *pool;
- int cont_loop = MLX5_CCONT_TYPE_MAX;
+ uint16_t n_valid;
if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
goto set_alarm;
-next_container:
- cont = MLX5_CNT_CONTAINER(sh, batch, age);
- rte_spinlock_lock(&cont->resize_sl);
- if (!cont->pools) {
- rte_spinlock_unlock(&cont->resize_sl);
- /* Check if all the containers are empty. */
- if (unlikely(--cont_loop == 0))
- goto set_alarm;
- batch ^= 0x1;
- pool_index = 0;
- if (batch == 0 && pool_index == 0) {
- age ^= 0x1;
- sh->cmng.batch = batch;
- sh->cmng.age = age;
- }
- goto next_container;
- }
- pool = cont->pools[pool_index];
- rte_spinlock_unlock(&cont->resize_sl);
+ rte_spinlock_lock(&cmng->pool_update_sl);
+ pool = cmng->pools[pool_index];
+ n_valid = cmng->n_valid;
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ /* Set the statistic memory to the new created pool. */
+ if (!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))
+ goto set_alarm;
if (pool->raw_hw)
/* There is a pool query in progress. */
goto set_alarm;
if (!pool->raw_hw)
/* No free counter statistics raw memory. */
goto set_alarm;
- dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
- (&pool->a64_dcs);
- if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) {
- /* Pool without valid counter. */
- pool->raw_hw = NULL;
- goto next_pool;
- }
- offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
/*
* Identify the counters released between query trigger and query
* handle more efficiently. The counter released in this gap period
* will not be taken into account.
*/
pool->query_gen++;
- ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
- offset, NULL, NULL,
+ ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
+ MLX5_COUNTERS_PER_POOL,
+ NULL, NULL,
pool->raw_hw->mem_mng->dm->id,
(void *)(uintptr_t)
- (pool->raw_hw->data + offset),
+ pool->raw_hw->data,
sh->devx_comp,
(uint64_t)(uintptr_t)pool);
if (ret) {
pool->raw_hw = NULL;
goto set_alarm;
}
- pool->raw_hw->min_dcs_id = dcs->id;
LIST_REMOVE(pool->raw_hw, next);
sh->cmng.pending_queries++;
-next_pool:
pool_index++;
- if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
- batch ^= 0x1;
+ if (pool_index >= n_valid)
pool_index = 0;
- if (batch == 0 && pool_index == 0)
- age ^= 0x1;
- }
set_alarm:
- sh->cmng.batch = batch;
sh->cmng.pool_index = pool_index;
- sh->cmng.age = age;
mlx5_set_query_alarm(sh);
}
struct mlx5_flow_counter_pool *pool =
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
struct mlx5_counter_stats_raw *raw_to_free;
- uint8_t age = !!IS_AGE_POOL(pool);
uint8_t query_gen = pool->query_gen ^ 1;
- struct mlx5_pools_container *cont =
- MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
+ struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+ enum mlx5_counter_type cnt_type =
+ pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
if (unlikely(status)) {
raw_to_free = pool->raw_hw;
} else {
raw_to_free = pool->raw;
- if (IS_AGE_POOL(pool))
+ if (pool->is_aged)
mlx5_flow_aging_check(sh, pool);
rte_spinlock_lock(&pool->sl);
pool->raw = pool->raw_hw;
/* Be sure the new raw counters data is updated in memory. */
rte_io_wmb();
if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
- rte_spinlock_lock(&cont->csl);
- TAILQ_CONCAT(&cont->counters,
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ TAILQ_CONCAT(&cmng->counters[cnt_type],
&pool->counters[query_gen], next);
- rte_spinlock_unlock(&cont->csl);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
}
}
LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
sh->cmng.pending_queries--;
}
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hlist_entry *he;
+ union tunnel_offload_mark mbits = { .val = mark };
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+ .reserved = 0,
+ .domain = !!mbits.transfer,
+ .direction = 0,
+ }
+ };
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ return he ?
+ container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group, uint32_t *table,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hlist_entry *he;
+ struct tunnel_tbl_entry *tte;
+ union tunnel_tbl_key key = {
+ .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+ .group = group
+ };
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_hlist *group_hash;
+
+ group_hash = tunnel ? tunnel->groups : thub->groups;
+ he = mlx5_hlist_lookup(group_hash, key.val);
+ if (!he) {
+ int ret;
+ tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+ sizeof(*tte), 0,
+ SOCKET_ID_ANY);
+ if (!tte)
+ goto err;
+ tte->hash.key = key.val;
+ ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table);
+ if (ret) {
+ mlx5_free(tte);
+ goto err;
+ }
+ tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+ mlx5_hlist_insert(group_hash, &tte->hash);
+ } else {
+ tte = container_of(he, typeof(*tte), hash);
+ }
+ *table = tte->flow_table;
+ DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+ dev->data->port_id, key.tunnel_id, group, *table);
+ return 0;
+
+err:
+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "tunnel group index not supported");
+}
+
+static int
+flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
+ struct flow_grp_info grp_info, struct rte_flow_error *error)
+{
+ if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+ if (group == UINT32_MAX)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "group index not supported");
+ *table = group + 1;
+ } else {
+ *table = group;
+ }
+ DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
+ return 0;
+}
+
/**
* Translate the rte_flow group index to HW table value.
*
- * @param[in] attributes
- * Pointer to flow attributes
- * @param[in] external
- * Value is part of flow rule created by request external to PMD.
+ * If tunnel offload is disabled, all group ids are converted to flow
+ * table ids using the standard method.
+ * If tunnel offload is enabled, a group id can be converted using either
+ * the standard or the tunnel conversion method. The conversion method
+ * is selected based on flags in the `grp_info` parameter:
+ * - Internal (grp_info.external == 0) groups are converted with the
+ * standard method.
+ * - Group ids in JUMP actions are converted with the tunnel method.
+ * - A group id in a rule attribute is converted depending on the rule
+ * type and the group id value:
+ * ** non-zero group attributes are converted with the tunnel method
+ * ** a zero group attribute in a non-tunnel rule is converted with the
+ * standard method - there is only one root table
+ * ** a zero group attribute in a steer tunnel rule is converted with
+ * the standard method - single root table
+ * ** a zero group attribute in a match tunnel rule is a special OvS
+ * case: that value is used for portability reasons, and that group
+ * id is converted with the tunnel conversion method.
+ *
+ * @param[in] dev
+ * Port device
+ * @param[in] tunnel
+ * PMD tunnel offload object
* @param[in] group
* rte_flow group index value.
- * @param[out] fdb_def_rule
- * Whether fdb jump to table 1 is configured.
* @param[out] table
* HW table value.
+ * @param[in] grp_info
+ * flags used for conversion
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
- uint32_t group, bool fdb_def_rule, uint32_t *table,
+mlx5_flow_group_to_table(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group, uint32_t *table,
+ struct flow_grp_info grp_info,
struct rte_flow_error *error)
{
- if (attributes->transfer && external && fdb_def_rule) {
- if (group == UINT32_MAX)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "group index not supported");
- *table = group + 1;
+ int ret;
+ bool standard_translation;
+
+ if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
+ group *= MLX5_FLOW_TABLE_FACTOR;
+ if (is_tunnel_offload_active(dev)) {
+ standard_translation = !grp_info.external ||
+ grp_info.std_tbl_fix;
} else {
- *table = group;
+ standard_translation = true;
}
- return 0;
+ DRV_LOG(DEBUG,
+ "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+ dev->data->port_id, group, grp_info.transfer,
+ grp_info.external, grp_info.fdb_def_rule,
+ standard_translation ? "STANDARD" : "TUNNEL");
+ if (standard_translation)
+ ret = flow_group_to_table(dev->data->port_id, group, table,
+ grp_info, error);
+ else
+ ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
+ table, error);
+
+ return ret;
}
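+
+/*
+ * Worked example (illustrative): an external rule with attr group 3
+ * is first scaled to group 3 * MLX5_FLOW_TABLE_FACTOR. With standard
+ * translation the table then becomes group + 1 for transfer rules
+ * under fdb_def_rule, or the group value itself otherwise. With tunnel
+ * translation the table id comes instead from the tunnel hub allocator
+ * in tunnel_flow_group_to_flow_table().
+ */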
/**
dev->data->port_id);
return -ENOTSUP;
}
+
+/* Wrapper for driver action_validate op callback */
+static int
+flow_drv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ const struct mlx5_flow_driver_ops *fops,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "shared action validation unsupported";
+
+ if (!fops->action_validate) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->action_validate(dev, conf, action, error);
+}
+
+/**
+ * Destroys the shared action by handle.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Handle for the shared action to be destroyed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ *
+ * @note: wrapper for driver action_destroy op callback.
+ */
+static int
+mlx5_shared_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "shared action destruction unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (!fops->action_destroy) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->action_destroy(dev, action, error);
+}
+
+/* Wrapper for driver action_update op callback */
+static int
+flow_drv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ const struct mlx5_flow_driver_ops *fops,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "shared action update unsupported";
+
+ if (!fops->action_update) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return -rte_errno;
+ }
+ return fops->action_update(dev, action, action_conf, error);
+}
+
+/**
+ * Create shared action for reuse in multiple flow rules.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration for shared action creation.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ * A valid handle in case of success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_shared_action *
+mlx5_shared_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ static const char err_msg[] = "shared action creation unsupported";
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+ if (flow_drv_action_validate(dev, conf, action, fops, error))
+ return NULL;
+ if (!fops->action_create) {
+ DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, err_msg);
+ return NULL;
+ }
+ return fops->action_create(dev, conf, action, error);
+}
+
+/**
+ * Updates in place the shared action configuration pointed to by the
+ * *shared_action* handle, with the configuration provided as the
+ * *action* argument.
+ * The update affects all flow rules reusing the action via the handle.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] shared_action
+ * Handle for the shared action to be updated.
+ * @param[in] action
+ * Action specification used to modify the action pointed to by the handle.
+ * *action* should be of the same type as the action pointed to by the
+ * *shared_action* handle argument, otherwise it is considered invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_shared_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *shared_action,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_attr attr = { .transfer = 0 };
+ const struct mlx5_flow_driver_ops *fops =
+ flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+ int ret;
+
+ switch (shared_action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "update action type invalid");
+ }
+ ret = flow_drv_action_validate(dev, NULL, action, fops, error);
+ if (ret)
+ return ret;
+ return flow_drv_action_update(dev, shared_action, action->conf,
+ fops, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+}
+
+/**
+ * Query the shared action by handle.
+ *
+ * This function allows retrieving action-specific data such as counters.
+ * Data is gathered by special action which may be present/referenced in
+ * more than one flow rule definition.
+ *
+ * \see RTE_FLOW_ACTION_TYPE_COUNT
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Handle for the shared action to query.
+ * @param[in, out] data
+ * Pointer to storage for the associated query data type.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_shared_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ (void)dev;
+ switch (action->type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
+ __atomic_load(&action->refcnt, (uint32_t *)data,
+ __ATOMIC_RELAXED);
+ return 0;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
+}
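+
+/*
+ * Illustrative application-side usage of the shared action callbacks
+ * above (a sketch; error handling omitted, rss_conf and conf are
+ * hypothetical):
+ *
+ *	struct rte_flow_action rss = {
+ *		.type = RTE_FLOW_ACTION_TYPE_RSS,
+ *		.conf = &rss_conf,
+ *	};
+ *	struct rte_flow_shared_action *handle =
+ *		rte_flow_shared_action_create(port_id, &conf, &rss, &err);
+ *
+ * The handle may then be referenced by any number of rules through an
+ * action of type RTE_FLOW_ACTION_TYPE_SHARED, updated in one place with
+ * rte_flow_shared_action_update(), and finally released with
+ * rte_flow_shared_action_destroy().
+ */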
+
+/**
+ * Destroy all shared actions.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_shared_action_flush(struct rte_eth_dev *dev)
+{
+ struct rte_flow_error error;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_shared_action *action;
+ int ret = 0;
+
+ while (!LIST_EMPTY(&priv->shared_actions)) {
+ action = LIST_FIRST(&priv->shared_actions);
+ ret = mlx5_shared_action_destroy(dev, action, &error);
+ if (ret)
+ /* Stop on failure to avoid spinning on an undeletable action. */
+ break;
+ }
+ return ret;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel)
+{
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+
+ DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+ dev->data->port_id, tunnel->tunnel_id);
+ RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
+ LIST_REMOVE(tunnel, chain);
+ mlx5_flow_id_release(id_pool, tunnel->tunnel_id);
+ mlx5_hlist_destroy(tunnel->groups, NULL, NULL);
+ mlx5_free(tunnel);
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_tunnel *tun;
+
+ LIST_FOREACH(tun, &thub->tunnels, chain) {
+ if (tun->tunnel_id == id)
+ break;
+ }
+
+ return tun;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+ const struct rte_flow_tunnel *app_tunnel)
+{
+ int ret;
+ struct mlx5_flow_tunnel *tunnel;
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids;
+ uint32_t id;
+
+ ret = mlx5_flow_id_get(id_pool, &id);
+ if (ret)
+ return NULL;
+ /*
+ * The mlx5 flow tunnel is an auxiliary data structure.
+ * It is not part of the I/O path, so there is no need to
+ * allocate it from the huge-page pools dedicated to I/O.
+ */
+ tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
+ 0, SOCKET_ID_ANY);
+ if (!tunnel) {
+ mlx5_flow_id_release(id_pool, id);
+ return NULL;
+ }
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 1024);
+ if (!tunnel->groups) {
+ mlx5_flow_id_release(id_pool, id);
+ mlx5_free(tunnel);
+ return NULL;
+ }
+ /* initiate new PMD tunnel */
+ memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
+ tunnel->tunnel_id = id;
+ tunnel->action.type = (typeof(tunnel->action.type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
+ tunnel->action.conf = tunnel;
+ tunnel->item.type = (typeof(tunnel->item.type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
+ tunnel->item.spec = tunnel;
+ tunnel->item.last = NULL;
+ tunnel->item.mask = NULL;
+
+ DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
+ dev->data->port_id, tunnel->tunnel_id);
+
+ return tunnel;
+}
+
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+ const struct rte_flow_tunnel *app_tunnel,
+ struct mlx5_flow_tunnel **tunnel)
+{
+ int ret = 0;
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_tunnel *tun;
+
+ LIST_FOREACH(tun, &thub->tunnels, chain) {
+ if (!memcmp(app_tunnel, &tun->app_tunnel,
+ sizeof(*app_tunnel))) {
+ *tunnel = tun;
+ ret = 0;
+ break;
+ }
+ }
+ if (!tun) {
+ tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
+ if (tun) {
+ LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
+ *tunnel = tun;
+ } else {
+ ret = -ENOMEM;
+ }
+ }
+ if (tun)
+ __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+
+ return ret;
+}
+
+void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
+{
+ struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
+
+ if (!thub)
+ return;
+ if (!LIST_EMPTY(&thub->tunnels))
+ DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
+ mlx5_flow_id_pool_release(thub->tunnel_ids);
+ mlx5_flow_id_pool_release(thub->table_ids);
+ mlx5_hlist_destroy(thub->groups, NULL, NULL);
+ mlx5_free(thub);
+}
+
+int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
+{
+ int err;
+ struct mlx5_flow_tunnel_hub *thub;
+
+ thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
+ 0, SOCKET_ID_ANY);
+ if (!thub)
+ return -ENOMEM;
+ LIST_INIT(&thub->tunnels);
+ thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS);
+ if (!thub->tunnel_ids) {
+ err = -rte_errno;
+ goto err;
+ }
+ thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES);
+ if (!thub->table_ids) {
+ err = -rte_errno;
+ goto err;
+ }
+ thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES);
+ if (!thub->groups) {
+ err = -rte_errno;
+ goto err;
+ }
+ sh->tunnel_hub = thub;
+
+ return 0;
+
+err:
+ if (thub->groups)
+ mlx5_hlist_destroy(thub->groups, NULL, NULL);
+ if (thub->table_ids)
+ mlx5_flow_id_pool_release(thub->table_ids);
+ if (thub->tunnel_ids)
+ mlx5_flow_id_pool_release(thub->tunnel_ids);
+ mlx5_free(thub);
+ return err;
+}
+
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+ (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct mlx5_flow_driver_ops *fops;
+ int ret;
+ struct rte_flow_attr attr = { .transfer = 0 };
+
+ fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+ ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+ if (ret > 0)
+ ret = -ret;
+ return ret;
+}
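+
+/*
+ * Illustrative only: an application flushes pending steering writes
+ * with, e.g. (the domain bit names are assumed to be defined in
+ * rte_pmd_mlx5.h by this series):
+ *
+ *	rte_pmd_mlx5_sync_flow(port_id,
+ *			       MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_FDB);
+ */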