#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"
+struct tunnel_default_miss_ctx {
+ uint16_t *queue;
+ __extension__
+ union {
+ struct rte_flow_action_rss action_rss;
+ struct rte_flow_action_queue miss_queue;
+ struct rte_flow_action_jump miss_jump;
+ uint8_t raw[0];
+ };
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *app_actions,
+ uint32_t flow_idx,
+ struct tunnel_default_miss_ctx *ctx,
+ struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
- const struct rte_flow_tunnel *app_tunnel,
- struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group, uint32_t *table,
+ struct rte_flow_error *error);
+
+static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
+static void mlx5_flow_pop_thread_workspace(void);
/** Device flow drivers. */
const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
return ret;
}
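+/* Maximum number of expansion entries (and expansion graph stack depth)
+ * used when expanding an RSS flow below.
+ */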
+#define MLX5_RSS_EXP_ELT_N 8
+
/**
* Expand RSS flows into several possible flows according to the RSS hash
* fields requested and the driver capabilities.
const struct mlx5_flow_expand_node graph[],
int graph_root_index)
{
- const int elt_n = 8;
const struct rte_flow_item *item;
const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
const int *next_node;
- const int *stack[elt_n];
+ const int *stack[MLX5_RSS_EXP_ELT_N];
int stack_pos = 0;
- struct rte_flow_item flow_items[elt_n];
+ struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
unsigned int i;
size_t lsize;
size_t user_pattern_size = 0;
memset(&missed_item, 0, sizeof(missed_item));
lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
- elt_n * sizeof(buf->entry[0]);
+ MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
if (lsize <= size) {
buf->entry[0].priority = 0;
- buf->entry[0].pattern = (void *)&buf->entry[elt_n];
+ buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
buf->entries = 0;
addr = buf->entry[0].pattern;
}
/* Go deeper. */
if (node->next) {
next_node = node->next;
- if (stack_pos++ == elt_n) {
+ if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
rte_errno = E2BIG;
return -rte_errno;
}
user_pattern_size);
addr = (void *)(((uintptr_t)addr) + user_pattern_size);
rte_memcpy(addr, flow_items, elt * sizeof(*item));
- addr = (void *)(((uintptr_t)addr) +
- elt * sizeof(*item));
}
}
return lsize;
const struct rte_flow_shared_action *action,
void *data,
struct rte_flow_error *error);
-static inline bool
-mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
- struct rte_flow_tunnel *tunnel,
- const char *err_msg)
-{
- err_msg = NULL;
- if (!is_tunnel_offload_active(dev)) {
- err_msg = "tunnel offload was not activated";
- goto out;
- } else if (!tunnel) {
- err_msg = "no application tunnel";
- goto out;
- }
-
- switch (tunnel->type) {
- default:
- err_msg = "unsupported tunnel type";
- goto out;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- break;
- }
-
-out:
- return !err_msg;
-}
-
-
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
struct rte_flow_tunnel *app_tunnel,
struct rte_flow_action **actions,
uint32_t *num_of_actions,
- struct rte_flow_error *error)
-{
- int ret;
- struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- err_msg);
- ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
- if (ret < 0) {
- return rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
- "failed to initialize pmd tunnel");
- }
- *actions = &tunnel->action;
- *num_of_actions = 1;
- return 0;
-}
-
+ struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
struct rte_flow_tunnel *app_tunnel,
struct rte_flow_item **items,
uint32_t *num_of_items,
- struct rte_flow_error *error)
-{
- int ret;
- struct mlx5_flow_tunnel *tunnel;
- const char *err_msg = NULL;
- bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
- if (!verdict)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- err_msg);
- ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
- if (ret < 0) {
- return rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "failed to initialize pmd tunnel");
- }
- *items = &tunnel->item;
- *num_of_items = 1;
- return 0;
-}
-
+ struct rte_flow_error *error);
static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
- struct rte_flow_item *pmd_items,
- uint32_t num_items, struct rte_flow_error *err)
-{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
-
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->item == pmd_items) {
- LIST_REMOVE(tun, chain);
- break;
- }
- }
- rte_spinlock_unlock(&thub->sl);
- if (!tun || num_items != 1)
- return rte_flow_error_set(err, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "invalid argument");
- if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
- mlx5_flow_tunnel_free(dev, tun);
- return 0;
-}
-
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+ struct rte_flow_item *pmd_items,
+ uint32_t num_items, struct rte_flow_error *err);
static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
- struct rte_flow_action *pmd_actions,
- uint32_t num_actions, struct rte_flow_error *err)
-{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
-
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (&tun->action == pmd_actions) {
- LIST_REMOVE(tun, chain);
- break;
- }
- }
- rte_spinlock_unlock(&thub->sl);
- if (!tun || num_actions != 1)
- return rte_flow_error_set(err, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "invalid argument");
- if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
- mlx5_flow_tunnel_free(dev, tun);
-
- return 0;
-}
-
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions,
+ struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
- struct rte_flow_error *err)
-{
- uint64_t ol_flags = m->ol_flags;
- const struct mlx5_flow_tbl_data_entry *tble;
- const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
- if ((ol_flags & mask) != mask)
- goto err;
- tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
- if (!tble) {
- DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
- dev->data->port_id, m->hash.fdir.hi);
- goto err;
- }
- MLX5_ASSERT(tble->tunnel);
- memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
- info->group_id = tble->group_id;
- info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
- RTE_FLOW_RESTORE_INFO_GROUP_ID |
- RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
- return 0;
-
-err:
- return rte_flow_error_set(err, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "failed to get restore info");
-}
+ struct rte_flow_error *err);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.shared_action_query = mlx5_shared_action_query,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
- .tunnel_action_decap_release = mlx5_flow_action_release,
- .tunnel_item_release = mlx5_flow_item_release,
+ .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+ .tunnel_item_release = mlx5_flow_tunnel_item_release,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};
},
};
-/* Key of thread specific flow workspace data. */
-static pthread_key_t key_workspace;
-
-/* Thread specific flow workspace data once initialization data. */
-static pthread_once_t key_workspace_init;
/**
return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
REG_C_3;
case MLX5_MTR_COLOR:
+ case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
return priv->mtr_color_reg;
case MLX5_COPY_MARK:
start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
(priv->mtr_reg_share ? REG_C_3 : REG_C_4);
skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
- if (id > (REG_C_7 - start_reg))
+ if (id > (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
*/
if (skip_mtr_reg && config->flow_mreg_c
[id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
- if (id >= (REG_C_7 - start_reg))
+ if (id >= (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
return config->flow_mreg_c[2] != REG_NON;
}
+/**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to device flow rule attributes.
+ *
+ * @return
+ * The value of the lowest flow priority.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
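+ /* Root table (group 0, non-transfer) priorities are capped by the
+ * device flow_prio; all other tables use the fixed non-root range.
+ */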
+ if (!attr->group && !attr->transfer)
+ return priv->config.flow_prio - 2;
+ return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate matcher priority of the flow.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint32_t subpriority)
+{
+ uint16_t priority = (uint16_t)attr->priority;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!attr->group && !attr->transfer) {
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = priv->config.flow_prio - 1;
+ return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+ }
+ if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+ priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
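+ /* Each non-root priority level spans three matcher sub-priorities. */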
+ return priority * 3 + subpriority;
+}
+
/**
* Verify the @p item specifications (spec, last, mask) are compatible with the
* NIC capabilities.
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
- return;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
dev_handle->rix_hrxq);
- if (!hrxq)
+ if (hrxq)
+ ind_tbl = hrxq->ind_table;
+ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ dev_handle->rix_srss);
+ if (shared_rss)
+ ind_tbl = shared_rss->ind_tbl;
+ }
+ if (!ind_tbl)
return;
- for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
- int idx = hrxq->ind_table->queues[i];
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ int idx = ind_tbl->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
struct mlx5_priv *priv = dev->data->dev_private;
const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
- if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
- return;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
dev_handle->rix_hrxq);
- if (!hrxq)
+ if (hrxq)
+ ind_tbl = hrxq->ind_table;
+ } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ dev_handle->rix_srss);
+ if (shared_rss)
+ ind_tbl = shared_rss->ind_tbl;
+ }
+ if (!ind_tbl)
return;
MLX5_ASSERT(dev->data->dev_started);
- for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
- int idx = hrxq->ind_table->queues[i];
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ int idx = ind_tbl->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
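+ /* All queues of one RSS action must share the same type: either all
+ * hairpin or all regular.
+ */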
+ enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
unsigned int i;
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
for (i = 0; i != rss->queue_num; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
if (rss->queue[i] >= priv->rxqs_n)
return rte_flow_error_set
(error, EINVAL,
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue[i], "queue is not configured");
+ rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
+ struct mlx5_rxq_ctrl, rxq);
+ if (i == 0)
+ rxq_type = rxq_ctrl->type;
+ if (rxq_type != rxq_ctrl->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "combining hairpin and regular RSS queues is not supported");
}
return 0;
}
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL, "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
return 0;
}
+/**
+ * Validate Geneve TLV option item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] last_item
+ * Previous validated item in the pattern items.
+ * @param[in] geneve_item
+ * Previous GENEVE item specification.
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
+ uint64_t last_item,
+ const struct rte_flow_item *geneve_item,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
+ struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+ uint8_t data_max_supported =
+ hca_attr->max_geneve_tlv_option_data_len * 4;
+ struct mlx5_dev_config *config = &priv->config;
+ const struct rte_flow_item_geneve *geneve_spec;
+ const struct rte_flow_item_geneve *geneve_mask;
+ const struct rte_flow_item_geneve_opt *spec = item->spec;
+ const struct rte_flow_item_geneve_opt *mask = item->mask;
+ unsigned int i;
+ unsigned int data_len;
+ uint8_t tlv_option_len;
+ uint16_t optlen_m, optlen_v;
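+ /* Class and type must be fully masked; option_len is a 5-bit field. */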
+ const struct rte_flow_item_geneve_opt full_mask = {
+ .option_class = RTE_BE16(0xffff),
+ .option_type = 0xff,
+ .option_len = 0x1f,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_geneve_opt_mask;
+ if (!spec)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt class/type/length must be specified");
+ if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt length exceeeds the limit (31)");
+ /* Check if class type and length masks are full. */
+ if (full_mask.option_class != mask->option_class ||
+ full_mask.option_type != mask->option_type ||
+ full_mask.option_len != (mask->option_len & full_mask.option_len))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt class/type/length masks must be full");
+ /* Check if length is supported */
+ if ((uint32_t)spec->option_len >
+ config->hca_attr.max_geneve_tlv_option_data_len)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt length not supported");
+ if (config->hca_attr.max_geneve_tlv_options > 1)
+ DRV_LOG(DEBUG,
+ "max_geneve_tlv_options supports more than 1 option");
+ /* Check GENEVE item preceding. */
+ if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve opt item must be preceded with Geneve item");
+ geneve_spec = geneve_item->spec;
+ geneve_mask = geneve_item->mask ? geneve_item->mask :
+ &rte_flow_item_geneve_mask;
+ /* Check that the GENEVE TLV option size does not exceed the optlen field. */
+ if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
+ geneve_spec->ver_opt_len_o_c_rsvd0)) {
+ tlv_option_len = spec->option_len & mask->option_len;
+ optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
+ optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
+ optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
+ optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
+ if ((optlen_v & optlen_m) <= tlv_option_len)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GENEVE TLV option length exceeds optlen");
+ }
+ /* Check if length is 0 or data is 0. */
+ if (spec->data == NULL || spec->option_len == 0)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt with zero data/length not supported");
+ /* Check that the data and mask are not all zeros. */
+ data_len = spec->option_len * 4;
+ if (mask->data == NULL) {
+ for (i = 0; i < data_len; i++)
+ if (spec->data[i])
+ break;
+ if (i == data_len)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't match on Geneve option data 0");
+ } else {
+ for (i = 0; i < data_len; i++)
+ if (spec->data[i] & mask->data[i])
+ break;
+ if (i == data_len)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't match on Geneve option data and mask 0");
+ /* Check data mask supported. */
+ for (i = data_max_supported; i < data_len; i++)
+ if (mask->data[i])
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Data mask is of unsupported size");
+ }
+ /* Check GENEVE option is supported in NIC. */
+ if (!config->hca_attr.geneve_tlv_opt)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve TLV opt not supported");
+ /* Check if we already have a geneve option with a different type/class. */
+ rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+ geneve_opt_resource = sh->geneve_tlv_option_resource;
+ if (geneve_opt_resource != NULL)
+ if (geneve_opt_resource->option_class != spec->option_class ||
+ geneve_opt_resource->option_type != spec->option_type ||
+ geneve_opt_resource->length != spec->option_len) {
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Only one Geneve TLV option supported");
+ }
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+ return 0;
+}
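+/* Illustrative sketch (not from this patch; class/type/data values are
+ * hypothetical): a pattern accepted by the checks above needs a preceding
+ * GENEVE item, full class/type/length masks and non-zero option data:
+ *
+ * uint32_t opt_data[1] = { RTE_BE32(0xdeadbeef) };
+ * struct rte_flow_item_geneve_opt opt_spec = {
+ * .option_class = RTE_BE16(0x0102),
+ * .option_type = 0x33,
+ * .option_len = 1,
+ * .data = opt_data,
+ * };
+ * struct rte_flow_item_geneve_opt opt_mask = {
+ * .option_class = RTE_BE16(0xffff),
+ * .option_type = 0xff,
+ * .option_len = 0x1f,
+ * };
+ * pattern: ETH / IPV4 / UDP / GENEVE / GENEVE_OPT / END
+ */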
+
/**
* Validate MPLS item.
*
/* MPLS over IP, UDP, GRE is allowed */
if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
MLX5_FLOW_LAYER_OUTER_L4_UDP |
- MLX5_FLOW_LAYER_GRE)))
+ MLX5_FLOW_LAYER_GRE |
+ MLX5_FLOW_LAYER_GRE_KEY)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
MLX5_FLOW_LAYER_OUTER_VLAN);
struct rte_flow_item_ecpri mask_lo;
+ if (!(last_item & outer_l2_vlan) &&
+ last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "eCPRI can only follow L2/VLAN layer or UDP layer");
if ((last_item & outer_l2_vlan) && ether_type &&
ether_type != RTE_ETHER_TYPE_ECPRI)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "eCPRI cannot follow L2/VLAN layer "
- "which ether type is not 0xAEFE.");
+ "eCPRI cannot follow L2/VLAN layer which ether type is not 0xAEFE");
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "eCPRI with tunnel is not supported "
- "right now.");
+ "eCPRI with tunnel is not supported right now");
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "eCPRI cannot follow a TCP layer.");
+ "eCPRI cannot coexist with a TCP layer");
/* In specification, eCPRI could be over UDP layer. */
else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "eCPRI over UDP layer is not yet "
- "supported right now.");
+ "eCPRI over UDP layer is not yet supported right now");
/* Mask for type field in common header could be zero. */
if (!mask)
mask = &rte_flow_item_ecpri_mask;
if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
- "partial mask is not supported "
- "for protocol");
+ "partial mask is not supported for protocol");
else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
- "message header mask must be after "
- "a type mask");
+ "message header mask must be after a type mask");
return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
acc_mask ? (const uint8_t *)acc_mask
: (const uint8_t *)&nic_mask,
static const struct rte_flow_action_rss*
flow_get_rss_action(const struct rte_flow_action actions[])
{
+ const struct rte_flow_action_rss *rss = NULL;
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- return (const struct rte_flow_action_rss *)
- actions->conf;
+ rss = actions->conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ {
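+ /* An RSS action may also be nested inside the sample action's
+ * sub-action list.
+ */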
+ const struct rte_flow_action_sample *sample =
+ actions->conf;
+ const struct rte_flow_action *act = sample->actions;
+ for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
+ if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
+ rss = act->conf;
+ break;
+ }
default:
break;
}
}
- return NULL;
+ return rss;
}
/**
if (queue == NULL)
return 0;
conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
- if (conf != NULL && !!conf->tx_explicit)
+ if (conf == NULL || conf->tx_explicit != 0)
return 0;
queue_action = 1;
action_n++;
if (rss == NULL || rss->queue_num == 0)
return 0;
conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
- if (conf != NULL && !!conf->tx_explicit)
+ if (conf == NULL || conf->tx_explicit != 0)
return 0;
queue_action = 1;
action_n++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4)))
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
split++;
action_n++;
break;
flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
uint32_t flow_idx);
-struct mlx5_hlist_entry *
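+/* Hash list match callback: mark-copy entries are keyed by the mark ID,
+ * returns 0 on match.
+ */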
+int
+flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+
+ return mcp_res->mark_id != key;
+}
+
+struct mlx5_hlist_entry *
flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
void *cb_ctx)
{
};
} else {
/* Default rule, wildcard match. */
- attr.priority = MLX5_FLOW_PRIO_RSVD;
+ attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_END,
};
return NULL;
}
mcp_res->idx = idx;
+ mcp_res->mark_id = mark_id;
/*
* The copy Flows are not included in any list. These
* ones are referenced from other Flows and can not
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
raw_decap = actions->conf;
- if (raw_decap->size <
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
+ if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
return 0;
}
-__extension__
-union tunnel_offload_mark {
- uint32_t val;
- struct {
- uint32_t app_reserve:8;
- uint32_t table_id:15;
- uint32_t transfer:1;
- uint32_t _unused_:8;
- };
-};
-
-struct tunnel_default_miss_ctx {
- uint16_t *queue;
- __extension__
- union {
- struct rte_flow_action_rss action_rss;
- struct rte_flow_action_queue miss_queue;
- struct rte_flow_action_jump miss_jump;
- uint8_t raw[0];
- };
-};
-
-static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_action *app_actions,
- uint32_t flow_idx,
- struct tunnel_default_miss_ctx *ctx,
- struct rte_flow_error *error)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow *dev_flow;
- struct rte_flow_attr miss_attr = *attr;
- const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
- const struct rte_flow_item miss_items[2] = {
- {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .spec = NULL,
- .last = NULL,
- .mask = NULL
- },
- {
- .type = RTE_FLOW_ITEM_TYPE_END,
- .spec = NULL,
- .last = NULL,
- .mask = NULL
- }
- };
- union tunnel_offload_mark mark_id;
- struct rte_flow_action_mark miss_mark;
- struct rte_flow_action miss_actions[3] = {
- [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
- [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
- };
- const struct rte_flow_action_jump *jump_data;
- uint32_t i, flow_table = 0; /* prevent compilation warning */
- struct flow_grp_info grp_info = {
- .external = 1,
- .transfer = attr->transfer,
- .fdb_def_rule = !!priv->fdb_def_rule,
- .std_tbl_fix = 0,
- };
- int ret;
-
- if (!attr->transfer) {
- uint32_t q_size;
-
- miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
- q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
- ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
- 0, SOCKET_ID_ANY);
- if (!ctx->queue)
- return rte_flow_error_set
- (error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- NULL, "invalid default miss RSS");
- ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
- ctx->action_rss.level = 0,
- ctx->action_rss.types = priv->rss_conf.rss_hf,
- ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
- ctx->action_rss.queue_num = priv->reta_idx_n,
- ctx->action_rss.key = priv->rss_conf.rss_key,
- ctx->action_rss.queue = ctx->queue;
- if (!priv->reta_idx_n || !priv->rxqs_n)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- NULL, "invalid port configuration");
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
- ctx->action_rss.types = 0;
- for (i = 0; i != priv->reta_idx_n; ++i)
- ctx->queue[i] = (*priv->reta_idx)[i];
- } else {
- miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
- ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
- }
- miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
- for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
- jump_data = app_actions->conf;
- miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
- miss_attr.group = jump_data->group;
- ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
- &flow_table, grp_info, error);
- if (ret)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- NULL, "invalid tunnel id");
- mark_id.app_reserve = 0;
- mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
- mark_id.transfer = !!attr->transfer;
- mark_id._unused_ = 0;
- miss_mark.id = mark_id.val;
- dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
- miss_items, miss_actions, flow_idx, error);
- if (!dev_flow)
- return -rte_errno;
- dev_flow->flow = flow;
- dev_flow->external = true;
- dev_flow->tunnel = tunnel;
- /* Subflow object was created, we must include one in the list. */
- SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
- dev_flow->handle, next);
- DRV_LOG(DEBUG,
- "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
- dev->data->port_id, tunnel->app_tunnel.type,
- tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
- ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
- miss_actions, error);
- if (!ret)
- ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
- error);
-
- return ret;
-}
-
/**
* The last stage of splitting chain, just creates the subflow
* without any modification.
* Parent flow structure pointer.
* @param[in, out] sub_flow
* Pointer to return the created subflow, may be NULL.
- * @param[in] prefix_layers
- * Prefix subflow layers, may be 0.
- * @param[in] prefix_mark
- * Prefix subflow mark flag, may be 0.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- * This memory pool index to the flow.
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
flow_create_split_inner(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct mlx5_flow **sub_flow,
- uint64_t prefix_layers,
- uint32_t prefix_mark,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, uint32_t flow_idx,
+ struct mlx5_flow_split_info *flow_split_info,
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
- flow_idx, error);
+ flow_split_info->flow_idx, error);
if (!dev_flow)
return -rte_errno;
dev_flow->flow = flow;
- dev_flow->external = external;
+ dev_flow->external = flow_split_info->external;
+ dev_flow->skip_scale = flow_split_info->skip_scale;
/* Subflow object was created, we must include one in the list. */
SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
dev_flow->handle, next);
* flow may need some user defined item layer flags, and pass the
* Metadata rxq mark flag to suffix flow as well.
*/
- if (prefix_layers)
- dev_flow->handle->layers = prefix_layers;
- if (prefix_mark)
+ if (flow_split_info->prefix_layers)
+ dev_flow->handle->layers = flow_split_info->prefix_layers;
+ if (flow_split_info->prefix_mark)
dev_flow->handle->mark = 1;
if (sub_flow)
*sub_flow = dev_flow;
* Pointer to the position of the matched action if exists, otherwise is -1.
* @param[out] qrss_action_pos
* Pointer to the position of the Queue/RSS action if exists, otherwise is -1.
+ * @param[out] modify_after_mirror
+ * Pointer to the flag of modify action after FDB mirroring.
*
* @return
* > 0 the total number of actions.
flow_check_match_action(const struct rte_flow_action actions[],
const struct rte_flow_attr *attr,
enum rte_flow_action_type action,
- int *match_action_pos, int *qrss_action_pos)
+ int *match_action_pos, int *qrss_action_pos,
+ int *modify_after_mirror)
{
const struct rte_flow_action_sample *sample;
int actions_n = 0;
- int jump_flag = 0;
uint32_t ratio = 0;
int sub_type = 0;
int flag = 0;
+ int fdb_mirror = 0;
*match_action_pos = -1;
*qrss_action_pos = -1;
flag = 1;
*match_action_pos = actions_n;
}
- if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
- actions->type == RTE_FLOW_ACTION_TYPE_RSS)
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
*qrss_action_pos = actions_n;
- if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
- jump_flag = 1;
- if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
sample = actions->conf;
ratio = sample->ratio;
sub_type = ((const struct rte_flow_action *)
(sample->actions))->type;
+ if (ratio == 1 && attr->transfer)
+ fdb_mirror = 1;
+ break;
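+ /* All packet-rewrite actions: when one appears after an FDB
+ * mirror, flag it so the split logic moves it past an extra jump.
+ */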
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ if (fdb_mirror)
+ *modify_after_mirror = 1;
+ break;
+ default:
+ break;
}
actions_n++;
}
- if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
- if (ratio == 1) {
- /* JUMP Action not support for Mirroring;
- * Mirroring support multi-destination;
- */
- if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
- flag = 0;
- }
+ if (flag && fdb_mirror && !*modify_after_mirror) {
+ /* FDB mirroring is implemented with the destination array
+ * instead of the FLOW_SAMPLER object.
+ */
+ if (sub_type != RTE_FLOW_ACTION_TYPE_END)
+ flag = 0;
}
/* Count RTE_FLOW_ACTION_TYPE_END. */
return flag ? actions_n + 1 : 0;
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] fdb_tx
- * FDB egress flow flag.
+ * @param[in] add_tag
+ * Add extra tag action flag.
* @param[out] sfx_items
* Suffix flow match items (list terminated by the END pattern item).
* @param[in] actions
* The sample action position.
* @param[in] qrss_action_pos
* The Queue/RSS action position.
+ * @param[in] jump_table
+ * Target group of the extra jump action, 0 if no jump is needed.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
*/
static int
flow_sample_split_prep(struct rte_eth_dev *dev,
- uint32_t fdb_tx,
+ int add_tag,
struct rte_flow_item sfx_items[],
const struct rte_flow_action actions[],
struct rte_flow_action actions_sfx[],
int actions_n,
int sample_action_pos,
int qrss_action_pos,
+ int jump_table,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct mlx5_rte_flow_item_tag *tag_spec;
struct mlx5_rte_flow_item_tag *tag_mask;
+ struct rte_flow_action_jump *jump_action;
uint32_t tag_id = 0;
int index;
+ int append_index = 0;
int ret;
if (sample_action_pos < 0)
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "invalid position of sample "
"action in list");
- if (!fdb_tx) {
+ /* Prepare the actions for prefix and suffix flow. */
+ if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
+ index = qrss_action_pos;
+ /* Put the actions preceding the Queue/RSS action into the prefix flow. */
+ if (index != 0)
+ memcpy(actions_pre, actions,
+ sizeof(struct rte_flow_action) * index);
+ /* Put the other actions preceding the sample action into the prefix flow. */
+ if (sample_action_pos > index + 1)
+ memcpy(actions_pre + index, actions + index + 1,
+ sizeof(struct rte_flow_action) *
+ (sample_action_pos - index - 1));
+ index = sample_action_pos - 1;
+ /* Put Queue/RSS action into Suffix flow. */
+ memcpy(actions_sfx, actions + qrss_action_pos,
+ sizeof(struct rte_flow_action));
+ actions_sfx++;
+ } else {
+ index = sample_action_pos;
+ if (index != 0)
+ memcpy(actions_pre, actions,
+ sizeof(struct rte_flow_action) * index);
+ }
+ /* On CX5, add an extra tag action for NIC-RX and E-Switch ingress.
+ * On CX6DX and above, the metadata registers Cx preserve their value,
+ * so the extra tag action covers NIC-RX and the whole E-Switch domain.
+ */
+ if (add_tag) {
/* Prepare the prefix tag action. */
- set_tag = (void *)(actions_pre + actions_n + 1);
+ append_index++;
+ set_tag = (void *)(actions_pre + actions_n + append_index);
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
if (ret < 0)
return ret;
.type = (enum rte_flow_item_type)
RTE_FLOW_ITEM_TYPE_END,
};
- }
- /* Prepare the actions for prefix and suffix flow. */
- if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
- index = qrss_action_pos;
- /* Put the preceding the Queue/RSS action into prefix flow. */
- if (index != 0)
- memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action) * index);
- /* Put others preceding the sample action into prefix flow. */
- if (sample_action_pos > index + 1)
- memcpy(actions_pre + index, actions + index + 1,
- sizeof(struct rte_flow_action) *
- (sample_action_pos - index - 1));
- index = sample_action_pos - 1;
- /* Put Queue/RSS action into Suffix flow. */
- memcpy(actions_sfx, actions + qrss_action_pos,
- sizeof(struct rte_flow_action));
- actions_sfx++;
- } else {
- index = sample_action_pos;
- if (index != 0)
- memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action) * index);
- }
- /* Add the extra tag action for NIC-RX and E-Switch ingress. */
- if (!fdb_tx) {
+ /* Prepare the tag action in prefix subflow. */
actions_pre[index++] =
(struct rte_flow_action){
.type = (enum rte_flow_action_type)
memcpy(actions_pre + index, actions + sample_action_pos,
sizeof(struct rte_flow_action));
index += 1;
+ /* If a modify action follows the sample action in E-Switch mirroring,
+ * add an extra jump action in the prefix subflow to jump into the next
+ * table, then perform the modify action in the new table.
+ */
+ if (jump_table) {
+ /* Prepare the prefix jump action. */
+ append_index++;
+ jump_action = (void *)(actions_pre + actions_n + append_index);
+ jump_action->group = jump_table;
+ actions_pre[index++] =
+ (struct rte_flow_action){
+ .type = (enum rte_flow_action_type)
+ RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = jump_action,
+ };
+ }
actions_pre[index] = (struct rte_flow_action){
.type = (enum rte_flow_action_type)
RTE_FLOW_ACTION_TYPE_END,
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
- * @param[in] prefix_layers
- * Prefix flow layer flags.
- * @param[in] prefix_mark
- * Prefix subflow mark flag, may be 0.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- * This memory pool index to the flow.
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
static int
flow_create_split_metadata(struct rte_eth_dev *dev,
struct rte_flow *flow,
- uint64_t prefix_layers,
- uint32_t prefix_mark,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, uint32_t flow_idx,
+ struct mlx5_flow_split_info *flow_split_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
if (!config->dv_flow_en ||
config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev))
- return flow_create_split_inner(dev, flow, NULL, prefix_layers,
- prefix_mark, attr, items,
- actions, external, flow_idx,
- error);
+ return flow_create_split_inner(dev, flow, NULL, attr, items,
+ actions, flow_split_info, error);
actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
&encap_idx);
if (qrss) {
goto exit;
}
/* Add the unmodified original or prefix subflow. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
- prefix_mark, attr,
+ ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
items, ext_actions ? ext_actions :
- actions, external, flow_idx, error);
+ actions, flow_split_info, error);
if (ret < 0)
goto exit;
MLX5_ASSERT(dev_flow);
}
dev_flow = NULL;
/* Add suffix subflow to execute Q/RSS. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
+ flow_split_info->prefix_layers = layers;
+ flow_split_info->prefix_mark = 0;
+ ret = flow_create_split_inner(dev, flow, &dev_flow,
&q_attr, mtr_sfx ? items :
q_items, q_actions,
- external, flow_idx, error);
+ flow_split_info, error);
if (ret < 0)
goto exit;
/* qrss ID should be freed if failed. */
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
- * @param[in] prefix_layers
- * Prefix subflow layers, may be 0.
- * @param[in] prefix_mark
- * Prefix subflow mark flag, may be 0.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- * This memory pool index to the flow.
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
static int
flow_create_split_meter(struct rte_eth_dev *dev,
struct rte_flow *flow,
- uint64_t prefix_layers,
- uint32_t prefix_mark,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, uint32_t flow_idx,
+ struct mlx5_flow_split_info *flow_split_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
goto exit;
}
/* Add the prefix subflow. */
+ flow_split_info->prefix_mark = 0;
ret = flow_create_split_inner(dev, flow, &dev_flow,
- prefix_layers, 0,
- attr, items,
- pre_actions, external,
- flow_idx, error);
+ attr, items, pre_actions,
+ flow_split_info, error);
if (ret) {
ret = -rte_errno;
goto exit;
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+ flow_split_info->prefix_layers =
+ flow_get_prefix_layer_flags(dev_flow);
+ flow_split_info->prefix_mark = dev_flow->handle->mark;
}
/* Add the suffix subflow. */
- ret = flow_create_split_metadata(dev, flow, dev_flow ?
- flow_get_prefix_layer_flags(dev_flow) :
- prefix_layers, dev_flow ?
- dev_flow->handle->mark : prefix_mark,
+ ret = flow_create_split_metadata(dev, flow,
&sfx_attr, sfx_items ?
sfx_items : items,
sfx_actions ? sfx_actions : actions,
- external, flow_idx, error);
+ flow_split_info, error);
exit:
if (sfx_actions)
mlx5_free(sfx_actions);
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- * This memory pool index to the flow.
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, uint32_t flow_idx,
+ struct mlx5_flow_split_info *flow_split_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
- union mlx5_flow_tbl_key sfx_table_key;
#endif
size_t act_size;
size_t item_size;
int actions_n = 0;
int sample_action_pos;
int qrss_action_pos;
+ int add_tag = 0;
+ int modify_after_mirror = 0;
+ uint16_t jump_table = 0;
+ const uint32_t next_ft_step = 1;
int ret = 0;
if (priv->sampler_en)
actions_n = flow_check_match_action(actions, attr,
RTE_FLOW_ACTION_TYPE_SAMPLE,
- &sample_action_pos, &qrss_action_pos);
+ &sample_action_pos, &qrss_action_pos,
+ &modify_after_mirror);
if (actions_n) {
/* The prefix actions must include sample, tag, end. */
act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
"sample flow");
/* The representor_id is -1 for uplink. */
fdb_tx = (attr->transfer && priv->representor_id != -1);
- if (!fdb_tx)
+ /*
+ * When reg_c_preserve is set, metadata registers Cx preserve
+ * their value even through packet duplication.
+ */
+ add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+ if (add_tag)
sfx_items = (struct rte_flow_item *)((char *)sfx_actions
+ act_size);
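+ /* A modify action after the mirror is pushed into the next table
+ * level via an extra jump.
+ */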
+ if (modify_after_mirror)
+ jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
+ next_ft_step;
pre_actions = sfx_actions + actions_n;
- tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
+ tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
actions, sfx_actions,
pre_actions, actions_n,
sample_action_pos,
- qrss_action_pos, error);
- if (tag_id < 0 || (!fdb_tx && !tag_id)) {
+ qrss_action_pos, jump_table,
+ error);
+ if (tag_id < 0 || (add_tag && !tag_id)) {
ret = -rte_errno;
goto exit;
}
+ if (modify_after_mirror)
+ flow_split_info->skip_scale =
+ 1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
/* Add the prefix subflow. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
- items, pre_actions, external,
- flow_idx, error);
+ ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
+ items, pre_actions,
+ flow_split_info, error);
if (ret) {
ret = -rte_errno;
goto exit;
}
dev_flow->handle->split_flow_id = tag_id;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /* Set the sfx group attr. */
- sample_res = (struct mlx5_flow_dv_sample_resource *)
- dev_flow->dv.sample_res;
- sfx_tbl = (struct mlx5_flow_tbl_resource *)
- sample_res->normal_path_tbl;
- sfx_tbl_data = container_of(sfx_tbl,
- struct mlx5_flow_tbl_data_entry, tbl);
- sfx_table_key.v64 = sfx_tbl_data->entry.key;
- sfx_attr.group = sfx_attr.transfer ?
- (sfx_table_key.table_id - 1) :
- sfx_table_key.table_id;
+ if (!modify_after_mirror) {
+ /* Set the sfx group attr. */
+ sample_res = (struct mlx5_flow_dv_sample_resource *)
+ dev_flow->dv.sample_res;
+ sfx_tbl = (struct mlx5_flow_tbl_resource *)
+ sample_res->normal_path_tbl;
+ sfx_tbl_data = container_of(sfx_tbl,
+ struct mlx5_flow_tbl_data_entry,
+ tbl);
+ sfx_attr.group = sfx_attr.transfer ?
+ (sfx_tbl_data->table_id - 1) :
+ sfx_tbl_data->table_id;
+ } else {
+ MLX5_ASSERT(attr->transfer);
+ sfx_attr.group = jump_table;
+ }
+ flow_split_info->prefix_layers =
+ flow_get_prefix_layer_flags(dev_flow);
+ flow_split_info->prefix_mark = dev_flow->handle->mark;
+ /* The suffix group level has already been scaled with the factor,
+ * so set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid
+ * scaling it again in translation.
+ */
+ flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
#endif
}
/* Add the suffix subflow. */
- ret = flow_create_split_meter(dev, flow, dev_flow ?
- flow_get_prefix_layer_flags(dev_flow) : 0,
- dev_flow ? dev_flow->handle->mark : 0,
- &sfx_attr, sfx_items ? sfx_items : items,
- sfx_actions ? sfx_actions : actions,
- external, flow_idx, error);
+ ret = flow_create_split_meter(dev, flow, &sfx_attr,
+ sfx_items ? sfx_items : items,
+ sfx_actions ? sfx_actions : actions,
+ flow_split_info, error);
exit:
if (sfx_actions)
mlx5_free(sfx_actions);
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[in] flow_idx
- * This memory pool index to the flow.
+ * @param[in] flow_split_info
+ * Pointer to flow split info structure.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, uint32_t flow_idx,
+ struct mlx5_flow_split_info *flow_split_info,
struct rte_flow_error *error)
{
int ret;
ret = flow_create_split_sample(dev, flow, attr, items,
- actions, external, flow_idx, error);
+ actions, flow_split_info, error);
MLX5_ASSERT(ret <= 0);
return ret;
}
struct mlx5_flow_rss_desc *rss_desc,
uint32_t nrssq_num)
{
- bool fidx = !!wks->flow_idx;
-
- if (likely(nrssq_num <= wks->rssq_num[fidx]))
+ if (likely(nrssq_num <= wks->rssq_num))
return 0;
rss_desc->queue = realloc(rss_desc->queue,
- sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
+ sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
if (!rss_desc->queue) {
rte_errno = ENOMEM;
return -1;
}
- wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
+ wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
return 0;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
- const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action_rss *rss = NULL;
struct mlx5_translated_shared_action
shared_actions[MLX5_MAX_SHARED_ACTIONS];
int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
uint32_t idx = 0;
int hairpin_flow;
struct rte_flow_attr attr_tx = { .priority = 0 };
- struct rte_flow_attr attr_factor = {0};
const struct rte_flow_action *actions;
struct rte_flow_action *translated_actions = NULL;
struct mlx5_flow_tunnel *tunnel;
struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
- struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
- bool fidx = !!wks->flow_idx;
+ struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
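+ /* Split parameters formerly passed as separate arguments are now
+ * grouped in one split-info structure.
+ */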
+ struct mlx5_flow_split_info flow_split_info = {
+ .external = !!external,
+ .skip_scale = 0,
+ .flow_idx = 0,
+ .prefix_mark = 0,
+ .prefix_layers = 0
+ };
int ret;
MLX5_ASSERT(wks);
- rss_desc = &wks->rss_desc[fidx];
+ rss_desc = &wks->rss_desc;
ret = flow_shared_actions_translate(dev, original_actions,
shared_actions,
&shared_actions_n,
return 0;
}
actions = translated_actions ? translated_actions : original_actions;
- memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
p_actions_rx = actions;
- hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
- ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
+ hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ ret = flow_drv_validate(dev, attr, items, p_actions_rx,
external, hairpin_flow, error);
if (ret < 0)
goto error_before_hairpin_split;
idx);
p_actions_rx = actions_rx.actions;
}
- flow->drv_type = flow_get_drv_type(dev, &attr_factor);
+ flow_split_info.flow_idx = idx;
+ flow->drv_type = flow_get_drv_type(dev, attr);
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
- rss = flow_get_rss_action(p_actions_rx);
+ /* The RSS action only works on the NIC Rx domain. */
+ if (attr->ingress && !attr->transfer)
+ rss = flow_get_rss_action(p_actions_rx);
if (rss) {
if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
return 0;
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
- flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
+ rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
shared_actions_n);
- /*
- * Record the start index when there is a nested call. All sub-flows
- * need to be translated before another calling.
- * No need to use ping-pong buffer to save memory here.
- */
- if (fidx) {
- MLX5_ASSERT(!wks->flow_nested_idx);
- wks->flow_nested_idx = fidx;
- }
for (i = 0; i < buf->entries; ++i) {
+ /* Initialize flow split data. */
+ flow_split_info.prefix_layers = 0;
+ flow_split_info.prefix_mark = 0;
+ flow_split_info.skip_scale = 0;
/*
* The splitter may create multiple dev_flows,
* depending on configuration. In the simplest
* case it just creates unmodified original flow.
*/
- ret = flow_create_split_outer(dev, flow, &attr_factor,
+ ret = flow_create_split_outer(dev, flow, attr,
buf->entry[i].pattern,
- p_actions_rx, external, idx,
+ p_actions_rx, &flow_split_info,
error);
if (ret < 0)
goto error;
* the egress Flows belong to the different device and
* copy table should be updated in peer NIC Rx domain.
*/
- if (attr_factor.ingress &&
- (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+ if (attr->ingress &&
+ (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
ret = flow_mreg_update_copy_table(dev, flow, actions, error);
if (ret)
goto error;
}
/*
- * If the flow is external (from application) OR device is started, then
- * the flow will be applied immediately.
+ * If the flow is external (from application) OR device is started,
+ * OR it is an mreg discover flow, then apply it immediately.
*/
- if (external || dev->data->dev_started) {
+ if (external || dev->data->dev_started ||
+ (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
}
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
- /* Nested flow creation index recovery. */
- wks->flow_idx = wks->flow_nested_idx;
- if (wks->flow_nested_idx)
- wks->flow_nested_idx = 0;
tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
if (tunnel) {
flow->tunnel = 1;
__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
mlx5_free(default_miss_ctx.queue);
}
+ mlx5_flow_pop_thread_workspace();
return idx;
error:
MLX5_ASSERT(flow);
ret = rte_errno; /* Save rte_errno before cleanup. */
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
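+ /* Drop the reference taken on the shared RSS action. */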
+ if (rss_desc->shared_rss)
+ __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
+ mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
rte_errno = ret;
- wks->flow_idx = wks->flow_nested_idx;
- if (wks->flow_nested_idx)
- wks->flow_nested_idx = 0;
+ mlx5_flow_pop_thread_workspace();
error_before_hairpin_split:
rte_free(translated_actions);
return 0;
flow_idx, flow, next);
rte_spinlock_unlock(&priv->flow_list_lock);
}
- flow_mreg_del_copy_action(dev, flow);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
if (flow->tunnel) {
struct mlx5_flow_tunnel *tunnel;
- rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- LIST_REMOVE(tunnel, chain);
- rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
mlx5_flow_tunnel_free(dev, tunnel);
}
+ flow_mreg_del_copy_action(dev, flow);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
}
/**
/**
* Release key of thread specific flow workspace data.
*/
-static void
+void
flow_release_workspace(void *data)
{
struct mlx5_flow_workspace *wks = data;
+ struct mlx5_flow_workspace *next;
- if (!wks)
- return;
- free(wks->rss_desc[0].queue);
- free(wks->rss_desc[1].queue);
- free(wks);
+ while (wks) {
+ next = wks->next;
+ free(wks->rss_desc.queue);
+ free(wks);
+ wks = next;
+ }
}
/**
- * Initialize key of thread specific flow workspace data.
+ * Get the current thread-specific flow workspace.
+ *
+ * @return pointer to thread specific flow workspace data, NULL on error.
*/
-static void
-flow_alloc_workspace(void)
+struct mlx5_flow_workspace*
+mlx5_flow_get_thread_workspace(void)
{
- if (pthread_key_create(&key_workspace, flow_release_workspace))
- DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+ struct mlx5_flow_workspace *data;
+
+ data = mlx5_flow_os_get_specific_workspace();
+ MLX5_ASSERT(data && data->inuse);
+ if (!data || !data->inuse)
+ DRV_LOG(ERR, "flow workspace not initialized.");
+ return data;
}
/**
- * Get thread specific flow workspace.
+ * Allocate and init new flow workspace.
*
- * @return pointer to thread specific flowworkspace data, NULL on error.
+ * @return pointer to flow workspace data, NULL on error.
*/
-struct mlx5_flow_workspace*
-mlx5_flow_get_thread_workspace(void)
+static struct mlx5_flow_workspace*
+flow_alloc_thread_workspace(void)
{
- struct mlx5_flow_workspace *data;
+ struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
- if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
- DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
- return NULL;
- }
- data = pthread_getspecific(key_workspace);
if (!data) {
- data = calloc(1, sizeof(*data));
- if (!data) {
- DRV_LOG(ERR, "Failed to allocate flow workspace "
- "memory.");
- return NULL;
- }
- data->rss_desc[0].queue = calloc(1,
- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
- if (!data->rss_desc[0].queue)
- goto err;
- data->rss_desc[1].queue = calloc(1,
- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
- if (!data->rss_desc[1].queue)
- goto err;
- data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
- data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
- if (pthread_setspecific(key_workspace, data)) {
- DRV_LOG(ERR, "Failed to set flow workspace to thread.");
- goto err;
- }
+ DRV_LOG(ERR, "Failed to allocate flow workspace "
+ "memory.");
+ return NULL;
}
+ data->rss_desc.queue = calloc(1,
+ sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
+ if (!data->rss_desc.queue)
+ goto err;
+ data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
return data;
err:
- if (data->rss_desc[0].queue)
- free(data->rss_desc[0].queue);
- if (data->rss_desc[1].queue)
- free(data->rss_desc[1].queue);
+ if (data->rss_desc.queue)
+ free(data->rss_desc.queue);
free(data);
return NULL;
}
+/**
+ * Push a new thread-specific flow workspace to be the current one.
+ *
+ * If the current workspace is already in use, reuse the next pre-allocated
+ * one, or allocate a new workspace, and set it as current.
+ *
+ * @return pointer to thread-specific flow workspace data, NULL on error.
+ */
+static struct mlx5_flow_workspace*
+mlx5_flow_push_thread_workspace(void)
+{
+ struct mlx5_flow_workspace *curr;
+ struct mlx5_flow_workspace *data;
+
+ curr = mlx5_flow_os_get_specific_workspace();
+ if (!curr) {
+ data = flow_alloc_thread_workspace();
+ if (!data)
+ return NULL;
+ } else if (!curr->inuse) {
+ data = curr;
+ } else if (curr->next) {
+ data = curr->next;
+ } else {
+ data = flow_alloc_thread_workspace();
+ if (!data)
+ return NULL;
+ curr->next = data;
+ data->prev = curr;
+ }
+ data->inuse = 1;
+ data->flow_idx = 0;
+ /* Set as current workspace */
+ if (mlx5_flow_os_set_specific_workspace(data))
+ DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+ return data;
+}
+
+/**
+ * Pop (close) the current thread-specific flow workspace.
+ *
+ * If a previous workspace is available, set it as current.
+ */
+static void
+mlx5_flow_pop_thread_workspace(void)
+{
+ struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
+
+ if (!data)
+ return;
+ if (!data->inuse) {
+ DRV_LOG(ERR, "Failed to close unused flow workspace.");
+ return;
+ }
+ data->inuse = 0;
+ if (!data->prev)
+ return;
+ if (mlx5_flow_os_set_specific_workspace(data->prev))
+ DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+}
+
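For context on the mechanism above: push/pop maintain a small per-thread stack of reusable workspaces, so nested flow creation (for example, a metadata copy rule built while another flow is still in progress) gets its own rss_desc instead of clobbering the caller's. Below is a minimal standalone sketch of that pattern; the names (struct ws, ws_push, ws_pop, cur) are illustrative and not the PMD's, and the per-thread slot stands in for mlx5_flow_os_{get,set}_specific_workspace().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for mlx5_flow_workspace. */
struct ws {
	struct ws *prev;
	struct ws *next;
	int inuse;
};

/* Stand-in for the thread-specific workspace slot. */
static __thread struct ws *cur;

static struct ws *
ws_push(void)
{
	struct ws *data;

	if (cur && !cur->inuse) {
		data = cur; /* Reuse the idle current frame. */
	} else if (cur && cur->next) {
		data = cur->next; /* Reuse a previously allocated frame. */
	} else {
		data = calloc(1, sizeof(*data));
		if (!data)
			return NULL;
		if (cur) {
			cur->next = data;
			data->prev = cur;
		}
	}
	data->inuse = 1;
	cur = data;
	return data;
}

static void
ws_pop(void)
{
	if (!cur || !cur->inuse)
		return;
	cur->inuse = 0;
	if (cur->prev)
		cur = cur->prev;
}

int
main(void)
{
	struct ws *outer = ws_push(); /* Outer flow creation. */
	struct ws *inner = ws_push(); /* Nested flow, e.g. mreg copy rule. */

	printf("nested frame is distinct: %d\n", outer != inner);
	ws_pop();
	printf("back to outer frame: %d\n", cur == outer);
	ws_pop();
	return 0;
}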
/**
* Verify the flow list is empty
*
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
};
struct rte_flow_item items[] = {
{
}
priv->isolated = !!enable;
if (enable)
- dev->dev_ops = &mlx5_os_dev_ops_isolate;
+ dev->dev_ops = &mlx5_dev_ops_isolate;
else
- dev->dev_ops = &mlx5_os_dev_ops;
+ dev->dev_ops = &mlx5_dev_ops;
dev->rx_descriptor_status = mlx5_rx_descriptor_status;
dev->tx_descriptor_status = mlx5_tx_descriptor_status;
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+ mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+ mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
+ mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
- mlx5_glue->devx_umem_dereg(mem_mng->umem);
+ mlx5_os_umem_dereg(mem_mng->umem);
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
sh->cmng.pending_queries--;
}
-static const struct mlx5_flow_tbl_data_entry *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *he;
- union tunnel_offload_mark mbits = { .val = mark };
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
- .dummy = 0,
- .domain = !!mbits.transfer,
- .direction = 0,
- }
- };
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
- return he ?
- container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
-static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
-{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
-
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
- tunnel_flow_tbl_to_id(tte->flow_table));
- mlx5_free(tte);
-}
-
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
- uint64_t key __rte_unused,
- void *ctx __rte_unused)
-{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct tunnel_tbl_entry *tte;
-
- tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
- sizeof(*tte), 0,
- SOCKET_ID_ANY);
- if (!tte)
- goto err;
- mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
- &tte->flow_table);
- if (tte->flow_table >= MLX5_MAX_TABLES) {
- DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
- tte->flow_table);
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
- tte->flow_table);
- goto err;
- } else if (!tte->flow_table) {
- goto err;
- }
- tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
- return &tte->hash;
-err:
- if (tte)
- mlx5_free(tte);
- return NULL;
-}
-
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
- const struct mlx5_flow_tunnel *tunnel,
- uint32_t group, uint32_t *table,
- struct rte_flow_error *error)
-{
- struct mlx5_hlist_entry *he;
- struct tunnel_tbl_entry *tte;
- union tunnel_tbl_key key = {
- .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
- .group = group
- };
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_hlist *group_hash;
-
- group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_register(group_hash, key.val, NULL);
- if (!he)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "tunnel group index not supported");
- tte = container_of(he, typeof(*tte), hash);
- *table = tte->flow_table;
- DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
- dev->data->port_id, key.tunnel_id, group, *table);
- return 0;
-}
-
static int
flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
- struct flow_grp_info grp_info, struct rte_flow_error *error)
+ const struct flow_grp_info *grp_info,
+ struct rte_flow_error *error)
{
- if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+ if (grp_info->transfer && grp_info->external &&
+ grp_info->fdb_def_rule) {
if (group == UINT32_MAX)
return rte_flow_error_set
(error, EINVAL,
mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
- struct flow_grp_info grp_info,
+ const struct flow_grp_info *grp_info,
struct rte_flow_error *error)
{
int ret;
bool standard_translation;
- if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
+ if (!grp_info->skip_scale && grp_info->external &&
+ group < MLX5_MAX_TABLES_EXTERNAL)
group *= MLX5_FLOW_TABLE_FACTOR;
if (is_tunnel_offload_active(dev)) {
- standard_translation = !grp_info.external ||
- grp_info.std_tbl_fix;
+ standard_translation = !grp_info->external ||
+ grp_info->std_tbl_fix;
} else {
standard_translation = true;
}
DRV_LOG(DEBUG,
- "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
- dev->data->port_id, group, grp_info.transfer,
- grp_info.external, grp_info.fdb_def_rule,
+ "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
+ dev->data->port_id, group, grp_info->transfer,
+ grp_info->external, grp_info->fdb_def_rule,
standard_translation ? "STANDARD" : "TUNNEL");
if (standard_translation)
ret = flow_group_to_table(dev->data->port_id, group, table,
for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
- .priority = MLX5_FLOW_PRIO_RSVD,
+ .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
.ingress = 1,
};
struct rte_flow_item items[] = {
flow_idx);
if (!flow)
continue;
- if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
- config->flow_mreg_c[n++] = idx;
+ config->flow_mreg_c[n++] = idx;
flow_list_destroy(dev, NULL, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
{
struct rte_flow_error error;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_shared_action_rss *action;
+ struct mlx5_shared_action_rss *shared_rss;
int ret = 0;
uint32_t idx;
ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- priv->rss_shared_actions, idx, action, next) {
+ priv->rss_shared_actions, idx, shared_rss, next) {
ret |= mlx5_shared_action_destroy(dev,
(struct rte_flow_shared_action *)(uintptr_t)idx, &error);
}
return ret;
}
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
- struct mlx5_flow_tunnel *tunnel)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
- dev->data->port_id, tunnel->tunnel_id);
- RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
- tunnel->tunnel_id);
- mlx5_hlist_destroy(tunnel->groups);
- mlx5_free(tunnel);
-}
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+ (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
{
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
-
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (tun->tunnel_id == id)
- break;
- }
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct mlx5_flow_driver_ops *fops;
+ int ret;
+ struct rte_flow_attr attr = { .transfer = 0 };
- return tun;
+ fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+ ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+ if (ret > 0)
+ ret = -ret;
+ return ret;
}
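A usage sketch for the relocated API, assuming a started mlx5 port; the MLX5_DOMAIN_BIT_* masks are the ones exported by rte_pmd_mlx5.h alongside rte_pmd_mlx5_sync_flow():

#include <stdio.h>

#include <rte_pmd_mlx5.h>

/* Sketch: request SW and HW steering sync on all domains of one port. */
static int
sync_all_domains(uint16_t port_id)
{
	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX |
			   MLX5_DOMAIN_BIT_NIC_TX |
			   MLX5_DOMAIN_BIT_FDB;
	int ret = rte_pmd_mlx5_sync_flow(port_id, domains);

	if (ret)
		printf("flow sync on port %u failed: %d\n", port_id, ret);
	return ret;
}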
-static struct mlx5_flow_tunnel *
-mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
- const struct rte_flow_tunnel *app_tunnel)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_tunnel *tunnel;
- uint32_t id;
-
- mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
- &id);
- if (id >= MLX5_MAX_TUNNELS) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
- DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
- return NULL;
- } else if (!id) {
- return NULL;
- }
- /**
- * mlx5 flow tunnel is an auxlilary data structure
- * It's not part of IO. No need to allocate it from
- * huge pages pools dedicated for IO
- */
- tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
- 0, SOCKET_ID_ANY);
- if (!tunnel) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
- return NULL;
- }
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
- mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
- if (!tunnel->groups) {
- mlx5_ipool_free(priv->sh->ipool
- [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
- mlx5_free(tunnel);
- return NULL;
- }
- tunnel->groups->ctx = priv->sh;
- /* initiate new PMD tunnel */
- memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
- tunnel->tunnel_id = id;
- tunnel->action.type = (typeof(tunnel->action.type))
- MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
- tunnel->action.conf = tunnel;
- tunnel->item.type = (typeof(tunnel->item.type))
- MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
- tunnel->item.spec = tunnel;
- tunnel->item.last = NULL;
- tunnel->item.mask = NULL;
-
- DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
- dev->data->port_id, tunnel->tunnel_id);
+/**
+ * Tunnel offload functionality is defined for the DV environment only.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+ uint32_t val;
+ struct {
+ uint32_t app_reserve:8;
+ uint32_t table_id:15;
+ uint32_t transfer:1;
+ uint32_t _unused_:8;
+ };
+};
- return tunnel;
-}
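The union above packs the tunnel-offload restore data into the 32-bit flow mark: 8 bits reserved for the application, a 15-bit flow-table id, one transfer-domain bit, and 8 unused bits. A self-contained encode/decode sketch mirroring that layout (illustrative only, not PMD code):

#include <stdint.h>
#include <stdio.h>

__extension__
union mark {
	uint32_t val;
	struct {
		uint32_t app_reserve:8;
		uint32_t table_id:15;
		uint32_t transfer:1;
		uint32_t _unused_:8;
	};
};

int
main(void)
{
	union mark m = { .val = 0 };

	m.table_id = 0x2a; /* Tunnel flow-table id. */
	m.transfer = 1;    /* FDB (transfer) domain. */
	printf("mark=%#x -> table_id=%u transfer=%u\n",
	       m.val, m.table_id, m.transfer);
	return 0;
}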
+static bool
+mlx5_access_tunnel_offload_db
+ (struct rte_eth_dev *dev,
+ bool (*match)(struct rte_eth_dev *,
+ struct mlx5_flow_tunnel *, const void *),
+ void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+ void (*miss)(struct rte_eth_dev *, void *),
+ void *ctx, bool lock_op);
static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
- const struct rte_flow_tunnel *app_tunnel,
- struct mlx5_flow_tunnel **tunnel)
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *app_actions,
+ uint32_t flow_idx,
+ struct tunnel_default_miss_ctx *ctx,
+ struct rte_flow_error *error)
{
- int ret;
- struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
- struct mlx5_flow_tunnel *tun;
-
- rte_spinlock_lock(&thub->sl);
- LIST_FOREACH(tun, &thub->tunnels, chain) {
- if (!memcmp(app_tunnel, &tun->app_tunnel,
- sizeof(*app_tunnel))) {
- *tunnel = tun;
- ret = 0;
- break;
- }
- }
- if (!tun) {
- tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
- if (tun) {
- LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
- *tunnel = tun;
- } else {
- ret = -ENOMEM;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow *dev_flow;
+ struct rte_flow_attr miss_attr = *attr;
+ const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+ const struct rte_flow_item miss_items[2] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL
}
- }
- rte_spinlock_unlock(&thub->sl);
- if (tun)
- __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+ };
+ union tunnel_offload_mark mark_id;
+ struct rte_flow_action_mark miss_mark;
+ struct rte_flow_action miss_actions[3] = {
+ [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+ [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
+ };
+ const struct rte_flow_action_jump *jump_data;
+ uint32_t i, flow_table = 0; /* prevent compilation warning */
+ struct flow_grp_info grp_info = {
+ .external = 1,
+ .transfer = attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .std_tbl_fix = 0,
+ };
+ int ret;
+
+ if (!attr->transfer) {
+ uint32_t q_size;
+
+ miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+ q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+ ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+ 0, SOCKET_ID_ANY);
+ if (!ctx->queue)
+ return rte_flow_error_set
+ (error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid default miss RSS");
+ ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+ ctx->action_rss.level = 0;
+ ctx->action_rss.types = priv->rss_conf.rss_hf;
+ ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
+ ctx->action_rss.queue_num = priv->reta_idx_n;
+ ctx->action_rss.key = priv->rss_conf.rss_key;
+ ctx->action_rss.queue = ctx->queue;
+ if (!priv->reta_idx_n || !priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid port configuration");
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ ctx->action_rss.types = 0;
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ ctx->queue[i] = (*priv->reta_idx)[i];
+ } else {
+ miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+ ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+ }
+ miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+ for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+ jump_data = app_actions->conf;
+ miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+ miss_attr.group = jump_data->group;
+ ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+ &flow_table, &grp_info, error);
+ if (ret)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "invalid tunnel id");
+ mark_id.app_reserve = 0;
+ mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+ mark_id.transfer = !!attr->transfer;
+ mark_id._unused_ = 0;
+ miss_mark.id = mark_id.val;
+ dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+ miss_items, miss_actions, flow_idx, error);
+ if (!dev_flow)
+ return -rte_errno;
+ dev_flow->flow = flow;
+ dev_flow->external = true;
+ dev_flow->tunnel = tunnel;
+ /* Subflow object was created, we must include it in the list. */
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
+ DRV_LOG(DEBUG,
+ "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+ dev->data->port_id, tunnel->app_tunnel.type,
+ tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+ ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+ miss_actions, error);
+ if (!ret)
+ ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+ error);
return ret;
}
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hlist_entry *he;
+ union tunnel_offload_mark mbits = { .val = mark };
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+ .dummy = 0,
+ .domain = !!mbits.transfer,
+ .direction = 0,
+ }
+ };
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+ return he ?
+ container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tunnel_flow_tbl_to_id(tte->flow_table));
+ mlx5_free(tte);
+}
+
+static int
+mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key, void *cb_ctx __rte_unused)
+{
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
+}
+
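The new match callback is needed because the hash list only hashes the 64-bit key: two distinct (tunnel id, group) pairs can land in the same bucket, so each entry now stores both fields for an exact comparison (the callback returns 0 on match, memcmp-style). A sketch of how the two ids pack into the key, mirroring union tunnel_tbl_key (the field order here is an assumption taken from mlx5_flow.h of this era):

#include <stdint.h>
#include <stdio.h>

__extension__
union tbl_key {
	uint64_t val;
	struct {
		uint32_t tunnel_id;
		uint32_t group;
	};
};

int
main(void)
{
	union tbl_key k = { .tunnel_id = 3, .group = 5 };

	/* The 64-bit value is what the hash list hashes; the match
	 * callback still compares both fields to reject collisions. */
	printf("tunnel_id=%u group=%u key=0x%016llx\n",
	       k.tunnel_id, k.group, (unsigned long long)k.val);
	return 0;
}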
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
+ void *ctx __rte_unused)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct tunnel_tbl_entry *tte;
+ union tunnel_tbl_key tbl = {
+ .val = key,
+ };
+
+ tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+ sizeof(*tte), 0,
+ SOCKET_ID_ANY);
+ if (!tte)
+ goto err;
+ mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ &tte->flow_table);
+ if (tte->flow_table >= MLX5_MAX_TABLES) {
+ DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+ tte->flow_table);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+ tte->flow_table);
+ goto err;
+ } else if (!tte->flow_table) {
+ goto err;
+ }
+ tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+ tte->tunnel_id = tbl.tunnel_id;
+ tte->group = tbl.group;
+ return &tte->hash;
+err:
+ if (tte)
+ mlx5_free(tte);
+ return NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group, uint32_t *table,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hlist_entry *he;
+ struct tunnel_tbl_entry *tte;
+ union tunnel_tbl_key key = {
+ .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+ .group = group
+ };
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_hlist *group_hash;
+
+ group_hash = tunnel ? tunnel->groups : thub->groups;
+ he = mlx5_hlist_register(group_hash, key.val, NULL);
+ if (!he)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "tunnel group index not supported");
+ tte = container_of(he, typeof(*tte), hash);
+ *table = tte->flow_table;
+ DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+ dev->data->port_id, key.tunnel_id, group, *table);
+ return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool;
+
+ DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+ dev->data->port_id, tunnel->tunnel_id);
+ LIST_REMOVE(tunnel, chain);
+ mlx5_hlist_destroy(tunnel->groups);
+ ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+ mlx5_ipool_free(ipool, tunnel->tunnel_id);
+}
+
+static bool
+mlx5_access_tunnel_offload_db
+ (struct rte_eth_dev *dev,
+ bool (*match)(struct rte_eth_dev *,
+ struct mlx5_flow_tunnel *, const void *),
+ void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+ void (*miss)(struct rte_eth_dev *, void *),
+ void *ctx, bool lock_op)
+{
+ bool verdict = false;
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct mlx5_flow_tunnel *tunnel;
+
+ rte_spinlock_lock(&thub->sl);
+ LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+ verdict = match(dev, tunnel, (const void *)ctx);
+ if (verdict)
+ break;
+ }
+ if (!lock_op)
+ rte_spinlock_unlock(&thub->sl);
+ if (verdict && hit)
+ hit(dev, tunnel, ctx);
+ if (!verdict && miss)
+ miss(dev, ctx);
+ if (lock_op)
+ rte_spinlock_unlock(&thub->sl);
+
+ return verdict;
+}
+
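mlx5_access_tunnel_offload_db() concentrates the spinlock handling around the tunnel list: the list walk always runs under the lock, and lock_op selects whether the hit/miss callbacks also run under it (true, the lookup and get paths) or after it is dropped (false, the release paths, where the hit callback may free the tunnel and take the lock itself through mlx5_flow_tunnel_free). A reduced standalone sketch of that traversal contract, with generic names (struct node, access_db, a pthread mutex) that are not the PMD's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk the list under the lock; run hit/miss inside or outside of it. */
static bool
access_db(bool (*match)(struct node *, const void *),
	  void (*hit)(struct node *, void *),
	  void (*miss)(void *),
	  void *ctx, bool lock_op)
{
	struct node *n;
	bool verdict = false;

	pthread_mutex_lock(&lock);
	for (n = head; n; n = n->next) {
		verdict = match(n, ctx);
		if (verdict)
			break;
	}
	if (!lock_op)
		pthread_mutex_unlock(&lock);
	if (verdict && hit)
		hit(n, ctx);
	if (!verdict && miss)
		miss(ctx);
	if (lock_op)
		pthread_mutex_unlock(&lock);
	return verdict;
}

struct find_ctx { int id; struct node *out; };

static bool
match_id(struct node *n, const void *x)
{
	return n->id == ((const struct find_ctx *)x)->id;
}

static void
hit_id(struct node *n, void *x)
{
	((struct find_ctx *)x)->out = n;
}

int
main(void)
{
	struct node a = { .id = 7, .next = NULL };
	struct find_ctx ctx = { .id = 7, .out = NULL };

	head = &a;
	access_db(match_id, hit_id, NULL, &ctx, true);
	printf("found id=%d\n", ctx.out ? ctx.out->id : -1);
	return 0;
}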
+struct tunnel_db_find_tunnel_id_ctx {
+ uint32_t tunnel_id;
+ struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+ RTE_SET_USED(dev);
+ ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+ struct tunnel_db_find_tunnel_id_ctx ctx = {
+ .tunnel_id = id,
+ };
+
+ mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+ find_tunnel_id_hit, NULL, &ctx, true);
+
+ return ctx.tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+ const struct rte_flow_tunnel *app_tunnel)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool *ipool;
+ struct mlx5_flow_tunnel *tunnel;
+ uint32_t id;
+
+ ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+ tunnel = mlx5_ipool_zmalloc(ipool, &id);
+ if (!tunnel)
+ return NULL;
+ if (id >= MLX5_MAX_TUNNELS) {
+ mlx5_ipool_free(ipool, id);
+ DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+ return NULL;
+ }
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ mlx5_flow_tunnel_grp2tbl_create_cb,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
+ mlx5_flow_tunnel_grp2tbl_remove_cb);
+ if (!tunnel->groups) {
+ mlx5_ipool_free(ipool, id);
+ return NULL;
+ }
+ tunnel->groups->ctx = priv->sh;
+ /* Initialize the new PMD tunnel. */
+ memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
+ tunnel->tunnel_id = id;
+ tunnel->action.type = (typeof(tunnel->action.type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
+ tunnel->action.conf = tunnel;
+ tunnel->item.type = (typeof(tunnel->item.type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
+ tunnel->item.spec = tunnel;
+ tunnel->item.last = NULL;
+ tunnel->item.mask = NULL;
+
+ DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
+ dev->data->port_id, tunnel->tunnel_id);
+
+ return tunnel;
+}
+
+struct tunnel_db_get_tunnel_ctx {
+ const struct rte_flow_tunnel *app_tunnel;
+ struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+ sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ /* called under tunnel spinlock protection */
+ struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ tunnel->refctn++;
+ ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+ /* called under tunnel spinlock protection */
+ struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+ struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+ rte_spinlock_unlock(&thub->sl);
+ ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+ rte_spinlock_lock(&thub->sl);
+ if (ctx->tunnel) {
+ ctx->tunnel->refctn = 1;
+ LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+ }
+}
+
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+ const struct rte_flow_tunnel *app_tunnel,
+ struct mlx5_flow_tunnel **tunnel)
+{
+ struct tunnel_db_get_tunnel_ctx ctx = {
+ .app_tunnel = app_tunnel,
+ };
+
+ mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+ get_tunnel_miss, &ctx, true);
+ *tunnel = ctx.tunnel;
+ return ctx.tunnel ? 0 : -ENOMEM;
+}
+
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
{
struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
rte_spinlock_init(&thub->sl);
thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
0, mlx5_flow_tunnel_grp2tbl_create_cb,
- NULL,
+ mlx5_flow_tunnel_grp2tbl_match_cb,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!thub->groups) {
err = -rte_errno;
return err;
}
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
- (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *tunnel,
+ const char **err_msg)
+{
+ *err_msg = NULL;
+ if (!is_tunnel_offload_active(dev)) {
+ *err_msg = "tunnel offload was not activated";
+ goto out;
+ } else if (!tunnel) {
+ *err_msg = "no application tunnel";
+ goto out;
+ }
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+ switch (tunnel->type) {
+ default:
+ *err_msg = "unsupported tunnel type";
+ goto out;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ break;
+ }
+
+out:
+ return !*err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *app_tunnel,
+ struct rte_flow_action **actions,
+ uint32_t *num_of_actions,
+ struct rte_flow_error *error)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct mlx5_flow_driver_ops *fops;
int ret;
- struct rte_flow_attr attr = { .transfer = 0 };
+ struct mlx5_flow_tunnel *tunnel;
+ const char *err_msg = NULL;
+ bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
- fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
- ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
- if (ret > 0)
- ret = -ret;
- return ret;
+ if (!verdict)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ err_msg);
+ ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+ if (ret < 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "failed to initialize pmd tunnel");
+ }
+ *actions = &tunnel->action;
+ *num_of_actions = 1;
+ return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *app_tunnel,
+ struct rte_flow_item **items,
+ uint32_t *num_of_items,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct mlx5_flow_tunnel *tunnel;
+ const char *err_msg = NULL;
+ bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+ if (!verdict)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ err_msg);
+ ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+ if (ret < 0) {
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "failed to initialize pmd tunnel");
+ }
+ *items = &tunnel->item;
+ *num_of_items = 1;
+ return 0;
+}
+
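Application-side usage sketch of these two callbacks through the generic rte_flow tunnel-offload API (rte_flow_tunnel_decap_set() and rte_flow_tunnel_match(), both in rte_flow.h since DPDK 20.11); port_id is assumed to be a configured mlx5 port, and error handling is trimmed:

#include <stdio.h>

#include <rte_flow.h>

/* Sketch: obtain PMD actions/items for VXLAN tunnel offload rules. */
static int
offload_vxlan_decap(uint16_t port_id)
{
	struct rte_flow_tunnel tunnel = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	};
	struct rte_flow_action *pmd_actions;
	struct rte_flow_item *pmd_items;
	uint32_t n_actions, n_items;
	struct rte_flow_error err;

	if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
				      &n_actions, &err))
		return -1;
	if (rte_flow_tunnel_match(port_id, &tunnel, &pmd_items,
				  &n_items, &err))
		return -1;
	/* pmd_actions go into the group 0 "decap set" rule; pmd_items are
	 * prepended to the patterns of the group > 0 match rules. Both are
	 * returned later via rte_flow_tunnel_action_decap_release() and
	 * rte_flow_tunnel_item_release(), which end up in the two release
	 * callbacks above.
	 */
	printf("got %u pmd action(s), %u pmd item(s)\n", n_actions, n_items);
	return 0;
}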
+struct tunnel_db_element_release_ctx {
+ struct rte_flow_item *items;
+ struct rte_flow_action *actions;
+ uint32_t num_elements;
+ struct rte_flow_error *error;
+ int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+ const struct tunnel_db_element_release_ctx *ctx = x;
+
+ RTE_SET_USED(dev);
+ if (ctx->num_elements != 1)
+ return false;
+ else if (ctx->items)
+ return ctx->items == &tunnel->item;
+ else if (ctx->actions)
+ return ctx->actions == &tunnel->action;
+
+ return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+ struct mlx5_flow_tunnel *tunnel, void *x)
+{
+ struct tunnel_db_element_release_ctx *ctx = x;
+ ctx->ret = 0;
+ if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+ mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+ struct tunnel_db_element_release_ctx *ctx = x;
+ RTE_SET_USED(dev);
+ ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "invalid argument");
+}
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+ struct rte_flow_item *pmd_items,
+ uint32_t num_items, struct rte_flow_error *err)
+{
+ struct tunnel_db_element_release_ctx ctx = {
+ .items = pmd_items,
+ .actions = NULL,
+ .num_elements = num_items,
+ .error = err,
+ };
+
+ mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+ tunnel_element_release_hit,
+ tunnel_element_release_miss, &ctx, false);
+
+ return ctx.ret;
}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions, struct rte_flow_error *err)
+{
+ struct tunnel_db_element_release_ctx ctx = {
+ .items = NULL,
+ .actions = pmd_actions,
+ .num_elements = num_actions,
+ .error = err,
+ };
+
+ mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+ tunnel_element_release_hit,
+ tunnel_element_release_miss, &ctx, false);
+
+ return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+ struct rte_mbuf *m,
+ struct rte_flow_restore_info *info,
+ struct rte_flow_error *err)
+{
+ uint64_t ol_flags = m->ol_flags;
+ const struct mlx5_flow_tbl_data_entry *tble;
+ const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+ if (!is_tunnel_offload_active(dev)) {
+ info->flags = 0;
+ return 0;
+ }
+
+ if ((ol_flags & mask) != mask)
+ goto err;
+ tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+ if (!tble) {
+ DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+ dev->data->port_id, m->hash.fdir.hi);
+ goto err;
+ }
+ MLX5_ASSERT(tble->tunnel);
+ memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+ info->group_id = tble->group_id;
+ info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+ RTE_FLOW_RESTORE_INFO_GROUP_ID |
+ RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+ return 0;
+
+err:
+ return rte_flow_error_set(err, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "failed to get restore info");
+}
+
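The matching application-side entry point is rte_flow_get_restore_info(). A hedged datapath sketch, assuming m was just received on a port with tunnel-offload rules installed:

#include <stdio.h>

#include <rte_mbuf.h>
#include <rte_flow.h>

/* Sketch: recover tunnel info for a packet that missed the group > 0 rules. */
static void
handle_tunnel_miss(uint16_t port_id, struct rte_mbuf *m)
{
	struct rte_flow_restore_info info;
	struct rte_flow_error error;

	if (rte_flow_get_restore_info(port_id, m, &info, &error))
		return; /* No restore info attached to this packet. */
	if ((info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) &&
	    (info.flags & RTE_FLOW_RESTORE_INFO_ENCAPSULATED))
		/* Packet still carries the outer headers described here. */
		printf("tunnel type %d, miss in group %u\n",
		       (int)info.tunnel.type, info.group_id);
}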
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_tunnel *app_tunnel,
+ __rte_unused struct rte_flow_action **actions,
+ __rte_unused uint32_t *num_of_actions,
+ __rte_unused struct rte_flow_error *error)
+{
+ return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_tunnel *app_tunnel,
+ __rte_unused struct rte_flow_item **items,
+ __rte_unused uint32_t *num_of_items,
+ __rte_unused struct rte_flow_error *error)
+{
+ return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_item *pmd_items,
+ __rte_unused uint32_t num_items,
+ __rte_unused struct rte_flow_error *err)
+{
+ return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_action *pmd_action,
+ __rte_unused uint32_t num_actions,
+ __rte_unused struct rte_flow_error *err)
+{
+ return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_mbuf *m,
+ __rte_unused struct rte_flow_restore_info *i,
+ __rte_unused struct rte_flow_error *err)
+{
+ return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow *flow,
+ __rte_unused const struct rte_flow_attr *attr,
+ __rte_unused const struct rte_flow_action *actions,
+ __rte_unused uint32_t flow_idx,
+ __rte_unused struct tunnel_default_miss_ctx *ctx,
+ __rte_unused struct rte_flow_error *error)
+{
+ return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint32_t id)
+{
+ return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_flow_tunnel *t,
+ __rte_unused uint32_t group,
+ __rte_unused uint32_t *table,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "tunnel offload requires DV support");
+}
+
+void
+mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
+ __rte_unused uint16_t port_id)
+{
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+