#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
-/* Dev ops structure defined in mlx5.c */
-extern const struct eth_dev_ops mlx5_dev_ops;
-extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-
/** Device flow drivers. */
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
-#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
return 0;
}
+/*
+ * Validate the default miss action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that holds the actions detected until now.
+ * @param[in] attr
+ *   Pointer to flow rule attributes; only ingress, non-transfer rules in
+ *   group 0 may use the default miss action (checked below).
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_default_miss(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "default miss action not supported "
+ "for egress");
+ if (attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "only group 0 is supported");
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ return 0;
+}
+
/*
* Validate the count action.
*
flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
+ /* The OS can determine first a specific flow type (DV, VERBS) */
+ enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
+ if (type != MLX5_FLOW_TYPE_MAX)
+ return type;
+ /* If no OS specific type - continue with DV/VERBS selection */
if (attr->transfer && priv->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
if (!attr->transfer)
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
+/**
+ * Create default miss flow rule matching LACP traffic
+ * (Ethernet type 0x8809) on the given port.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /*
+ * The LACP matching is done by only using ether type since using
+ * a multicast dst mac causes kernel to give low priority to this flow.
+ */
+ static const struct rte_flow_item_eth lacp_spec = {
+ .type = RTE_BE16(0x8809),
+ };
+ static const struct rte_flow_item_eth lacp_mask = {
+ .type = 0xffff,
+ };
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &lacp_spec,
+ .mask = &lacp_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ /* PMD-internal action type, hence the explicit cast. */
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow_error error;
+ uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
+
+ if (!flow_idx)
+ return -rte_errno;
+ return 0;
+}
+
/**
* Destroy a flow.
*
}
priv->isolated = !!enable;
if (enable)
- dev->dev_ops = &mlx5_dev_ops_isolate;
+ dev->dev_ops = &mlx5_os_dev_ops_isolate;
else
- dev->dev_ops = &mlx5_dev_ops;
+ dev->dev_ops = &mlx5_os_dev_ops;
return 0;
}
* should wait for a new round of query as the new arrived packets
* will not be taken into account.
*/
- rte_atomic64_add(&pool->start_query_gen, 1);
+ pool->query_gen++;
ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
offset, NULL, NULL,
pool->raw_hw->mem_mng->dm->id,
sh->devx_comp,
(uint64_t)(uintptr_t)pool);
if (ret) {
- rte_atomic64_sub(&pool->start_query_gen, 1);
DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
" %d", pool->min_dcs->id);
pool->raw_hw = NULL;
struct mlx5_flow_counter_pool *pool =
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
struct mlx5_counter_stats_raw *raw_to_free;
+ uint8_t age = !!IS_AGE_POOL(pool);
+ uint8_t query_gen = pool->query_gen ^ 1;
+ struct mlx5_pools_container *cont =
+ MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
if (unlikely(status)) {
- rte_atomic64_sub(&pool->start_query_gen, 1);
raw_to_free = pool->raw_hw;
} else {
raw_to_free = pool->raw;
rte_spinlock_lock(&pool->sl);
pool->raw = pool->raw_hw;
rte_spinlock_unlock(&pool->sl);
- MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
- rte_atomic64_read(&pool->start_query_gen));
- rte_atomic64_set(&pool->end_query_gen,
- rte_atomic64_read(&pool->start_query_gen));
/* Be sure the new raw counters data is updated in memory. */
rte_cio_wmb();
+ if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_CONCAT(&cont->counters,
+ &pool->counters[query_gen], next);
+ rte_spinlock_unlock(&cont->csl);
+ }
}
LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
pool->raw_hw = NULL;