LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
};
+/* Default miss action resource structure. */
+struct mlx5_flow_default_miss_resource {
+ void *action; /* Pointer to the rdma-core action. */
+ rte_atomic32_t refcnt; /* Default miss action reference counter. */
+};
+
#define MLX5_AGE_EVENT_NEW 1
#define MLX5_AGE_TRIGGER 2
#define MLX5_AGE_SET(age_info, BIT) \
uint32_t port_id_action_list; /* List of port ID actions. */
uint32_t push_vlan_action_list; /* List of push VLAN actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
+ struct mlx5_flow_default_miss_resource default_miss;
+ /* Default miss action resource structure. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
/* Memory Pool for mlx5 flow resources. */
struct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
struct rte_flow_item_eth *eth_spec,
struct rte_flow_item_eth *eth_mask);
+int mlx5_flow_lacp_miss(struct rte_eth_dev *dev);
struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
return 0;
}
+/*
+ * Validate the default miss action.
+ *
+ * @param[in] action_flags
+ * Bit-fields that hold the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_default_miss(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "default miss action not supported "
+ "for egress");
+ if (attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "only group 0 is supported");
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ return 0;
+}
+
/*
* Validate the count action.
*
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
+/**
+ * Create a default miss flow rule matching LACP traffic.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /*
+ * The LACP matching is done using only the ether type, since using
+ * a multicast destination MAC causes the kernel to give this flow a
+ * low priority.
+ */
+ static const struct rte_flow_item_eth lacp_spec = {
+ .type = RTE_BE16(0x8809),
+ };
+ static const struct rte_flow_item_eth lacp_mask = {
+ .type = RTE_BE16(0xffff),
+ };
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &lacp_spec,
+ .mask = &lacp_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow_error error;
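+ /* Register the rule on the control flow list as an internal flow. */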
+ uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
+
+ if (!flow_idx)
+ return -rte_errno;
+ return 0;
+}
+
/**
* Destroy a flow.
*
MLX5_RTE_FLOW_ACTION_TYPE_TAG,
MLX5_RTE_FLOW_ACTION_TYPE_MARK,
MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
};
/* Matches on selected register. */
#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
#define MLX5_FLOW_ACTION_AGE (1ull << 34)
+#define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
- MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP)
+ MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
+ MLX5_FLOW_ACTION_DEFAULT_MISS)
#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
MLX5_FLOW_FATE_JUMP,
MLX5_FLOW_FATE_PORT_ID,
MLX5_FLOW_FATE_DROP,
+ MLX5_FLOW_FATE_DEFAULT_MISS,
MLX5_FLOW_FATE_MAX,
};
/**< Index to port ID action resource. */
uint32_t rix_fate;
/**< Generic value indicates the fate action. */
+ uint32_t rix_default_fate;
+ /**< Indicates default miss fate action. */
};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
const struct rte_flow_attr *attr,
uint64_t item_flags,
struct rte_flow_error *error);
+int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error);
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_resource *tbl);
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
return 0;
}
+/**
+ * Find existing default miss resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+ int cnt = rte_atomic32_read(&cache_resource->refcnt);
+
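+ /* Create the shared rdma-core action only on the first reference. */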
+ if (!cnt) {
+ MLX5_ASSERT(!cache_resource->action);
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_default_miss();
+ if (!cache_resource->action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss action");
+ DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
+ (void *)cache_resource->action, cnt);
+ }
+ rte_atomic32_inc(&cache_resource->refcnt);
+ return 0;
+}
+
/**
* Find existing table port ID resource or create and register a new one.
*
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count(dev, error);
if (ret < 0)
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
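+ /* The shared default miss action is attached at apply time. */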
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
+ break;
case RTE_FLOW_ACTION_TYPE_METER:
mtr = actions->conf;
if (!flow->meter) {
flow_dv_translate_item_eth(match_mask, match_value,
items, tunnel,
dev_flow->dv.group);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
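+ /* Internal default miss (LACP) flows use L3 priority, others keep L2. */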
+ matcher.priority = (action_flags &
+ MLX5_FLOW_ACTION_DEFAULT_MISS) &&
+ !dev_flow->external ?
+ MLX5_PRIORITY_MAP_L3 :
+ MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
break;
}
dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
+ if (flow_dv_default_miss_resource_register
+ (dev, error)) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss resource");
+ goto error_default_miss;
+ }
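+ /* Mark the handle so the shared resource is released on destroy. */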
+ dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
+ dv->actions[n++] = priv->sh->default_miss.action;
}
+
dh->ib_flow =
mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
(void *)&dv->value, n,
}
return 0;
error:
+ if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
+ flow_dv_default_miss_resource_release(dev);
+error_default_miss:
err = rte_errno; /* Save rte_errno before cleanup. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
return 1;
}
+/**
+ * Release a default miss resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
+ (void *)cache_resource->action,
+ rte_atomic32_read(&cache_resource->refcnt));
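+ /* Destroy the rdma-core action only when the last reference is dropped. */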
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
+ DRV_LOG(DEBUG, "default miss resource %p: removed",
+ (void *)cache_resource->action);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Release a modify-header resource.
*
{
if (!handle->rix_fate)
return;
- if (handle->fate_action == MLX5_FLOW_FATE_DROP)
+ switch (handle->fate_action) {
+ case MLX5_FLOW_FATE_DROP:
mlx5_hrxq_drop_release(dev);
- else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
mlx5_hrxq_release(dev, handle->rix_hrxq);
- else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
+ break;
+ case MLX5_FLOW_FATE_JUMP:
flow_dv_jump_tbl_resource_release(dev, handle);
- else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
+ break;
+ case MLX5_FLOW_FATE_PORT_ID:
flow_dv_port_id_action_resource_release(dev, handle);
- else
+ break;
+ case MLX5_FLOW_FATE_DEFAULT_MISS:
+ flow_dv_default_miss_resource_release(dev);
+ break;
+ default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+ break;
+ }
handle->rix_fate = 0;
}
dh->ib_flow = NULL;
}
if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+ dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
+ dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
flow_dv_fate_resource_release(dev, dh);
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);