#include <stdalign.h>
#include <stdint.h>
#include <string.h>
-
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <stdbool.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
+#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
+
#include "mlx5_defs.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
-#include "mlx5_glue.h"
-#include "mlx5_prm.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
-/* Dev ops structure defined in mlx5.c */
-extern const struct eth_dev_ops mlx5_dev_ops;
-extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-
/** Device flow drivers. */
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
-#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
[MLX5_EXPANSION_VXLAN] = {
- .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
[MLX5_EXPANSION_VXLAN_GPE] = {
.flush = mlx5_flow_flush,
.isolate = mlx5_flow_isolate,
.query = mlx5_flow_query,
+ .dev_dump = mlx5_flow_dev_dump,
+ .get_aged_flows = mlx5_flow_get_aged_flows,
};
/* Convert FDIR request to Generic flow. */
struct rte_flow_action_queue queue;
};
-/* Map of Verbs to Flow priority with 8 Verbs priorities. */
-static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
- { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
-};
-
-/* Map of Verbs to Flow priority with 16 Verbs priorities. */
-static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
- { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
- { 9, 10, 11 }, { 12, 13, 14 },
-};
-
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
.ptype = RTE_PTYPE_TUNNEL_IP,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_GTP,
+ .ptype = RTE_PTYPE_TUNNEL_GTPU,
+ },
};
/**
 *   The requested register on success, a negative errno
* value otherwise and rte_errno is set.
*/
-enum modify_reg
+int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
enum mlx5_feature_name feature,
uint32_t id,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
enum modify_reg start_reg;
+ bool skip_mtr_reg = false;
switch (feature) {
case MLX5_HAIRPIN_RX:
case MLX5_METADATA_TX:
return REG_A;
case MLX5_METADATA_FDB:
- return REG_C_0;
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_NONE;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_0;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_1;
+ }
+ break;
case MLX5_FLOW_MARK:
switch (config->dv_xmeta_en) {
case MLX5_XMETA_MODE_LEGACY:
return REG_C_0;
}
break;
- case MLX5_COPY_MARK:
case MLX5_MTR_SFX:
+ /*
+ * If meter color and flow match share one register, flow match
+ * should use the meter color register for match.
+ */
+ if (priv->mtr_reg_share)
+ return priv->mtr_color_reg;
+ else
+ return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
+ REG_C_3;
+ case MLX5_MTR_COLOR:
+ MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
+ return priv->mtr_color_reg;
+ case MLX5_COPY_MARK:
/*
		 * The metadata COPY_MARK register is used in the meter
		 * suffix subflow when a meter is present. It's safe to
		 * share the same register.
*/
return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
- case MLX5_MTR_COLOR:
- RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
- return priv->mtr_color_reg;
case MLX5_APP_TAG:
/*
- * If meter is enable, it will engage two registers for color
+	 * If meter is enabled, it will engage the register for color
* match and flow match. If meter color match is not using the
* REG_C_2, need to skip the REG_C_x be used by meter color
* match.
	 * If meter is disabled, free to use all available registers.
*/
- if (priv->mtr_color_reg != REG_NONE)
- start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_3 :
- REG_C_4;
- else
- start_reg = REG_C_2;
+ start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
+ (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
+ skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
if (id > (REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
* If the available index REG_C_y >= REG_C_x, skip the
* color register.
*/
- if (start_reg == REG_C_3 && config->flow_mreg_c
- [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
- if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
- REG_NONE)
+ if (skip_mtr_reg && config->flow_mreg_c
+ [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
+ if (id >= (REG_C_7 - start_reg))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "invalid tag id");
+ if (config->flow_mreg_c
+ [id + 1 + start_reg - REG_C_0] != REG_NONE)
return config->flow_mreg_c
- [id + 1 + REG_C_3 - REG_C_0];
+ [id + 1 + start_reg - REG_C_0];
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unsupported tag id");
}
return config->flow_mreg_c[id + start_reg - REG_C_0];
}
- assert(false);
+ MLX5_ASSERT(false);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "invalid feature name");
return config->flow_mreg_c[2] != REG_NONE;
}
-/**
- * Discover the maximum number of priority available.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- *
- * @return
- * number of supported flow priority on success, a negative errno
- * value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct {
- struct ibv_flow_attr attr;
- struct ibv_flow_spec_eth eth;
- struct ibv_flow_spec_action_drop drop;
- } flow_attr = {
- .attr = {
- .num_of_specs = 2,
- .port = (uint8_t)priv->ibv_port,
- },
- .eth = {
- .type = IBV_FLOW_SPEC_ETH,
- .size = sizeof(struct ibv_flow_spec_eth),
- },
- .drop = {
- .size = sizeof(struct ibv_flow_spec_action_drop),
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- },
- };
- struct ibv_flow *flow;
- struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
- uint16_t vprio[] = { 8, 16 };
- int i;
- int priority = 0;
-
- if (!drop) {
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- for (i = 0; i != RTE_DIM(vprio); i++) {
- flow_attr.attr.priority = vprio[i] - 1;
- flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
- if (!flow)
- break;
- claim_zero(mlx5_glue->destroy_flow(flow));
- priority = vprio[i];
- }
- mlx5_hrxq_drop_release(dev);
- switch (priority) {
- case 8:
- priority = RTE_DIM(priority_map_3);
- break;
- case 16:
- priority = RTE_DIM(priority_map_5);
- break;
- default:
- rte_errno = ENOTSUP;
- DRV_LOG(ERR,
- "port %u verbs maximum priority: %d expected 8/16",
- dev->data->port_id, priority);
- return -rte_errno;
- }
- DRV_LOG(INFO, "port %u flow maximum priority: %d",
- dev->data->port_id, priority);
- return priority;
-}
-
-/**
- * Adjust flow priority based on the highest layer and the request priority.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] priority
- * The rule base priority.
- * @param[in] subpriority
- * The priority based on the items.
- *
- * @return
- * The new priority.
- */
-uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
- uint32_t subpriority)
-{
- uint32_t res = 0;
- struct mlx5_priv *priv = dev->data->dev_private;
-
- switch (priv->config.flow_prio) {
- case RTE_DIM(priority_map_3):
- res = priority_map_3[priority][subpriority];
- break;
- case RTE_DIM(priority_map_5):
- res = priority_map_5[priority][subpriority];
- break;
- }
- return res;
-}
-
/**
* Verify the @p item specifications (spec, last, mask) are compatible with the
* NIC capabilities.
{
unsigned int i;
- assert(nic_mask);
+ MLX5_ASSERT(nic_mask);
for (i = 0; i < size; ++i)
if ((nic_mask[i] | mask[i]) != nic_mask[i])
return rte_flow_error_set(error, ENOTSUP,
* The hash fields that should be used.
*/
uint64_t
-mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
int tunnel __rte_unused, uint64_t layer_types,
uint64_t hash_fields)
{
- struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- int rss_request_inner = flow->rss.level >= 2;
+ int rss_request_inner = rss_desc->level >= 2;
/* Check RSS hash level for tunnel. */
if (tunnel && rss_request_inner)
return 0;
#endif
/* Check if requested layer matches RSS hash fields. */
- if (!(flow->rss.types & layer_types))
+ if (!(rss_desc->types & layer_types))
return 0;
return hash_fields;
}
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to device flow structure.
+ * @param[in] dev_handle
+ * Pointer to device flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int mark = dev_handle->mark;
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_set(dev, dev_flow);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ flow_drv_rxq_flags_set(dev, dev_handle);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] dev_flow
- * Pointer to the device flow.
+ * @param[in] dev_handle
+ * Pointer to the device flow handle structure.
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int mark = dev_handle->mark;
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
- assert(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
+ MLX5_ASSERT(dev->data->dev_started);
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_trim(dev, dev_flow);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ flow_drv_rxq_flags_trim(dev, dev_handle);
}
/**
}
}
+/**
+ * Set the Rx queue dynamic metadata (mask and offset) for a flow
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ */
+void
+mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *data;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ data = (*priv->rxqs)[i];
+ if (!rte_flow_dynf_metadata_avail()) {
+ data->dynf_meta = 0;
+ data->flow_meta_mask = 0;
+ data->flow_meta_offset = -1;
+ } else {
+ data->dynf_meta = 1;
+ data->flow_meta_mask = rte_flow_dynf_metadata_mask;
+ data->flow_meta_offset = rte_flow_dynf_metadata_offs;
+ }
+ }
+}
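+
+/*
+ * Illustrative usage sketch (not part of this patch): metadata delivery
+ * only engages once the application registers the dynamic mbuf field,
+ * e.g. before configuring the port:
+ *
+ *   if (rte_flow_dynf_metadata_register() < 0)
+ *       rte_exit(EXIT_FAILURE, "cannot register metadata dynfield");
+ *
+ * Until then rte_flow_dynf_metadata_avail() is false and the loop above
+ * disables delivery (zero mask, offset -1).
+ */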
+
/*
 * Return a pointer to the desired action in the list of actions.
*
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
-
- if (action_flags & MLX5_FLOW_ACTION_DROP)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't drop and flag in same flow");
if (action_flags & MLX5_FLOW_ACTION_MARK)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
&mark->id,
"mark id must in 0 <= id < "
RTE_STR(MLX5_FLOW_MARK_MAX));
- if (action_flags & MLX5_FLOW_ACTION_DROP)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't drop and mark in same flow");
if (action_flags & MLX5_FLOW_ACTION_FLAG)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_validate_action_drop(uint64_t action_flags,
+mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- if (action_flags & MLX5_FLOW_ACTION_FLAG)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't drop and flag in same flow");
- if (action_flags & MLX5_FLOW_ACTION_MARK)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't drop and mark in same flow");
- if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
- MLX5_FLOW_FATE_ESWITCH_ACTIONS))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have 2 fate actions in"
- " same flow");
if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
&rss->types,
"some RSS protocols are not"
" supported");
+ if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
+ !(rss->types & ETH_RSS_IP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "L3 partial RSS requested but L3 RSS"
+ " type not specified");
+ if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
+ !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "L4 partial RSS requested but L4 RSS"
+ " type not specified");
if (!priv->rxqs_n)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "No queues configured");
for (i = 0; i != rss->queue_num; ++i) {
+ if (rss->queue[i] >= priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i], "queue index out of range");
if (!(*priv->rxqs)[rss->queue[i]])
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"rss action not supported for "
"egress");
- if (rss->level > 1 && !tunnel)
+ if (rss->level > 1 && !tunnel)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"inner RSS is not supported for "
"non-tunnel flows");
+ if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "RSS on eCPRI is not supported now");
+ }
+ return 0;
+}
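+
+/*
+ * Illustrative example for the partial-RSS checks above: a request with
+ * only ETH_RSS_L3_SRC_ONLY is rejected, while
+ * ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY is accepted and hashes on the
+ * IPv4 source address only.
+ */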
+
+/*
+ * Validate the default miss action.
+ *
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_default_miss(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "default miss action not supported "
+ "for egress");
+ if (attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "only group 0 is supported");
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
return 0;
}
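+
+/*
+ * Illustrative example for the checks above: a valid use of the default
+ * miss action is an ingress group-0 rule such as
+ *   pattern eth / end  actions default_miss / end
+ * combining it with another fate action (QUEUE/RSS/DROP), egress,
+ * transfer, or a non-zero group fails validation.
+ */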
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ * Type in the ethernet layer header (including dot1q).
* @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ * Type in the ethernet layer header (including dot1q).
* @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
"\xff\xff\xff\xff\xff\xff\xff\xff",
.vtc_flow = RTE_BE32(0xffffffff),
.proto = 0xff,
- .hop_limits = 0xff,
},
};
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
- assert(flow_mask);
+ MLX5_ASSERT(flow_mask);
if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
- uint32_t vlan_id = 0;
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return ret;
if (spec) {
memcpy(&id.vni[1], spec->vni, 3);
- vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vlan_id &= id.vlan_id;
}
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
- * only this layer is defined in the Verbs specification it is
- * interpreted as wildcard and all packets will match this
- * rule, if it follows a full stack layer (ex: eth / ipv4 /
- * udp), all packets matching the layers before will also
- * match this rule. To avoid such situation, VNI 0 is
- * currently refused.
- */
- if (!vlan_id)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VXLAN vni cannot be 0");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
- uint32_t vlan_id = 0;
if (!priv->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
"VxLAN-GPE protocol"
" not supported");
memcpy(&id.vni[1], spec->vni, 3);
- vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vlan_id &= id.vlan_id;
}
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
- * layer is defined in the Verbs specification it is interpreted as
- * wildcard and all packets will match this rule, if it follows a full
- * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule. To avoid such situation, VNI 0
- * is currently refused.
- */
- if (!vlan_id)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VXLAN-GPE vni cannot be 0");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
const rte_be32_t *mask = item->mask;
int ret = 0;
rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
- const struct rte_flow_item_gre *gre_spec = gre_item->spec;
- const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+ const struct rte_flow_item_gre *gre_spec;
+ const struct rte_flow_item_gre *gre_mask;
if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
return rte_flow_error_set(error, ENOTSUP,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GRE key following a wrong item");
+ gre_mask = gre_item->mask;
if (!gre_mask)
gre_mask = &rte_flow_item_gre_mask;
+ gre_spec = gre_item->spec;
if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
!(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
return rte_flow_error_set(error, EINVAL,
.protocol = RTE_BE16(UINT16_MAX),
};
- if (!(priv->config.hca_attr.flex_parser_protocols &
- MLX5_HCA_FLEX_GENEVE_ENABLED) ||
- !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 Geneve is not enabled by device"
if (ret < 0)
return ret;
return 0;
-#endif
+#else
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"MPLS is not supported by Verbs, please"
" update.");
+#endif
}
/**
return 0;
}
+/**
+ * Validate eCPRI item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ * Type in the ethernet layer header (including dot1q).
+ * @param[in] acc_mask
+ *   Acceptable mask; if NULL, the default internal mask
+ *   will be used to check whether item fields are supported.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ const struct rte_flow_item_ecpri *acc_mask,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ecpri *mask = item->mask;
+ const struct rte_flow_item_ecpri nic_mask = {
+ .hdr = {
+ .common = {
+ .u32 =
+ RTE_BE32(((const struct rte_ecpri_common_hdr) {
+ .type = 0xFF,
+ }).u32),
+ },
+ .dummy[0] = 0xFFFFFFFF,
+ },
+ };
+ const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
+ struct rte_flow_item_ecpri mask_lo;
+
+ if ((last_item & outer_l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_ECPRI)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "eCPRI cannot follow L2/VLAN layer "
+					  "whose ether type is not 0xAEFE.");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "eCPRI with tunnel is not supported "
+ "right now.");
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L3 layers not supported");
+ else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "eCPRI cannot follow a TCP layer.");
+ /* In specification, eCPRI could be over UDP layer. */
+ else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "eCPRI over UDP layer is not yet "
+					  "supported.");
+ /* Mask for type field in common header could be zero. */
+ if (!mask)
+ mask = &rte_flow_item_ecpri_mask;
+ mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
+ /* Input mask is in big-endian format. */
+ if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "partial mask is not supported "
+ "for protocol");
+ else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "message header mask must be after "
+ "a type mask");
+ return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ acc_mask ? (const uint8_t *)acc_mask
+ : (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ecpri),
+ error);
+}
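+
+/*
+ * Illustrative item sketch for the mask rules above (hypothetical
+ * values): matching only the eCPRI message type with
+ *
+ *   struct rte_flow_item_ecpri mask = {
+ *       .hdr.common.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
+ *           .type = 0xFF,
+ *       }).u32),
+ *   };
+ *
+ * passes; a partial type mask (e.g. 0x0F) is rejected, and masking
+ * hdr.dummy[0] without the full 0xFF type mask is rejected as well.
+ */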
+
/* Allocate unique ID for the split Q/RSS subflows. */
static uint32_t
flow_qrss_get_id(struct rte_eth_dev *dev)
ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
if (ret)
return 0;
- assert(qrss_id);
+ MLX5_ASSERT(qrss_id);
return qrss_id;
}
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- if (dev_flow->qrss_id)
- flow_qrss_free_id(dev, dev_flow->qrss_id);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ if (dev_handle->split_flow_id)
+ flow_qrss_free_id(dev, dev_handle->split_flow_id);
}
static int
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
bool external __rte_unused,
+ int hairpin __rte_unused,
struct rte_flow_error *error)
{
return rte_flow_error_set(error, ENOTSUP,
}
static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
+	/* The OS can determine a specific flow type (DV, VERBS) first. */
+ enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
+ if (type != MLX5_FLOW_TYPE_MAX)
+ return type;
+	/* If there is no OS-specific type, continue with DV/VERBS selection. */
if (attr->transfer && priv->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
if (!attr->transfer)
* Pointer to the list of actions.
* @param[in] external
* This flow rule is created by request external to PMD.
+ * @param[in] hairpin
+ * Number of hairpin TX actions, 0 means classic flow.
* @param[out] error
* Pointer to the error structure.
*
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, struct rte_flow_error *error)
+ bool external, int hairpin, struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
fops = flow_get_drv_ops(type);
- return fops->validate(dev, attr, items, actions, external, error);
+ return fops->validate(dev, attr, items, actions, external,
+ hairpin, error);
}
/**
* setting backward reference to the flow should be done out of this function.
* layers field is not filled either.
*
+ * @param[in] dev
+ * Pointer to the dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] flow_idx
+ *   The memory pool index of the flow.
* @param[out] error
* Pointer to the error structure.
*
* Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow *flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
+ uint32_t flow_idx,
struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
+ struct mlx5_flow *mlx5_flow = NULL;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
- return fops->prepare(attr, items, actions, error);
+ mlx5_flow = fops->prepare(dev, attr, items, actions, error);
+ if (mlx5_flow)
+ mlx5_flow->flow_idx = flow_idx;
+ return mlx5_flow;
}
/**
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->translate(dev, dev_flow, attr, items, actions, error);
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->apply(dev, flow, error);
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->remove(dev, flow);
}
enum mlx5_flow_drv_type type = flow->drv_type;
flow_mreg_split_qrss_release(dev, flow);
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
}
-/**
- * Validate a flow supported by the NIC.
- *
- * @see rte_flow_validate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- int ret;
-
- ret = flow_drv_validate(dev, attr, items, actions, true, error);
- if (ret < 0)
- return ret;
- return 0;
-}
-
/**
* Get RSS action from the action list.
*
}
/**
- * Get QUEUE/RSS action from the action list.
+ * Get layer flags from the prefix flow.
+ *
+ * Some flows may be split into several subflows: the prefix subflow gets the
+ * match items and the suffix subflow gets the actions.
+ * Some actions need the user-defined match item flags to get the details for
+ * the action.
+ * This function helps the suffix flow to get the item layer flags from the
+ * prefix subflow.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the created prefix subflow.
+ *
+ * @return
+ *   The layers obtained from the prefix subflow.
+ */
+static inline uint64_t
+flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
+{
+ uint64_t layers = 0;
+
+ /*
+	 * The layer bits could be cached in local variables, but usually
+	 * the compiler optimizes these accesses anyway.
+	 * If there is no decap action, use the layers directly.
+ */
+ if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
+ return dev_flow->handle->layers;
+ /* Convert L3 layers with decap action. */
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ /* Convert L4 layers with decap action. */
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+ layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+ layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ return layers;
+}
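+
+/*
+ * For instance (illustrative): a VXLAN rule whose prefix subflow
+ * carries MLX5_FLOW_LAYER_INNER_L3_IPV4/INNER_L4_UDP together with a
+ * decap action is converted above so the suffix subflow sees
+ * OUTER_L3_IPV4/OUTER_L4_UDP, matching the packet as it looks after
+ * decapsulation.
+ */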
+
+/**
+ * Get metadata split action information.
*
* @param[in] actions
* Pointer to the list of actions.
 * @param[out] qrss
* Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
* if no QUEUE/RSS is found.
+ * @param[out] encap_idx
+ *   Pointer to the index of the encap action if it exists, otherwise the last
+ * action index.
*
* @return
* Total number of actions.
*/
static int
-flow_parse_qrss_action(const struct rte_flow_action actions[],
- const struct rte_flow_action **qrss)
+flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
+ const struct rte_flow_action **qrss,
+ int *encap_idx)
{
+ const struct rte_flow_action_raw_encap *raw_encap;
int actions_n = 0;
+ int raw_decap_idx = -1;
+ *encap_idx = -1;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ *encap_idx = actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ raw_decap_idx = actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ *encap_idx = raw_decap_idx != -1 ?
+ raw_decap_idx : actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
*qrss = actions;
}
actions_n++;
}
+ if (*encap_idx == -1)
+ *encap_idx = actions_n;
+ /* Count RTE_FLOW_ACTION_TYPE_END. */
+ return actions_n + 1;
+}
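+
+/*
+ * Illustrative example: for the action list
+ *   RAW_DECAP / RAW_ENCAP(size > decision size) / QUEUE / END
+ * *encap_idx is set to the RAW_DECAP index (0), so the metadata copy
+ * can be placed before the decap/encap pair, and the returned count
+ * includes the END action (4 here).
+ */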
+
+/**
+ * Check meter action from the action list.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] mtr
+ * Pointer to the meter exist flag.
+ *
+ * @return
+ * Total number of actions.
+ */
+static int
+flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
+{
+ int actions_n = 0;
+
+ MLX5_ASSERT(mtr);
+ *mtr = 0;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_METER:
+ *mtr = 1;
+ break;
+ default:
+ break;
+ }
+ actions_n++;
+ }
/* Count RTE_FLOW_ACTION_TYPE_END. */
return actions_n + 1;
}
/**
- * Check if the flow should be splited due to hairpin.
+ * Check if the flow should be split due to hairpin.
* The reason for the split is that in current HW we can't
- * support encap on Rx, so if a flow have encap we move it
- * to Tx.
+ * support encap and push-vlan on Rx, so if a flow contains
+ * these actions we move it to Tx.
*
* @param dev
* Pointer to Ethernet device.
{
int queue_action = 0;
int action_n = 0;
- int encap = 0;
+ int split = 0;
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action_raw_encap *raw_encap;
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
queue = actions->conf;
+ if (queue == NULL)
+ return 0;
if (mlx5_rxq_get_type(dev, queue->index) !=
MLX5_RXQ_TYPE_HAIRPIN)
return 0;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = actions->conf;
+ if (rss == NULL || rss->queue_num == 0)
+ return 0;
if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
MLX5_RXQ_TYPE_HAIRPIN)
return 0;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- encap = 1;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ split++;
action_n++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
if (raw_encap->size >
(sizeof(struct rte_flow_item_eth) +
sizeof(struct rte_flow_item_ipv4)))
- encap = 1;
+ split++;
action_n++;
break;
default:
break;
}
}
- if (encap == 1 && queue_action)
+ if (split && queue_action)
return action_n;
return 0;
}
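+
+/*
+ * Illustrative example: VXLAN_ENCAP / QUEUE(hairpin queue) / END makes
+ * both split and queue_action non-zero, so the action count is returned
+ * and the caller splits the flow into Rx and Tx parts; a QUEUE action
+ * towards a regular Rx queue returns 0 and the flow stays unsplit.
+ */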
/* Declare flow create/destroy prototype in advance. */
-static struct rte_flow *
-flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+static uint32_t
+flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
bool external, struct rte_flow_error *error);
static void
-flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
- struct rte_flow *flow);
+flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+ uint32_t flow_idx);
/**
* Add a flow of copying flow metadata registers in RX_CP_TBL.
};
struct mlx5_flow_action_copy_mreg cp_mreg = {
.dst = REG_B,
- .src = 0,
+ .src = REG_NONE,
};
struct rte_flow_action_jump jump = {
.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
struct mlx5_flow_mreg_copy_resource *mcp_res;
+ uint32_t idx = 0;
int ret;
	/* Fill the register fields in the flow. */
return NULL;
cp_mreg.src = ret;
/* Check if already registered. */
- assert(priv->mreg_cp_tbl);
+ MLX5_ASSERT(priv->mreg_cp_tbl);
mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
if (mcp_res) {
/* For non-default rule. */
- if (mark_id)
+ if (mark_id != MLX5_DEFAULT_COPY_ID)
mcp_res->refcnt++;
- assert(mark_id || mcp_res->refcnt == 1);
+ MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+ mcp_res->refcnt == 1);
return mcp_res;
}
/* Provide the full width of FLAG specific value. */
if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
/* Build a new flow. */
- if (mark_id) {
+ if (mark_id != MLX5_DEFAULT_COPY_ID) {
items[0] = (struct rte_flow_item){
- .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG,
.spec = &tag_spec,
};
items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_END,
};
actions[0] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_MARK,
.conf = &ftag,
};
actions[1] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
.conf = &cp_mreg,
};
actions[2] = (struct rte_flow_action){
.type = RTE_FLOW_ITEM_TYPE_END,
};
actions[0] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
.conf = &cp_mreg,
};
actions[1] = (struct rte_flow_action){
};
}
/* Build a new entry. */
- mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
+ mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
if (!mcp_res) {
rte_errno = ENOMEM;
return NULL;
}
+ mcp_res->idx = idx;
/*
	 * The copy Flows are not included in any list. These
	 * ones are referenced from other Flows and cannot
	 * be applied, removed, or deleted in arbitrary order
	 * by list traversal.
*/
- mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
+ mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
actions, false, error);
- if (!mcp_res->flow)
+ if (!mcp_res->rix_flow)
goto error;
mcp_res->refcnt++;
mcp_res->hlist_ent.key = mark_id;
ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
&mcp_res->hlist_ent);
- assert(!ret);
+ MLX5_ASSERT(!ret);
if (ret)
goto error;
return mcp_res;
error:
- if (mcp_res->flow)
- flow_list_destroy(dev, NULL, mcp_res->flow);
- rte_free(mcp_res);
+ if (mcp_res->rix_flow)
+ flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
return NULL;
}
flow_mreg_del_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
+ if (!flow->rix_mreg_copy)
+ return;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
if (!mcp_res || !priv->mreg_cp_tbl)
return;
if (flow->copy_applied) {
- assert(mcp_res->appcnt);
+ MLX5_ASSERT(mcp_res->appcnt);
flow->copy_applied = 0;
--mcp_res->appcnt;
- if (!mcp_res->appcnt)
- flow_drv_remove(dev, mcp_res->flow);
+ if (!mcp_res->appcnt) {
+ struct rte_flow *mcp_flow = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ mcp_res->rix_flow);
+
+ if (mcp_flow)
+ flow_drv_remove(dev, mcp_flow);
+ }
}
/*
* We do not check availability of metadata registers here,
- * because copy resources are allocated in this case.
+ * because copy resources are not allocated in this case.
*/
if (--mcp_res->refcnt)
return;
- assert(mcp_res->flow);
- flow_list_destroy(dev, NULL, mcp_res->flow);
+ MLX5_ASSERT(mcp_res->rix_flow);
+ flow_list_destroy(dev, NULL, mcp_res->rix_flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- rte_free(mcp_res);
- flow->mreg_copy = NULL;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+ flow->rix_mreg_copy = 0;
}
/**
flow_mreg_start_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
- if (!mcp_res || flow->copy_applied)
+ if (!flow->rix_mreg_copy || flow->copy_applied)
+ return 0;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
+ if (!mcp_res)
return 0;
if (!mcp_res->appcnt) {
- ret = flow_drv_apply(dev, mcp_res->flow, NULL);
- if (ret)
- return ret;
+ struct rte_flow *mcp_flow = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ mcp_res->rix_flow);
+
+ if (mcp_flow) {
+ ret = flow_drv_apply(dev, mcp_flow, NULL);
+ if (ret)
+ return ret;
+ }
}
++mcp_res->appcnt;
flow->copy_applied = 1;
flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
- if (!mcp_res || !flow->copy_applied)
+ if (!flow->rix_mreg_copy || !flow->copy_applied)
+ return;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
+ if (!mcp_res)
return;
- assert(mcp_res->appcnt);
+ MLX5_ASSERT(mcp_res->appcnt);
--mcp_res->appcnt;
flow->copy_applied = 0;
- if (!mcp_res->appcnt)
- flow_drv_remove(dev, mcp_res->flow);
+ if (!mcp_res->appcnt) {
+ struct rte_flow *mcp_flow = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ mcp_res->rix_flow);
+
+ if (mcp_flow)
+ flow_drv_remove(dev, mcp_flow);
+ }
}
/**
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL);
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
+ MLX5_DEFAULT_COPY_ID);
if (!mcp_res)
return;
- assert(mcp_res->flow);
- flow_list_destroy(dev, NULL, mcp_res->flow);
+ MLX5_ASSERT(mcp_res->rix_flow);
+ flow_list_destroy(dev, NULL, mcp_res->rix_flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- rte_free(mcp_res);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
/**
!mlx5_flow_ext_mreg_supported(dev) ||
!priv->sh->dv_regc0_mask)
return 0;
- mcp_res = flow_mreg_add_copy_action(dev, 0, error);
+ mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
if (!mcp_res)
return -rte_errno;
return 0;
(dev, MLX5_FLOW_MARK_DEFAULT, error);
if (!mcp_res)
return -rte_errno;
- flow->mreg_copy = mcp_res;
+ flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
mcp_res->appcnt++;
flow->copy_applied = 1;
flow_mreg_add_copy_action(dev, mark->id, error);
if (!mcp_res)
return -rte_errno;
- flow->mreg_copy = mcp_res;
+ flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
mcp_res->appcnt++;
flow->copy_applied = 1;
/**
* Split the hairpin flow.
- * Since HW can't support encap on Rx we move the encap to Tx.
+ * Since HW can't support encap and push-vlan on Rx, we move these
+ * actions to Tx.
* If the count action is after the encap then we also
 * move the count action. In this case the count will also measure
* the outer bytes.
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
rte_memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
}
/* Add set meta action and end action for the Rx flow. */
tag_action = actions_rx;
- tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ tag_action->type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_TAG;
actions_rx++;
rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
actions_rx++;
set_tag = (void *)actions_rx;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
- assert(set_tag->id > REG_NONE);
+ MLX5_ASSERT(set_tag->id > REG_NONE);
set_tag->data = *flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
addr = (void *)&pattern_tx[2];
item = pattern_tx;
- item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ item->type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG;
tag_item = (void *)addr;
tag_item->data = *flow_id;
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
- assert(set_tag->id > REG_NONE);
+ MLX5_ASSERT(set_tag->id > REG_NONE);
item->spec = tag_item;
addr += sizeof(struct mlx5_rte_flow_item_tag);
tag_item = (void *)addr;
tag_item->data = UINT32_MAX;
tag_item->id = UINT16_MAX;
item->mask = tag_item;
- addr += sizeof(struct mlx5_rte_flow_item_tag);
item->last = NULL;
item++;
item->type = RTE_FLOW_ITEM_TYPE_END;
* The last stage of splitting chain, just creates the subflow
* without any modification.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
* @param[in, out] sub_flow
* Pointer to return the created subflow, may be NULL.
+ * @param[in] prefix_layers
+ * Prefix subflow layers, may be 0.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Associated actions (list terminated by the END action).
* @param[in] external
* This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   The memory pool index of the flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
flow_create_split_inner(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct mlx5_flow **sub_flow,
+ uint64_t prefix_layers,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, struct rte_flow_error *error)
+ bool external, uint32_t flow_idx,
+ struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
- dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
+ flow_idx, error);
if (!dev_flow)
return -rte_errno;
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
+ /*
+ * If dev_flow is as one of the suffix flow, some actions in suffix
+ * flow may need some user defined item layer flags.
+ */
+ if (prefix_layers)
+ dev_flow->handle->layers = prefix_layers;
if (sub_flow)
*sub_flow = dev_flow;
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}
+/**
+ * Split the meter flow.
+ *
+ * As the meter flow will be split into three subflows, the actions
+ * other than the meter action only make sense if the meter accepts
+ * the packet. If it needs to be dropped, no additional actions
+ * should be taken.
+ *
+ * One special kind of action, which decapsulates the L3 tunnel
+ * header, will be in the prefix subflow, so as not to take the
+ * L3 tunnel header into account.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[out] sfx_items
+ * Suffix flow match items (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] actions_sfx
+ * Suffix flow actions.
+ * @param[out] actions_pre
+ * Prefix flow actions.
+ *
+ * @return
+ *   The flow tag id used by the suffix subflow, 0 on failure.
+ */
+static int
+flow_meter_split_prep(struct rte_eth_dev *dev,
+ const struct rte_flow_item items[],
+ struct rte_flow_item sfx_items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_action actions_sfx[],
+ struct rte_flow_action actions_pre[])
+{
+ struct rte_flow_action *tag_action = NULL;
+ struct rte_flow_item *tag_item;
+ struct mlx5_rte_flow_action_set_tag *set_tag;
+ struct rte_flow_error error;
+ const struct rte_flow_action_raw_encap *raw_encap;
+ const struct rte_flow_action_raw_decap *raw_decap;
+ struct mlx5_rte_flow_item_tag *tag_spec;
+ struct mlx5_rte_flow_item_tag *tag_mask;
+ uint32_t tag_id;
+ bool copy_vlan = false;
+
+ /* Prepare the actions for prefix and suffix flow. */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ struct rte_flow_action **action_cur = NULL;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_METER:
+ /* Add the extra tag action first. */
+ tag_action = actions_pre;
+ tag_action->type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ actions_pre++;
+ action_cur = &actions_pre;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ action_cur = &actions_pre;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
+ action_cur = &actions_pre;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ raw_decap = actions->conf;
+ if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ action_cur = &actions_pre;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ copy_vlan = true;
+ break;
+ default:
+ break;
+ }
+ if (!action_cur)
+ action_cur = &actions_sfx;
+ memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
+ (*action_cur)++;
+ }
+ /* Add end action to the actions. */
+ actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
+ actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
+ actions_pre++;
+ /* Set the tag. */
+ set_tag = (void *)actions_pre;
+ set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
+ /*
+ * Get the id from the qrss_pool to make qrss share the id with meter.
+ */
+ tag_id = flow_qrss_get_id(dev);
+ set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
+	MLX5_ASSERT(tag_action);
+ tag_action->conf = set_tag;
+ /* Prepare the suffix subflow items. */
+ tag_item = sfx_items++;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int item_type = items->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (copy_vlan) {
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ /*
+				 * Convert to an internal match item; it is
+				 * used for VLAN push and set VID.
+ */
+ sfx_items->type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+ sfx_items++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+ sfx_items++;
+ tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
+ tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
+ tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
+ tag_mask = tag_spec + 1;
+ tag_mask->data = 0xffffff00;
+ tag_item->type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ tag_item->spec = tag_spec;
+ tag_item->last = NULL;
+ tag_item->mask = tag_mask;
+ return tag_id;
+}
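+
+/*
+ * Resulting layout (illustrative): for a rule with actions
+ *   METER / QUEUE / END
+ * the prefix flow carries TAG(set id) / METER while the suffix flow
+ * matches the TAG id (plus any PORT_ID/VLAN items copied above) and
+ * carries QUEUE, so the post-meter action applies only to packets the
+ * meter accepts.
+ */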
+
/**
* Split action list having QUEUE/RSS for metadata register copy.
*
struct mlx5_rte_flow_action_set_tag *set_tag;
struct rte_flow_action_jump *jump;
const int qrss_idx = qrss - actions;
- uint32_t flow_id;
+ uint32_t flow_id = 0;
int ret = 0;
/*
* As a result, there will be one more action.
*/
++actions_n;
+ memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+ set_tag = (void *)(split_actions + actions_n);
/*
- * Allocate the new subflow ID. This one is unique within
- * device and not shared with representors. Otherwise,
- * we would have to resolve multi-thread access synch
- * issue. Each flow on the shared device is appended
- * with source vport identifier, so the resulting
- * flows will be unique in the shared (by master and
- * representors) domain even if they have coinciding
- * IDs.
+	 * If the tag action is not set to void (meaning this is not the
+	 * meter suffix flow), add the tag action, since the meter suffix
+	 * flow already has the tag added.
*/
- flow_id = flow_qrss_get_id(dev);
- if (!flow_id)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "can't allocate id "
- "for split Q/RSS subflow");
- /* Internal SET_TAG action to set flow ID. */
- set_tag = (void *)(split_actions + actions_n);
- *set_tag = (struct mlx5_rte_flow_action_set_tag){
- .data = flow_id,
- };
- ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
- if (ret < 0)
- return ret;
- set_tag->id = ret;
+ if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
+ /*
+ * Allocate the new subflow ID. This one is unique within
+ * device and not shared with representors. Otherwise,
+ * we would have to resolve multi-thread access synch
+ * issue. Each flow on the shared device is appended
+ * with source vport identifier, so the resulting
+ * flows will be unique in the shared (by master and
+ * representors) domain even if they have coinciding
+ * IDs.
+ */
+ flow_id = flow_qrss_get_id(dev);
+ if (!flow_id)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't allocate id "
+ "for split Q/RSS subflow");
+ /* Internal SET_TAG action to set flow ID. */
+ *set_tag = (struct mlx5_rte_flow_action_set_tag){
+ .data = flow_id,
+ };
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+ if (ret < 0)
+ return ret;
+ set_tag->id = ret;
+ /* Construct new actions array. */
+ /* Replace QUEUE/RSS action. */
+ split_actions[qrss_idx] = (struct rte_flow_action){
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+ .conf = set_tag,
+ };
+ }
/* JUMP action to jump to mreg copy table (CP_TBL). */
jump = (void *)(set_tag + 1);
*jump = (struct rte_flow_action_jump){
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
};
- /* Construct new actions array. */
- memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
- /* Replace QUEUE/RSS action. */
- split_actions[qrss_idx] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
- .conf = set_tag,
- };
split_actions[actions_n - 2] = (struct rte_flow_action){
.type = RTE_FLOW_ACTION_TYPE_JUMP,
.conf = jump,
* Number of actions in the list.
* @param[out] error
* Perform verbose error reporting if not NULL.
+ * @param[in] encap_idx
+ *   The encap action index.
*
* @return
* 0 on success, negative value otherwise
flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
struct rte_flow_action *ext_actions,
const struct rte_flow_action *actions,
- int actions_n, struct rte_flow_error *error)
+ int actions_n, struct rte_flow_error *error,
+ int encap_idx)
{
struct mlx5_flow_action_copy_mreg *cp_mreg =
(struct mlx5_flow_action_copy_mreg *)
if (ret < 0)
return ret;
cp_mreg->src = ret;
- memcpy(ext_actions, actions,
- sizeof(*ext_actions) * actions_n);
- ext_actions[actions_n - 1] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
- .conf = cp_mreg,
- };
- ext_actions[actions_n] = (struct rte_flow_action){
- .type = RTE_FLOW_ACTION_TYPE_END,
- };
+ if (encap_idx != 0)
+ memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
+ if (encap_idx == actions_n - 1) {
+ ext_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ ext_actions[actions_n] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ } else {
+ ext_actions[encap_idx] = (struct rte_flow_action){
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
+ sizeof(*ext_actions) * (actions_n - encap_idx));
+ }
return 0;
}
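+
+/*
+ * Placement sketch (illustrative): when encap_idx points at an encap
+ * action, COPY_MREG is inserted just before it:
+ *   A0 .. A(encap_idx-1) / COPY_MREG / A(encap_idx) .. / END
+ * otherwise (encap_idx == actions_n - 1) COPY_MREG is appended right
+ * before the END action.
+ */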
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
+ * @param[in] prefix_layers
+ * Prefix flow layer flags.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Associated actions (list terminated by the END action).
* @param[in] external
* This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   The memory pool index of the flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
static int
flow_create_split_metadata(struct rte_eth_dev *dev,
struct rte_flow *flow,
+ uint64_t prefix_layers,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, struct rte_flow_error *error)
+ bool external, uint32_t flow_idx,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
struct rte_flow_action *ext_actions = NULL;
struct mlx5_flow *dev_flow = NULL;
uint32_t qrss_id = 0;
+ int mtr_sfx = 0;
size_t act_size;
int actions_n;
+ int encap_idx;
int ret;
/* Check whether extensive metadata feature is engaged. */
if (!config->dv_flow_en ||
config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev))
- return flow_create_split_inner(dev, flow, NULL, attr, items,
- actions, external, error);
- actions_n = flow_parse_qrss_action(actions, &qrss);
+ return flow_create_split_inner(dev, flow, NULL, prefix_layers,
+ attr, items, actions, external,
+ flow_idx, error);
+ actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
+ &encap_idx);
if (qrss) {
/* Exclude hairpin flows from splitting. */
if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
}
}
if (qrss) {
+ /* Check if it is in meter suffix table. */
+ mtr_sfx = attr->group == (attr->transfer ?
+ (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
+ MLX5_FLOW_TABLE_LEVEL_SUFFIX);
/*
* Q/RSS action on NIC Rx should be split in order to pass by
* the mreg copy table (RX_CP_TBL) and then it jumps to the
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct rte_flow_action_set_tag) +
sizeof(struct rte_flow_action_jump);
- ext_actions = rte_zmalloc(__func__, act_size, 0);
+ ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+ SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no memory to split "
"metadata flow");
+ /*
+		 * If this is the meter suffix flow, the tag already exists.
+		 * Set the tag action to void.
+ */
+ if (mtr_sfx)
+ ext_actions[qrss - actions].type =
+ RTE_FLOW_ACTION_TYPE_VOID;
+ else
+ ext_actions[qrss - actions].type =
+ (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_TAG;
/*
* Create the new actions list with removed Q/RSS action
* and appended set tag and jump to register copy table
*/
qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
qrss, actions_n, error);
- if (!qrss_id) {
+ if (!mtr_sfx && !qrss_id) {
ret = -rte_errno;
goto exit;
}
*/
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct mlx5_flow_action_copy_mreg);
- ext_actions = rte_zmalloc(__func__, act_size, 0);
+ ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+ SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
"metadata flow");
/* Create the action list appended with copy register. */
ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
- actions_n, error);
+ actions_n, error, encap_idx);
if (ret < 0)
goto exit;
}
/* Add the unmodified original or prefix subflow. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
- ext_actions ? ext_actions : actions,
- external, error);
+ ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
+ items, ext_actions ? ext_actions :
+ actions, external, flow_idx, error);
if (ret < 0)
goto exit;
- assert(dev_flow);
- if (qrss_id) {
+ MLX5_ASSERT(dev_flow);
+ if (qrss) {
const struct rte_flow_attr q_attr = {
.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
.ingress = 1,
/* Internal PMD action to set register. */
struct mlx5_rte_flow_item_tag q_tag_spec = {
.data = qrss_id,
- .id = 0,
+ .id = REG_NONE,
};
struct rte_flow_item q_items[] = {
{
- .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG,
.spec = &q_tag_spec,
.last = NULL,
.mask = NULL,
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- uint64_t hash_fields = dev_flow->hash_fields;
+ uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
+
/*
- * Put unique id in prefix flow due to it is destroyed after
- * prefix flow and id will be freed after there is no actual
- * flows with this id and identifier reallocation becomes
- * possible (for example, for other flows in other threads).
+ * Configure the tag item only if there is no meter subflow.
+		 * Since the tag is already set in the meter suffix subflow,
+		 * we can use the meter suffix items as-is.
*/
- dev_flow->qrss_id = qrss_id;
- qrss_id = 0;
+ if (qrss_id) {
+ /* Not meter subflow. */
+ MLX5_ASSERT(!mtr_sfx);
+ /*
+			 * Put the unique ID in the prefix flow, as the
+			 * prefix flow is destroyed after the suffix flow.
+			 * The ID is freed once no actual flows use it, at
+			 * which point reallocation of the identifier becomes
+			 * possible (for example, for other flows in other
+			 * threads).
+ */
+ dev_flow->handle->split_flow_id = qrss_id;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
+ error);
+ if (ret < 0)
+ goto exit;
+ q_tag_spec.id = ret;
+ }
dev_flow = NULL;
- ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
- if (ret < 0)
- goto exit;
- q_tag_spec.id = ret;
/* Add suffix subflow to execute Q/RSS. */
- ret = flow_create_split_inner(dev, flow, &dev_flow,
- &q_attr, q_items, q_actions,
- external, error);
+ ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
+ &q_attr, mtr_sfx ? items :
+ q_items, q_actions,
+ external, flow_idx, error);
if (ret < 0)
goto exit;
- assert(dev_flow);
- dev_flow->hash_fields = hash_fields;
+		/* Clear qrss_id: it must be freed at exit only on failure. */
+ qrss_id = 0;
+ MLX5_ASSERT(dev_flow);
}
exit:
* by flow_drv_destroy.
*/
flow_qrss_free_id(dev, qrss_id);
- rte_free(ext_actions);
+ mlx5_free(ext_actions);
+ return ret;
+}
+
+/**
+ * The splitting for meter feature.
+ *
+ * - The meter flow will be split into two flows: a prefix flow and
+ *   a suffix flow. Packets are handled by the suffix flow only if
+ *   they pass the prefix meter action.
+ *
+ * - Reg_C_5 is used for the packet to match between the prefix and
+ *   suffix flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   The memory pool index of the flow.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
+ */
+static int
+flow_create_split_meter(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, uint32_t flow_idx,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_action *sfx_actions = NULL;
+ struct rte_flow_action *pre_actions = NULL;
+ struct rte_flow_item *sfx_items = NULL;
+ struct mlx5_flow *dev_flow = NULL;
+ struct rte_flow_attr sfx_attr = *attr;
+ uint32_t mtr = 0;
+ uint32_t mtr_tag_id = 0;
+ size_t act_size;
+ size_t item_size;
+ int actions_n = 0;
+ int ret;
+
+ if (priv->mtr_en)
+ actions_n = flow_check_meter_action(actions, &mtr);
+ if (mtr) {
+ /* The five prefix actions: meter, decap, encap, tag, end. */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
+ sizeof(struct mlx5_rte_flow_action_set_tag);
+ /* tag, vlan, port id, end. */
+#define METER_SUFFIX_ITEM 4
+ item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
+ sizeof(struct mlx5_rte_flow_item_tag) * 2;
+ sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
+ 0, SOCKET_ID_ANY);
+ if (!sfx_actions)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no memory to split "
+ "meter flow");
+ sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
+ act_size);
+ pre_actions = sfx_actions + actions_n;
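+		/*
+		 * Single allocation: suffix actions at the head, prefix
+		 * actions after them, suffix items carved from the tail.
+		 */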
+ mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
+ actions, sfx_actions,
+ pre_actions);
+ if (!mtr_tag_id) {
+ ret = -rte_errno;
+ goto exit;
+ }
+ /* Add the prefix subflow. */
+ ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
+ items, pre_actions, external,
+ flow_idx, error);
+ if (ret) {
+ ret = -rte_errno;
+ goto exit;
+ }
+ dev_flow->handle->split_flow_id = mtr_tag_id;
+		/* Set the suffix group attribute. */
+ sfx_attr.group = sfx_attr.transfer ?
+ (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
+ MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+ }
+	/* Add the suffix subflow (the original flow when there is no meter). */
+ ret = flow_create_split_metadata(dev, flow, dev_flow ?
+ flow_get_prefix_layer_flags(dev_flow) :
+ 0, &sfx_attr,
+ sfx_items ? sfx_items : items,
+ sfx_actions ? sfx_actions : actions,
+ external, flow_idx, error);
+exit:
+ if (sfx_actions)
+ mlx5_free(sfx_actions);
return ret;
}
* Associated actions (list terminated by the END action).
* @param[in] external
* This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   The memory pool index of the flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
* @return
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, struct rte_flow_error *error)
+ bool external, uint32_t flow_idx,
+ struct rte_flow_error *error)
{
int ret;
- ret = flow_create_split_metadata(dev, flow, attr, items,
- actions, external, error);
- assert(ret <= 0);
+ ret = flow_create_split_meter(dev, flow, attr, items,
+ actions, external, flow_idx, error);
+ MLX5_ASSERT(ret <= 0);
return ret;
}
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise and rte_errno is set.
+ * A flow index on success, 0 otherwise and rte_errno is set.
*/
-static struct rte_flow *
-flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+static uint32_t
+flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
uint8_t buffer[2048];
} items_tx;
struct rte_flow_expand_rss *buf = &expand_buffer.buf;
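+	/* Pick the second RSS descriptor slot when this is a nested flow creation. */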
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)[!!priv->flow_idx];
const struct rte_flow_action *p_actions_rx = actions;
- int ret;
uint32_t i;
- uint32_t flow_size;
- int hairpin_flow = 0;
+ uint32_t idx = 0;
+ int hairpin_flow;
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
+ int ret;
hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ ret = flow_drv_validate(dev, attr, items, p_actions_rx,
+ external, hairpin_flow, error);
+ if (ret < 0)
+ return 0;
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
rte_errno = EINVAL;
- return NULL;
+ return 0;
}
flow_hairpin_split(dev, actions, actions_rx.actions,
actions_hairpin_tx.actions, items_tx.items,
&hairpin_id);
p_actions_rx = actions_rx.actions;
}
- ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
- error);
- if (ret < 0)
- goto error_before_flow;
- flow_size = sizeof(struct rte_flow);
- rss = flow_get_rss_action(p_actions_rx);
- if (rss)
- flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
- sizeof(void *));
- else
- flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
- flow = rte_calloc(__func__, 1, flow_size, 0);
+ flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
if (!flow) {
rte_errno = ENOMEM;
goto error_before_flow;
flow->drv_type = flow_get_drv_type(dev, attr);
if (hairpin_id != 0)
flow->hairpin_flow_id = hairpin_id;
- assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
- flow->drv_type < MLX5_FLOW_TYPE_MAX);
- flow->rss.queue = (void *)(flow + 1);
+ MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ memset(rss_desc, 0, sizeof(*rss_desc));
+ rss = flow_get_rss_action(p_actions_rx);
if (rss) {
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
*/
- flow->rss.level = rss->level;
+ rss_desc->level = rss->level;
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+ rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_flows);
+ flow->dev_handles = 0;
if (rss && rss->types) {
unsigned int graph_root;
items, rss->types,
mlx5_support_expansion,
graph_root);
- assert(ret > 0 &&
+ MLX5_ASSERT(ret > 0 &&
(unsigned int)ret < sizeof(expand_buffer.buffer));
} else {
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ /*
+ * Record the start index when there is a nested call. All sub-flows
+	 * need to be translated before another call is made.
+	 * A ping-pong buffer is not used here, to save memory.
+ */
+ if (priv->flow_idx) {
+ MLX5_ASSERT(!priv->flow_nested_idx);
+ priv->flow_nested_idx = priv->flow_idx;
+ }
for (i = 0; i < buf->entries; ++i) {
/*
* The splitter may create multiple dev_flows,
*/
ret = flow_create_split_outer(dev, flow, attr,
buf->entry[i].pattern,
- p_actions_rx, external,
+ p_actions_rx, external, idx,
error);
if (ret < 0)
goto error;
attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
attr_tx.ingress = 0;
attr_tx.egress = 1;
- dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
- actions_hairpin_tx.actions, error);
+ dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
+ actions_hairpin_tx.actions,
+ idx, error);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
if (ret)
goto error;
}
- if (dev->data->dev_started) {
+ /*
+	 * If the flow is external (from the application) or the device is
+	 * started, the flow will be applied immediately.
+ */
+ if (external || dev->data->dev_started) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
}
if (list)
- TAILQ_INSERT_TAIL(list, flow, next);
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
+ flow, next);
flow_rxq_flags_set(dev, flow);
- return flow;
-error_before_flow:
- if (hairpin_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- hairpin_id);
- return NULL;
+ /* Nested flow creation index recovery. */
+ priv->flow_idx = priv->flow_nested_idx;
+ if (priv->flow_nested_idx)
+ priv->flow_nested_idx = 0;
+ return idx;
error:
- assert(flow);
- flow_mreg_del_copy_action(dev, flow);
+ MLX5_ASSERT(flow);
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow->hairpin_flow_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- flow->hairpin_flow_id);
- assert(flow);
+ flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
- rte_free(flow);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
- return NULL;
+error_before_flow:
+ ret = rte_errno;
+ if (hairpin_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ hairpin_id);
+ rte_errno = ret;
+ priv->flow_idx = priv->flow_nested_idx;
+ if (priv->flow_nested_idx)
+ priv->flow_nested_idx = 0;
+ return 0;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
- return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
- actions, false, &error);
+ return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+ &attr, &pattern,
+ actions, false, &error);
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int hairpin_flow;
+
+ hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ return flow_drv_validate(dev, attr, items, actions,
+ true, hairpin_flow, error);
}
/**
{
struct mlx5_priv *priv = dev->data->dev_private;
- return flow_list_create(dev, &priv->flows,
- attr, items, actions, true, error);
+ /*
+	 * If the device is not started yet, it is not allowed to create a
+	 * flow from the application. PMD default flows and traffic control flows
+ * are not affected.
+ */
+ if (unlikely(!dev->data->dev_started)) {
+ DRV_LOG(DEBUG, "port %u is not started when "
+ "inserting a flow", dev->data->port_id);
+ rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port not started");
+ return NULL;
+ }
+ return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
+ attr, items, actions, true, error);
}
/**
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list. If this parameter NULL,
- * there is no flow removal from the list.
- * @param[in] flow
- * Flow to destroy.
+ *   Pointer to the indexed flow list. If this parameter is NULL,
+ *   there is no flow removal from the list. Note that as flows are
+ *   added to the indexed list, the memory the list points to may
+ *   change as flows are destroyed.
+ * @param[in] flow_idx
+ *   Index of the flow to destroy.
*/
static void
-flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
- struct rte_flow *flow)
+flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+ uint32_t flow_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+ struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_RTE_FLOW], flow_idx);
+ if (!flow)
+ return;
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
flow->hairpin_flow_id);
flow_drv_destroy(dev, flow);
if (list)
- TAILQ_REMOVE(list, flow, next);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
+ flow_idx, flow, next);
flow_mreg_del_copy_action(dev, flow);
- rte_free(flow->fdir);
- rte_free(flow);
+ if (flow->fdir) {
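+		/* Locate and drop the matching FDIR bookkeeping entry, if any. */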
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+ if (priv_fdir_flow->rix_flow == flow_idx)
+ break;
+ }
+ if (priv_fdir_flow) {
+ LIST_REMOVE(priv_fdir_flow, next);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
+ }
+ }
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
}
/**
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ *   Pointer to the indexed flow list.
+ * @param active
+ *   If the flush is invoked actively (e.g. before device stop).
*/
void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
{
- while (!TAILQ_EMPTY(list)) {
- struct rte_flow *flow;
+ uint32_t num_flushed = 0;
- flow = TAILQ_FIRST(list);
- flow_list_destroy(dev, list, flow);
+ while (*list) {
+ flow_list_destroy(dev, list, *list);
+ num_flushed++;
+ }
+ if (active) {
+ DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
+ dev->data->port_id, num_flushed);
}
}
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ *   Pointer to the indexed flow list.
*/
void
-mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list)
{
- struct rte_flow *flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ uint32_t idx;
- TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
+ flow, next) {
flow_drv_remove(dev, flow);
flow_mreg_stop_copy_action(dev, flow);
}
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ *   Pointer to the indexed flow list.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list)
{
- struct rte_flow *flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
struct rte_flow_error error;
+ uint32_t idx;
int ret = 0;
/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
if (ret < 0)
return -rte_errno;
/* Apply Flows created by application. */
- TAILQ_FOREACH(flow, list, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
+ flow, next) {
ret = flow_mreg_start_copy_action(dev, flow);
if (ret < 0)
goto error;
return -rte_errno;
}
+/**
+ * Stop all default actions for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
+{
+ flow_mreg_del_default_copy_action(dev);
+ flow_rxq_flags_clear(dev);
+}
+
+/**
+ * Start all default actions for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start_default(struct rte_eth_dev *dev)
+{
+ struct rte_flow_error error;
+
+ /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+ return flow_mreg_add_default_copy_action(dev, &error);
+}
+
+/**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->inter_flows) {
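+		/*
+		 * One buffer holds MLX5_NUM_MAX_DEV_FLOWS intermediate
+		 * device flows plus two ping-pong RSS descriptors, each
+		 * followed by a queue array of up to UINT16_MAX entries.
+		 */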
+ priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,
+ MLX5_NUM_MAX_DEV_FLOWS *
+ sizeof(struct mlx5_flow) +
+ (sizeof(struct mlx5_flow_rss_desc) +
+ sizeof(uint16_t) * UINT16_MAX) * 2, 0,
+ SOCKET_ID_ANY);
+ if (!priv->inter_flows) {
+ DRV_LOG(ERR, "can't allocate intermediate memory.");
+ return;
+ }
+ }
+ priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
+ [MLX5_NUM_MAX_DEV_FLOWS];
+ /* Reset the index. */
+ priv->flow_idx = 0;
+ priv->flow_nested_idx = 0;
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ mlx5_free(priv->inter_flows);
+ priv->inter_flows = NULL;
+}
+
/**
* Verify the flow list is empty
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
+ uint32_t idx;
int ret = 0;
- TAILQ_FOREACH(flow, &priv->flows, next) {
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
+ flow, next) {
DRV_LOG(DEBUG, "port %u flow %p still referenced",
dev->data->port_id, (void *)flow);
++ret;
};
struct rte_flow_item items[] = {
{
- .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
.spec = &queue_spec,
.last = NULL,
.mask = &queue_mask,
.group = MLX5_HAIRPIN_TX_TABLE,
};
struct rte_flow_action actions[2];
- struct rte_flow *flow;
+ uint32_t flow_idx;
struct rte_flow_error error;
actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
actions[0].conf = &jump;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
- flow = flow_list_create(dev, &priv->ctrl_flows,
+ flow_idx = flow_list_create(dev, &priv->ctrl_flows,
&attr, items, actions, false, &error);
- if (!flow) {
+ if (!flow_idx) {
DRV_LOG(DEBUG,
"Failed to create ctrl flow: rte_errno(%d),"
" type(%d), message(%s)",
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct rte_flow *flow;
+ uint32_t flow_idx;
struct rte_flow_error error;
unsigned int i;
if (!priv->reta_idx_n || !priv->rxqs_n) {
return 0;
}
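+	/* When the port is not configured for RSS, request no RSS hash types. */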
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow = flow_list_create(dev, &priv->ctrl_flows,
+ flow_idx = flow_list_create(dev, &priv->ctrl_flows,
&attr, items, actions, false, &error);
- if (!flow)
+ if (!flow_idx)
return -rte_errno;
return 0;
}
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
+/**
+ * Create a default miss flow rule matching LACP traffic.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /*
+	 * The LACP matching is done using only the ether type, since matching
+	 * on the multicast dst MAC causes the kernel to give this flow a low
+	 * priority.
+ */
+ static const struct rte_flow_item_eth lacp_spec = {
+ .type = RTE_BE16(0x8809),
+ };
+ static const struct rte_flow_item_eth lacp_mask = {
+ .type = 0xffff,
+ };
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &lacp_spec,
+ .mask = &lacp_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow_error error;
+ uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
+
+ if (!flow_idx)
+ return -rte_errno;
+ return 0;
+}
+
/**
* Destroy a flow.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
- flow_list_destroy(dev, &priv->flows, flow);
+ flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
return 0;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_flow_list_flush(dev, &priv->flows);
+ mlx5_flow_list_flush(dev, &priv->flows, false);
return 0;
}
}
priv->isolated = !!enable;
if (enable)
- dev->dev_ops = &mlx5_dev_ops_isolate;
+ dev->dev_ops = &mlx5_os_dev_ops_isolate;
else
- dev->dev_ops = &mlx5_dev_ops;
+ dev->dev_ops = &mlx5_os_dev_ops;
return 0;
}
*/
static int
flow_drv_query(struct rte_eth_dev *dev,
- struct rte_flow *flow,
+ uint32_t flow_idx,
const struct rte_flow_action *actions,
void *data,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_driver_ops *fops;
- enum mlx5_flow_drv_type ftype = flow->drv_type;
+ struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_RTE_FLOW],
+ flow_idx);
+ enum mlx5_flow_drv_type ftype;
- assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+ if (!flow) {
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
+ }
+ ftype = flow->drv_type;
+ MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(ftype);
return fops->query(dev, flow, actions, data, error);
{
int ret;
- ret = flow_drv_query(dev, flow, actions, data, error);
+ ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
+ error);
if (ret < 0)
return ret;
return 0;
* FDIR flow to lookup.
*
* @return
- * Pointer of flow if found, NULL otherwise.
+ * Index of flow if found, 0 otherwise.
*/
-static struct rte_flow *
+static uint32_t
flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = NULL;
-
- assert(fdir_flow);
- TAILQ_FOREACH(flow, &priv->flows, next) {
- if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
- DRV_LOG(DEBUG, "port %u found FDIR flow %p",
- dev->data->port_id, (void *)flow);
+ uint32_t flow_idx = 0;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+
+ MLX5_ASSERT(fdir_flow);
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+ if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
+ DRV_LOG(DEBUG, "port %u found FDIR flow %u",
+ dev->data->port_id, flow_idx);
+ flow_idx = priv_fdir_flow->rix_flow;
break;
}
}
- return flow;
+ return flow_idx;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+ uint32_t flow_idx;
int ret;
- fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
+ fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
+ SOCKET_ID_ANY);
if (!fdir_flow) {
rte_errno = ENOMEM;
return -rte_errno;
ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
if (ret)
goto error;
- flow = flow_fdir_filter_lookup(dev, fdir_flow);
- if (flow) {
+ flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
+ if (flow_idx) {
rte_errno = EEXIST;
goto error;
}
- flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
- fdir_flow->items, fdir_flow->actions, true,
- NULL);
+ priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_fdir_flow),
+ 0, SOCKET_ID_ANY);
+ if (!priv_fdir_flow) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+ fdir_flow->items, fdir_flow->actions, true,
+ NULL);
+ flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
if (!flow)
goto error;
- assert(!flow->fdir);
- flow->fdir = fdir_flow;
+ flow->fdir = 1;
+ priv_fdir_flow->fdir = fdir_flow;
+ priv_fdir_flow->rix_flow = flow_idx;
+ LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
DRV_LOG(DEBUG, "port %u created FDIR flow %p",
dev->data->port_id, (void *)flow);
return 0;
error:
- rte_free(fdir_flow);
+ mlx5_free(priv_fdir_flow);
+ mlx5_free(fdir_flow);
return -rte_errno;
}
const struct rte_eth_fdir_filter *fdir_filter)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow;
+ uint32_t flow_idx;
struct mlx5_fdir fdir_flow = {
.attr.group = 0,
};
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
int ret;
ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
if (ret)
return -rte_errno;
- flow = flow_fdir_filter_lookup(dev, &fdir_flow);
- if (!flow) {
- rte_errno = ENOENT;
- return -rte_errno;
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+		/* Find the FDIR entry in the private list. */
+ if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
+ break;
}
- flow_list_destroy(dev, &priv->flows, flow);
- DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
- dev->data->port_id, (void *)flow);
+ if (!priv_fdir_flow)
+ return 0;
+ LIST_REMOVE(priv_fdir_flow, next);
+ flow_idx = priv_fdir_flow->rix_flow;
+ flow_list_destroy(dev, &priv->flows, flow_idx);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
+ DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
+ dev->data->port_id, flow_idx);
return 0;
}
flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->flows);
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+
+ while (!LIST_EMPTY(&priv->fdir_flows)) {
+ priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
+ LIST_REMOVE(priv_fdir_flow, next);
+ flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
+ mlx5_free(priv_fdir_flow->fdir);
+ mlx5_free(priv_fdir_flow);
+ }
}
/**
*
* @param[in] dev
* Pointer to Ethernet device.
+ * @param[in] fm
+ * Pointer to the flow meter.
*
* @return
* Pointer to table set on success, NULL otherwise.
*/
struct mlx5_meter_domains_infos *
-mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev)
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
+ const struct mlx5_flow_meter *fm)
{
const struct mlx5_flow_driver_ops *fops;
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->create_mtr_tbls(dev);
+ return fops->create_mtr_tbls(dev, fm);
}
/**
return fops->destroy_policer_rules(dev, fm, attr);
}
+/**
+ * Allocate a counter.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ *   Index of the allocated counter on success, 0 otherwise.
+ */
+uint32_t
+mlx5_counter_alloc(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { .transfer = 0 };
+
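+	/* Counter manipulation is implemented only by the DV (DevX) flow driver. */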
+ if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->counter_alloc(dev);
+ }
+ DRV_LOG(ERR,
+ "port %u counter allocate is not supported.",
+ dev->data->port_id);
+ return 0;
+}
+
+/**
+ * Free a counter.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] cnt
+ *   Index of the counter to be freed.
+ */
+void
+mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { .transfer = 0 };
+
+ if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ fops->counter_free(dev, cnt);
+ return;
+ }
+ DRV_LOG(ERR,
+ "port %u counter free is not supported.",
+ dev->data->port_id);
+}
+
+/**
+ * Query counter statistics.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] cnt
+ *   Index of the counter to query.
+ * @param[in] clear
+ *   Set to clear the counter statistics.
+ * @param[out] pkts
+ *   Where the number of packet hits is saved.
+ * @param[out] bytes
+ *   Where the number of byte hits is saved.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
+ bool clear, uint64_t *pkts, uint64_t *bytes)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { .transfer = 0 };
+
+ if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->counter_query(dev, cnt, clear, pkts, bytes);
+ }
+ DRV_LOG(ERR,
+ "port %u counter query is not supported.",
+ dev->data->port_id);
+ return -ENOTSUP;
+}
+
#define MLX5_POOL_QUERY_FREQ_US 1000000
+/**
+ * Get the number of all valid pools.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object.
+ *
+ * @return
+ *   The number of all valid pools.
+ */
+static uint32_t
+mlx5_get_all_valid_pool_count(struct mlx5_dev_ctx_shared *sh)
+{
+ int i;
+ uint32_t pools_n = 0;
+
+ for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i)
+ pools_n += rte_atomic16_read(&sh->cmng.ccont[i].n_valid);
+ return pools_n;
+}
+
/**
* Set the periodic procedure for triggering asynchronous batch queries for all
* the counter pools.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
void
-mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
+mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
- uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
- uint32_t us;
+ uint32_t pools_n, us;
- cont = MLX5_CNT_CONTAINER(sh, 1, 0);
- pools_n += rte_atomic16_read(&cont->n_valid);
+ pools_n = mlx5_get_all_valid_pool_count(sh);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
void
mlx5_flow_query_alarm(void *arg)
{
- struct mlx5_ibv_shared *sh = arg;
+ struct mlx5_dev_ctx_shared *sh = arg;
struct mlx5_devx_obj *dcs;
uint16_t offset;
int ret;
uint8_t batch = sh->cmng.batch;
+ uint8_t age = sh->cmng.age;
uint16_t pool_index = sh->cmng.pool_index;
struct mlx5_pools_container *cont;
- struct mlx5_pools_container *mcont;
struct mlx5_flow_counter_pool *pool;
+ int cont_loop = MLX5_CCONT_TYPE_MAX;
if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
goto set_alarm;
next_container:
- cont = MLX5_CNT_CONTAINER(sh, batch, 1);
- mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
- /* Check if resize was done and need to flip a container. */
- if (cont != mcont) {
- if (cont->pools) {
- /* Clean the old container. */
- rte_free(cont->pools);
- memset(cont, 0, sizeof(*cont));
- }
- rte_cio_wmb();
- /* Flip the host container. */
- sh->cmng.mhi[batch] ^= (uint8_t)2;
- cont = mcont;
- }
+ cont = MLX5_CNT_CONTAINER(sh, batch, age);
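+	/* Hold the resize lock so the pools array stays stable while sampled. */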
+ rte_spinlock_lock(&cont->resize_sl);
if (!cont->pools) {
- /* 2 empty containers case is unexpected. */
- if (unlikely(batch != sh->cmng.batch))
+ rte_spinlock_unlock(&cont->resize_sl);
+ /* Check if all the containers are empty. */
+ if (unlikely(--cont_loop == 0))
goto set_alarm;
batch ^= 0x1;
pool_index = 0;
+ if (batch == 0 && pool_index == 0) {
+ age ^= 0x1;
+ sh->cmng.batch = batch;
+ sh->cmng.age = age;
+ }
goto next_container;
}
pool = cont->pools[pool_index];
+ rte_spinlock_unlock(&cont->resize_sl);
if (pool->raw_hw)
/* There is a pool query in progress. */
goto set_alarm;
goto set_alarm;
dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
(&pool->a64_dcs);
+ if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) {
+ /* Pool without valid counter. */
+ pool->raw_hw = NULL;
+ goto next_pool;
+ }
offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+ /*
+ * Identify the counters released between query trigger and query
+	 * handle more efficiently. A counter released in this gap period
+	 * should wait for a new round of query, as the newly arrived packets
+ * will not be taken into account.
+ */
+ pool->query_gen++;
ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
offset, NULL, NULL,
pool->raw_hw->mem_mng->dm->id,
pool->raw_hw->min_dcs_id = dcs->id;
LIST_REMOVE(pool->raw_hw, next);
sh->cmng.pending_queries++;
+next_pool:
pool_index++;
if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
batch ^= 0x1;
pool_index = 0;
+ if (batch == 0 && pool_index == 0)
+ age ^= 0x1;
}
set_alarm:
sh->cmng.batch = batch;
sh->cmng.pool_index = pool_index;
+ sh->cmng.age = age;
mlx5_set_query_alarm(sh);
}
+/**
+ * Check for newly aged flows in the counter pool and raise the aging event.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] pool
+ *   Pointer to the current counter pool.
+ */
+static void
+mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_flow_counter_pool *pool)
+{
+ struct mlx5_priv *priv;
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param;
+ struct mlx5_counter_stats_raw *cur = pool->raw_hw;
+ struct mlx5_counter_stats_raw *prev = pool->raw;
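+	/* Time below is kept in ~0.1 second units, truncated to 16 bits. */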
+ uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10);
+ uint32_t i;
+
+ for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ cnt = MLX5_POOL_GET_CNT(pool, i);
+ age_param = MLX5_CNT_TO_AGE(cnt);
+ if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE)
+ continue;
+ if (cur->data[i].hits != prev->data[i].hits) {
+ age_param->expire = curr + age_param->timeout;
+ continue;
+ }
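+		/* Not yet expired if the 16-bit difference wraps into the upper half. */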
+ if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2))
+ continue;
+		/*
+		 * Hold the lock first; otherwise, if the release
+		 * happens between setting the AGE_TMOUT state and
+		 * the tailq operation, the release procedure may
+		 * delete a non-existent tailq node.
+ */
+ priv = rte_eth_devices[age_param->port_id].data->dev_private;
+ age_info = GET_PORT_AGE_INFO(priv);
+ rte_spinlock_lock(&age_info->aged_sl);
+		/* If the cmpset fails, the release has happened. */
+ if (rte_atomic16_cmpset((volatile uint16_t *)
+ &age_param->state,
+ AGE_CANDIDATE,
+ AGE_TMOUT) ==
+ AGE_CANDIDATE) {
+ TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
+ MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
+ }
+ rte_spinlock_unlock(&age_info->aged_sl);
+ }
+ for (i = 0; i < sh->max_port; i++) {
+ age_info = &sh->port[i].age_info;
+ if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
+ continue;
+ if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+ _rte_eth_dev_callback_process
+ (&rte_eth_devices[sh->port[i].devx_ih_port_id],
+ RTE_ETH_EVENT_FLOW_AGED, NULL);
+ age_info->flags = 0;
+ }
+}
+
/**
* Handler for the HW respond about ready values from an asynchronous batch
* query. This function is probably called by the host thread.
*
* @param[in] sh
- * The pointer to the shared IB device context.
+ * The pointer to the shared device context.
* @param[in] async_id
* The Devx async ID.
* @param[in] status
* The status of the completion.
*/
void
-mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
uint64_t async_id, int status)
{
struct mlx5_flow_counter_pool *pool =
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
struct mlx5_counter_stats_raw *raw_to_free;
+ uint8_t age = !!IS_AGE_POOL(pool);
+ uint8_t query_gen = pool->query_gen ^ 1;
+ struct mlx5_pools_container *cont =
+ MLX5_CNT_CONTAINER(sh, !IS_EXT_POOL(pool), age);
if (unlikely(status)) {
raw_to_free = pool->raw_hw;
} else {
raw_to_free = pool->raw;
+ if (IS_AGE_POOL(pool))
+ mlx5_flow_aging_check(sh, pool);
rte_spinlock_lock(&pool->sl);
pool->raw = pool->raw_hw;
rte_spinlock_unlock(&pool->sl);
- rte_atomic64_add(&pool->query_gen, 1);
/* Be sure the new raw counters data is updated in memory. */
rte_cio_wmb();
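+		/*
+		 * Counters released before the last query trigger can be
+		 * reused now; return them to the container's free list.
+		 */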
+ if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_CONCAT(&cont->counters,
+ &pool->counters[query_gen], next);
+ rte_spinlock_unlock(&cont->csl);
+ }
}
LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
pool->raw_hw = NULL;
* Value is part of flow rule created by request external to PMD.
* @param[in] group
* rte_flow group index value.
+ * @param[in] fdb_def_rule
+ * Whether fdb jump to table 1 is configured.
* @param[out] table
* HW table value.
* @param[out] error
*/
int
mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
- uint32_t group, uint32_t *table,
+ uint32_t group, bool fdb_def_rule, uint32_t *table,
struct rte_flow_error *error)
{
- if (attributes->transfer && external) {
+ if (attributes->transfer && external && fdb_def_rule) {
if (group == UINT32_MAX)
return rte_flow_error_set
(error, EINVAL,
};
struct rte_flow_action actions[] = {
[0] = {
- .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .type = (enum rte_flow_action_type)
+ MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
.conf = &(struct mlx5_flow_action_copy_mreg){
.src = REG_C_1,
.dst = idx,
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
+ uint32_t flow_idx;
struct rte_flow *flow;
struct rte_flow_error error;
if (!config->dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
- flow = flow_list_create(dev, NULL, &attr, items,
- actions, false, &error);
+ flow_idx = flow_list_create(dev, NULL, &attr, items,
+ actions, false, &error);
+ flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ flow_idx);
if (!flow)
continue;
if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
config->flow_mreg_c[n++] = idx;
- flow_list_destroy(dev, NULL, flow);
+ flow_list_destroy(dev, NULL, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
config->flow_mreg_c[n] = REG_NONE;
return 0;
}
+
+/**
+ * Dump flow raw HW data to a file.
+ *
+ * @param[in] dev
+ * The pointer to Ethernet device.
+ * @param[in] file
+ * A pointer to a file for output.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump(struct rte_eth_dev *dev,
+ FILE *file,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+ return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
+ sh->tx_domain, file);
+}
+
+/**
+ * Get aged-out flows.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] contexts
+ *   The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ *   The length of the contexts array.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ *   The number of aged-out contexts on success, a negative errno
+ *   value otherwise. If nb_contexts is 0, return the total number
+ *   of all aged contexts; otherwise, return the number of aged
+ *   flows reported in the contexts array.
+ */
+int
+mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
+ uint32_t nb_contexts, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { .transfer = 0 };
+
+ if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->get_aged_flows(dev, contexts, nb_contexts,
+ error);
+ }
+ DRV_LOG(ERR,
+ "port %u get aged flows is not supported.",
+ dev->data->port_id);
+ return -ENOTSUP;
+}