.tunnel = MLX5_FLOW_LAYER_VXLAN,
.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_GENEVE,
+ .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
+ },
{
.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
},
};
+/**
+ * Translate tag ID to register.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] feature
+ * The feature that requests the register.
+ * @param[in] id
+ * The requested register ID.
+ * @param[out] error
+ * Error description in case of failure.
+ *
+ * @return
+ * The requested register on success, a negative errno
+ * value otherwise and rte_errno is set.
+ */
+enum modify_reg
+mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
+ enum mlx5_feature_name feature,
+ uint32_t id,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ enum modify_reg start_reg;
+
+ switch (feature) {
+ case MLX5_HAIRPIN_RX:
+ return REG_B;
+ case MLX5_HAIRPIN_TX:
+ return REG_A;
+ case MLX5_METADATA_RX:
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_B;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_0;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_1;
+ }
+ break;
+ case MLX5_METADATA_TX:
+ return REG_A;
+ case MLX5_METADATA_FDB:
+ return REG_C_0;
+ case MLX5_FLOW_MARK:
+ switch (config->dv_xmeta_en) {
+ case MLX5_XMETA_MODE_LEGACY:
+ return REG_NONE;
+ case MLX5_XMETA_MODE_META16:
+ return REG_C_1;
+ case MLX5_XMETA_MODE_META32:
+ return REG_C_0;
+ }
+ break;
+ case MLX5_COPY_MARK:
+ case MLX5_MTR_SFX:
+ /*
+ * The metadata COPY_MARK register is used only in the meter
+ * suffix subflow when a meter is present, so it is safe to
+ * share the same register.
+ */
+ return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
+ case MLX5_MTR_COLOR:
+ RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+ return priv->mtr_color_reg;
+ case MLX5_APP_TAG:
+ /*
+ * If a meter is enabled, it engages two registers: one for
+ * color match and one for flow match. If the meter color match
+ * does not use REG_C_2, the REG_C_x used by the meter color
+ * match must be skipped.
+ * If no meter is enabled, all available registers are free to use.
+ */
+ if (priv->mtr_color_reg != REG_NONE)
+ start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_3 :
+ REG_C_4;
+ else
+ start_reg = REG_C_2;
+ if (id > (REG_C_7 - start_reg))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "invalid tag id");
+ if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unsupported tag id");
+ /*
+ * This case means the meter is using a REG_C_x greater than
+ * REG_C_2. Take care not to conflict with the meter color REG_C_x.
+ * If the available index REG_C_y >= REG_C_x, skip the
+ * color register.
+ */
+ if (start_reg == REG_C_3 && config->flow_mreg_c
+ [id + REG_C_3 - REG_C_0] >= priv->mtr_color_reg) {
+ if (config->flow_mreg_c[id + 1 + REG_C_3 - REG_C_0] !=
+ REG_NONE)
+ return config->flow_mreg_c
+ [id + 1 + REG_C_3 - REG_C_0];
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unsupported tag id");
+ }
+ return config->flow_mreg_c[id + start_reg - REG_C_0];
+ }
+ assert(false);
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid feature name");
+}
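+
+/*
+ * Usage sketch (illustrative only, mirrors flow_mreg_add_copy_action()
+ * below): callers keep the result in a signed int and test it before
+ * use, e.g. to resolve the register backing the MARK action:
+ *
+ *   ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ *   if (ret < 0)
+ *           return NULL; // rte_errno and *error are already set
+ *   tag_spec.id = ret;   // REG_C_1 under MLX5_XMETA_MODE_META16
+ */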
+
+/**
+ * Check extensive flow metadata register support.
+ *
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * True if device supports extensive flow metadata register, otherwise false.
+ */
+bool
+mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+
+ /*
+ * Having available reg_c can be regarded inclusively as supporting
+ * extensive flow metadata registers, which could mean,
+ * - metadata register copy action by modify header.
+ * - 16 modify header actions are supported.
+ * - reg_c's are preserved across different domains (FDB and NIC) on
+ * packet loopback by flow lookup miss.
+ */
+ return config->flow_mreg_c[2] != REG_NONE;
+}
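+
+/*
+ * Illustrative guard (mirrors the checks used by the register copy code
+ * below): the extensive metadata paths are engaged only when all of the
+ * following hold:
+ *
+ *   if (priv->config.dv_flow_en &&
+ *       priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ *       mlx5_flow_ext_mreg_supported(dev))
+ *           ... use metadata register copy flows ...
+ */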
+
/**
* Discover the maximum number of priority available.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(flow->actions &
+ const int mark = !!(dev_flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
+ int idx = (*flow->rss.queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
- if (mark) {
+ /*
+ * To support metadata register copy on Tx loopback,
+ * this must always be enabled (metadata may arrive
+ * from another port - not only from local flows).
+ */
+ if (priv->config.dv_flow_en &&
+ priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev)) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n = 1;
+ } else if (mark) {
rxq_ctrl->rxq.mark = 1;
rxq_ctrl->flow_mark_n++;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(flow->actions &
+ const int mark = !!(dev_flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
assert(dev->data->dev_started);
for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
+ int idx = (*flow->rss.queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
- if (mark) {
+ if (priv->config.dv_flow_en &&
+ priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev)) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n = 1;
+ } else if (mark) {
rxq_ctrl->flow_mark_n--;
rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
}
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"multiple L2 layers not supported");
- if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
+ if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
+ (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L2 layer should not follow "
+ "L3 layers");
+ if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
+ (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "inner L2 layer should not "
- "follow inner L3 layers");
+ "L2 layer should not follow VLAN");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
MLX5_FLOW_LAYER_OUTER_VLAN;
- const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
if (item_flags & vlanm)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
else if ((item_flags & l34m) != 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "L2 layer cannot follow L3/L4 layer");
- else if ((item_flags & l2m) == 0)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "no L2 layer before VLAN");
+ "VLAN cannot follow L3/L4 layer");
if (!mask)
mask = &rte_flow_item_vlan_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv4 *acc_mask,
struct rte_flow_error *error)
{
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV4)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv4 cannot follow L2/VLAN layer "
+ "which ether type is not IPv4");
if (item_flags & MLX5_FLOW_LAYER_IPIP) {
if (mask && spec)
next_proto = mask->hdr.next_proto_id &
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an NVGRE layer.");
- else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "no L2 layer before IPV4");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
else if (mask->hdr.next_proto_id != 0 &&
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
const struct rte_flow_item_ipv6 *acc_mask,
struct rte_flow_error *error)
{
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
uint8_t next_proto = 0xFF;
+ const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN |
+ MLX5_FLOW_LAYER_INNER_VLAN);
+ if ((last_item & l2_vlan) && ether_type &&
+ ether_type != RTE_ETHER_TYPE_IPV6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv6 cannot follow L2/VLAN layer "
+ "which ether type is not IPv6");
if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
if (mask && spec)
next_proto = mask->hdr.proto & spec->hdr.proto;
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an NVGRE layer.");
- else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "no L2 layer before IPV6");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
return 0;
}
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_geneve *spec = item->spec;
+ const struct rte_flow_item_geneve *mask = item->mask;
+ int ret;
+ uint16_t gbhdr;
+ uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+ const struct rte_flow_item_geneve nic_mask = {
+ .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+ .vni = "\xff\xff\xff",
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+
+ if (!(priv->config.hca_attr.flex_parser_protocols &
+ MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+ !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Geneve is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ /*
+ * Verify an outer UDP layer is present,
+ * as Geneve is encapsulated in UDP.
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_geneve_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_geneve), error);
+ if (ret)
+ return ret;
+ if (spec) {
+ gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+ if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+ MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+ MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Geneve protocol unsupported"
+ " fields are being used");
+ if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported Geneve options length");
+ }
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve tunnel must be fully defined");
+ return 0;
+}
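+
+/*
+ * Illustrative pattern accepted by the validator above, assuming the
+ * usual outer ETH/IPV4/UDP items precede the Geneve item:
+ *
+ *   struct rte_flow_item_geneve geneve_spec = {
+ *           .vni = "\x00\x00\x2a", // VNI 42, fully covered by nic_mask
+ *   };
+ *   // items: ETH -> IPV4 -> UDP -> GENEVE(&geneve_spec) -> END
+ */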
+
/**
* Validate MPLS item.
*
return 0;
}
+/* Allocate unique ID for the split Q/RSS subflows. */
+static uint32_t
+flow_qrss_get_id(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t qrss_id, ret;
+
+ ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
+ if (ret)
+ return 0;
+ assert(qrss_id);
+ return qrss_id;
+}
+
+/* Free unique ID for the split Q/RSS subflows. */
+static void
+flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (qrss_id)
+ mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
+}
+
+/**
+ * Release resources related to the QUEUE/RSS action split.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Flow to release id's from.
+ */
+static void
+flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ if (dev_flow->qrss_id)
+ flow_qrss_free_id(dev, dev_flow->qrss_id);
+}
+
static int
flow_null_validate(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_attr *attr __rte_unused,
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
+ flow_mreg_split_qrss_release(dev, flow);
assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
}
/**
- * Create a flow and add it to @p list.
+ * Get QUEUE/RSS action from the action list.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] qrss
+ * Pointer to store the detected QUEUE/RSS action, left
+ * untouched if no QUEUE/RSS action is found.
+ *
+ * @return
+ * Total number of actions.
+ */
+static int
+flow_parse_qrss_action(const struct rte_flow_action actions[],
+ const struct rte_flow_action **qrss)
+{
+ int actions_n = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ *qrss = actions;
+ break;
+ default:
+ break;
+ }
+ actions_n++;
+ }
+ /* Count RTE_FLOW_ACTION_TYPE_END. */
+ return actions_n + 1;
+}
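+
+/*
+ * Example (illustrative): for the action list [COUNT, RSS, END] the
+ * function sets *qrss to the RSS action and returns 3 (two actions
+ * plus the terminating END).
+ */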
+
+/**
+ * Check if the flow should be split due to hairpin.
+ * The reason for the split is that in current HW we can't
+ * support encap on Rx, so if a flow has encap we move it
+ * to Tx.
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
- * @param[in] items
- * Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
- * @param[in] external
- * This flow rule is created by request external to PMD.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise and rte_errno is set.
+ * > 0 the number of actions if the flow should be split,
+ * 0 when no split is required.
*/
+static int
+flow_check_hairpin_split(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[])
+{
+ int queue_action = 0;
+ int action_n = 0;
+ int encap = 0;
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action_raw_encap *raw_encap;
+
+ if (!attr->ingress)
+ return 0;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+ if (mlx5_rxq_get_type(dev, queue->index) !=
+ MLX5_RXQ_TYPE_HAIRPIN)
+ return 0;
+ queue_action = 1;
+ action_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
+ MLX5_RXQ_TYPE_HAIRPIN)
+ return 0;
+ queue_action = 1;
+ action_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap = 1;
+ action_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size >
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4)))
+ encap = 1;
+ action_n++;
+ break;
+ default:
+ action_n++;
+ break;
+ }
+ }
+ if (encap == 1 && queue_action)
+ return action_n;
+ return 0;
+}
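+
+/*
+ * Example (illustrative): an ingress flow with actions
+ * [VXLAN_ENCAP, QUEUE(hairpin queue), END] makes the function return 2,
+ * so the caller splits the flow; the same actions targeting a regular
+ * queue make it return 0.
+ */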
+
+/* Declare flow create/destroy prototypes in advance. */
static struct rte_flow *
flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- bool external, struct rte_flow_error *error)
+ bool external, struct rte_flow_error *error);
+
+static void
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow);
+
+/**
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * increase the reference counter of the resource and return it. Otherwise,
+ * create the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ * - If ingress port is ANY and reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For default flow (zero mark_id), flow is like,
+ * - If ingress port is ANY,
+ * reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mark_id
+ * ID of MARK action, zero means default flow for META.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Associated resource on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+ struct rte_flow_error *error)
{
- struct rte_flow *flow = NULL;
- struct mlx5_flow *dev_flow;
- const struct rte_flow_action_rss *rss;
- union {
- struct rte_flow_expand_rss buf;
- uint8_t buffer[2048];
- } expand_buffer;
- struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_attr attr = {
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ .ingress = 1,
+ };
+ struct mlx5_rte_flow_item_tag tag_spec = {
+ .data = mark_id,
+ };
+ struct rte_flow_item items[] = {
+ [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
+ };
+ struct rte_flow_action_mark ftag = {
+ .id = mark_id,
+ };
+ struct mlx5_flow_action_copy_mreg cp_mreg = {
+ .dst = REG_B,
+ .src = 0,
+ };
+ struct rte_flow_action_jump jump = {
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ };
+ struct rte_flow_action actions[] = {
+ [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
+ };
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
int ret;
- uint32_t i;
- uint32_t flow_size;
- ret = flow_drv_validate(dev, attr, items, actions, external, error);
+ /* Fill the register fields in the flow. */
+ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
- flow_size = sizeof(struct rte_flow);
- rss = flow_get_rss_action(actions);
- if (rss)
- flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
- sizeof(void *));
- else
- flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
- flow = rte_calloc(__func__, 1, flow_size, 0);
- if (!flow) {
- rte_errno = ENOMEM;
+ tag_spec.id = ret;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
+ if (ret < 0)
return NULL;
+ cp_mreg.src = ret;
+ /* Check if already registered. */
+ assert(priv->mreg_cp_tbl);
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
+ if (mcp_res) {
+ /* For non-default rule. */
+ if (mark_id)
+ mcp_res->refcnt++;
+ assert(mark_id || mcp_res->refcnt == 1);
+ return mcp_res;
}
- flow->drv_type = flow_get_drv_type(dev, attr);
- flow->ingress = attr->ingress;
- flow->transfer = attr->transfer;
- assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
- flow->drv_type < MLX5_FLOW_TYPE_MAX);
- flow->queue = (void *)(flow + 1);
- LIST_INIT(&flow->dev_flows);
- if (rss && rss->types) {
- unsigned int graph_root;
-
- graph_root = find_graph_root(items, rss->level);
- ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
- items, rss->types,
- mlx5_support_expansion,
- graph_root);
- assert(ret > 0 &&
- (unsigned int)ret < sizeof(expand_buffer.buffer));
+ /* Provide the full width of FLAG specific value. */
+ if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
+ tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
+ /* Build a new flow. */
+ if (mark_id) {
+ items[0] = (struct rte_flow_item){
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_spec,
+ };
+ items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ actions[0] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
+ .conf = &ftag,
+ };
+ actions[1] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &cp_mreg,
+ };
+ actions[2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ };
+ actions[3] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
} else {
- buf->entries = 1;
+ /* Default rule, wildcard match. */
+ attr.priority = MLX5_FLOW_PRIO_RSVD;
+ items[0] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ actions[0] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &cp_mreg,
+ };
+ actions[1] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ };
+ actions[2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ }
+ /* Build a new entry. */
+ mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
+ if (!mcp_res) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ /*
+ * The copy Flows are not included in any list. These
+ * ones are referenced from other Flows and cannot
+ * be applied, removed, or deleted in arbitrary order
+ * by list traversing.
+ */
+ mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
+ actions, false, error);
+ if (!mcp_res->flow)
+ goto error;
+ mcp_res->refcnt++;
+ mcp_res->hlist_ent.key = mark_id;
+ ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
+ &mcp_res->hlist_ent);
+ assert(!ret);
+ if (ret)
+ goto error;
+ return mcp_res;
+error:
+ if (mcp_res->flow)
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ rte_free(mcp_res);
+ return NULL;
+}
+
+/**
+ * Release flow in RX_CP_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Parent flow for which copying is provided.
+ */
+static void
+flow_mreg_del_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!mcp_res || !priv->mreg_cp_tbl)
+ return;
+ if (flow->copy_applied) {
+ assert(mcp_res->appcnt);
+ flow->copy_applied = 0;
+ --mcp_res->appcnt;
+ if (!mcp_res->appcnt)
+ flow_drv_remove(dev, mcp_res->flow);
+ }
+ /*
+ * We do not check availability of metadata registers here,
+ * because copy resources are allocated in this case.
+ */
+ if (--mcp_res->refcnt)
+ return;
+ assert(mcp_res->flow);
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+ rte_free(mcp_res);
+ flow->mreg_copy = NULL;
+}
+
+/**
+ * Start flow in RX_CP_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Parent flow for which copying is provided.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_mreg_start_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ int ret;
+
+ if (!mcp_res || flow->copy_applied)
+ return 0;
+ if (!mcp_res->appcnt) {
+ ret = flow_drv_apply(dev, mcp_res->flow, NULL);
+ if (ret)
+ return ret;
+ }
+ ++mcp_res->appcnt;
+ flow->copy_applied = 1;
+ return 0;
+}
+
+/**
+ * Stop flow in RX_CP_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Parent flow for which copying is provided.
+ */
+static void
+flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+
+ if (!mcp_res || !flow->copy_applied)
+ return;
+ assert(mcp_res->appcnt);
+ --mcp_res->appcnt;
+ flow->copy_applied = 0;
+ if (!mcp_res->appcnt)
+ flow_drv_remove(dev, mcp_res->flow);
+}
+
+/**
+ * Remove the default copy action from RX_CP_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ /* Check if default flow is registered. */
+ if (!priv->mreg_cp_tbl)
+ return;
+ mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, 0ULL);
+ if (!mcp_res)
+ return;
+ assert(mcp_res->flow);
+ flow_list_destroy(dev, NULL, mcp_res->flow);
+ mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+ rte_free(mcp_res);
+}
+
+/**
+ * Add the default copy action in RX_CP_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 for success, negative value otherwise and rte_errno is set.
+ */
+static int
+flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+
+ /* Check whether extensive metadata feature is engaged. */
+ if (!priv->config.dv_flow_en ||
+ priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev) ||
+ !priv->sh->dv_regc0_mask)
+ return 0;
+ mcp_res = flow_mreg_add_copy_action(dev, 0, error);
+ if (!mcp_res)
+ return -rte_errno;
+ return 0;
+}
+
+/**
+ * Update the flow metadata register copy table (RX_CP_TBL).
+ *
+ * All the flows having a Q/RSS action should be split by
+ * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
+ * performs the following,
+ * - CQE->flow_tag := reg_c[1] (MARK)
+ * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * As CQE's flow_tag is not a register, it can't simply be copied from reg_c[1];
+ * there should be a flow per each MARK ID set by the MARK action.
+ *
+ * For the aforementioned reason, if there's a MARK action in flow's action
+ * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
+ * the MARK ID to CQE's flow_tag like,
+ * - If reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For SET_META action which stores value in reg_c[0], as the destination is
+ * also a flow metadata register (reg_b), adding a default flow is enough. Zero
+ * MARK ID means the default flow. The default flow looks like,
+ * - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to flow structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+flow_mreg_update_copy_table(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ const struct rte_flow_action_mark *mark;
+
+ /* Check whether extensive metadata feature is engaged. */
+ if (!config->dv_flow_en ||
+ config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev) ||
+ !priv->sh->dv_regc0_mask)
+ return 0;
+ /* Find MARK action. */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ mcp_res = flow_mreg_add_copy_action
+ (dev, MLX5_FLOW_MARK_DEFAULT, error);
+ if (!mcp_res)
+ return -rte_errno;
+ flow->mreg_copy = mcp_res;
+ if (dev->data->dev_started) {
+ mcp_res->appcnt++;
+ flow->copy_applied = 1;
+ }
+ return 0;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark = (const struct rte_flow_action_mark *)
+ actions->conf;
+ mcp_res =
+ flow_mreg_add_copy_action(dev, mark->id, error);
+ if (!mcp_res)
+ return -rte_errno;
+ flow->mreg_copy = mcp_res;
+ if (dev->data->dev_started) {
+ mcp_res->appcnt++;
+ flow->copy_applied = 1;
+ }
+ return 0;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+#define MLX5_MAX_SPLIT_ACTIONS 24
+#define MLX5_MAX_SPLIT_ITEMS 24
+
+/**
+ * Split the hairpin flow.
+ * Since HW can't support encap on Rx we move the encap to Tx.
+ * If the count action is after the encap then we also
+ * move the count action. In this case the count will also
+ * measure the outer bytes.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] actions_rx
+ * Rx flow actions.
+ * @param[out] actions_tx
+ * Tx flow actions.
+ * @param[out] pattern_tx
+ * The pattern items for the Tx flow.
+ * @param[out] flow_id
+ * The flow ID connected to this flow.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+flow_hairpin_split(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_action actions_rx[],
+ struct rte_flow_action actions_tx[],
+ struct rte_flow_item pattern_tx[],
+ uint32_t *flow_id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_raw_encap *raw_encap;
+ const struct rte_flow_action_raw_decap *raw_decap;
+ struct mlx5_rte_flow_action_set_tag *set_tag;
+ struct rte_flow_action *tag_action;
+ struct mlx5_rte_flow_item_tag *tag_item;
+ struct rte_flow_item *item;
+ char *addr;
+ int encap = 0;
+
+ mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ rte_memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (encap) {
+ rte_memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ raw_encap = actions->conf;
+ if (raw_encap->size >
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4))) {
+ memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ encap = 1;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ raw_decap = actions->conf;
+ if (raw_decap->size <
+ (sizeof(struct rte_flow_item_eth) +
+ sizeof(struct rte_flow_item_ipv4))) {
+ memcpy(actions_tx, actions,
+ sizeof(struct rte_flow_action));
+ actions_tx++;
+ } else {
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ }
+ break;
+ default:
+ rte_memcpy(actions_rx, actions,
+ sizeof(struct rte_flow_action));
+ actions_rx++;
+ break;
+ }
+ }
+ /* Add set tag action and end action for the Rx flow. */
+ tag_action = actions_rx;
+ tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+ actions_rx++;
+ rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
+ actions_rx++;
+ set_tag = (void *)actions_rx;
+ set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
+ assert(set_tag->id > REG_NONE);
+ set_tag->data = *flow_id;
+ tag_action->conf = set_tag;
+ /* Create Tx item list. */
+ rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
+ addr = (void *)&pattern_tx[2];
+ item = pattern_tx;
+ item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ tag_item = (void *)addr;
+ tag_item->data = *flow_id;
+ tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
+ assert(tag_item->id > REG_NONE);
+ item->spec = tag_item;
+ addr += sizeof(struct mlx5_rte_flow_item_tag);
+ tag_item = (void *)addr;
+ tag_item->data = UINT32_MAX;
+ tag_item->id = UINT16_MAX;
+ item->mask = tag_item;
+ addr += sizeof(struct mlx5_rte_flow_item_tag);
+ item->last = NULL;
+ item++;
+ item->type = RTE_FLOW_ITEM_TYPE_END;
+ return 0;
+}
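+
+/*
+ * Resulting split (illustrative): for ingress actions
+ * [RAW_ENCAP(> L2 + IPv4 size), QUEUE, END] the Rx list becomes
+ * [QUEUE, SET_TAG(REG_B := flow_id), END], the Tx list becomes
+ * [RAW_ENCAP, END], and pattern_tx matches the tag:
+ * [TAG(REG_A == flow_id), END].
+ */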
+
+/**
+ * The last stage of the splitting chain, just creates the subflow
+ * without any modification.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ * Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
+ */
+static int
+flow_create_split_inner(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct mlx5_flow **sub_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ struct mlx5_flow *dev_flow;
+
+ dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ if (!dev_flow)
+ return -rte_errno;
+ dev_flow->flow = flow;
+ dev_flow->external = external;
+ /* Subflow object was created, we must include it in the list. */
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ if (sub_flow)
+ *sub_flow = dev_flow;
+ return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
+}
+
+/**
+ * Split action list having QUEUE/RSS for metadata register copy.
+ *
+ * Once Q/RSS action is detected in user's action list, the flow action
+ * should be split in order to copy metadata registers, which will happen in
+ * RX_CP_TBL like,
+ * - CQE->flow_tag := reg_c[1] (MARK)
+ * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
+ * This is because the last action of each flow must be a terminal action
+ * (QUEUE, RSS or DROP).
+ *
+ * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
+ * stored and kept in the mlx5_flow structure for each sub_flow.
+ *
+ * The Q/RSS action is replaced with,
+ * - SET_TAG, setting the allocated flow ID to reg_c[2].
+ * And the following JUMP action is added at the end,
+ * - JUMP, to RX_CP_TBL.
+ *
+ * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
+ * flow_create_split_metadata() routine. The flow will look like,
+ * - If flow ID matches (reg_c[2]), perform Q/RSS.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] split_actions
+ * Pointer to store split actions to jump to CP_TBL.
+ * @param[in] actions
+ * Pointer to the list of original flow actions.
+ * @param[in] qrss
+ * Pointer to the Q/RSS action.
+ * @param[in] actions_n
+ * Number of original actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * non-zero unique flow_id on success, otherwise 0 and
+ * error/rte_errno are set.
+ */
+static uint32_t
+flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
+ struct rte_flow_action *split_actions,
+ const struct rte_flow_action *actions,
+ const struct rte_flow_action *qrss,
+ int actions_n, struct rte_flow_error *error)
+{
+ struct mlx5_rte_flow_action_set_tag *set_tag;
+ struct rte_flow_action_jump *jump;
+ const int qrss_idx = qrss - actions;
+ uint32_t flow_id;
+ int ret = 0;
+
+ /*
+ * Given actions will be split
+ * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
+ * - Add jump to mreg CP_TBL.
+ * As a result, there will be one more action.
+ */
+ ++actions_n;
+ /*
+ * Allocate the new subflow ID. This one is unique within
+ * device and not shared with representors. Otherwise,
+ * we would have to resolve multi-thread access synch
+ * issue. Each flow on the shared device is appended
+ * with source vport identifier, so the resulting
+ * flows will be unique in the shared (by master and
+ * representors) domain even if they have coinciding
+ * IDs.
+ */
+ flow_id = flow_qrss_get_id(dev);
+ if (!flow_id)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't allocate id "
+ "for split Q/RSS subflow");
+ /* Internal SET_TAG action to set flow ID. */
+ set_tag = (void *)(split_actions + actions_n);
+ *set_tag = (struct mlx5_rte_flow_action_set_tag){
+ .data = flow_id,
+ };
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+ if (ret < 0) {
+ /* Return 0 on error (rte_errno is set) and release the flow ID. */
+ flow_qrss_free_id(dev, flow_id);
+ return 0;
+ }
+ set_tag->id = ret;
+ /* JUMP action to jump to mreg copy table (CP_TBL). */
+ jump = (void *)(set_tag + 1);
+ *jump = (struct rte_flow_action_jump){
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ };
+ /* Construct new actions array. */
+ memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
+ /* Replace QUEUE/RSS action. */
+ split_actions[qrss_idx] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
+ .conf = set_tag,
+ };
+ split_actions[actions_n - 2] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = jump,
+ };
+ split_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ return flow_id;
+}
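+
+/*
+ * Transformation example (illustrative): given the actions
+ * [MARK, RSS, END] (actions_n = 3), split_actions becomes
+ * [MARK, SET_TAG(reg_c[2] := flow_id, or reg_c[3] when a meter
+ * occupies reg_c[2]), JUMP(to CP_TBL), END].
+ */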
+
+/**
+ * Extend the given action list for Tx metadata copy.
+ *
+ * Copy the given action list to the ext_actions and add flow metadata register
+ * copy action in order to copy reg_a set by WQE to reg_c[0].
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] ext_actions
+ * Pointer to the extended action list.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[in] actions_n
+ * Number of actions in the list.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+static int
+flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
+ struct rte_flow_action *ext_actions,
+ const struct rte_flow_action *actions,
+ int actions_n, struct rte_flow_error *error)
+{
+ struct mlx5_flow_action_copy_mreg *cp_mreg =
+ (struct mlx5_flow_action_copy_mreg *)
+ (ext_actions + actions_n + 1);
+ int ret;
+
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
+ if (ret < 0)
+ return ret;
+ cp_mreg->dst = ret;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
+ if (ret < 0)
+ return ret;
+ cp_mreg->src = ret;
+ memcpy(ext_actions, actions,
+ sizeof(*ext_actions) * actions_n);
+ ext_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ ext_actions[actions_n] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ return 0;
+}
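+
+/*
+ * Transformation example (illustrative): given the Tx actions
+ * [SET_META, END] (actions_n = 2), ext_actions becomes
+ * [SET_META, COPY_MREG(reg_a -> reg_c[0] in META16 mode), END].
+ */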
+
+/**
+ * The splitting for the metadata feature.
+ *
+ * - Q/RSS action on NIC Rx should be split in order to pass by
+ * the mreg copy table (RX_CP_TBL) and then it jumps to the
+ * action table (RX_ACT_TBL) which has the split Q/RSS action.
+ *
+ * - All the actions on NIC Tx should have a mreg copy action to
+ * copy reg_a from WQE to reg_c[0].
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
+ */
+static int
+flow_create_split_metadata(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ const struct rte_flow_action *qrss = NULL;
+ struct rte_flow_action *ext_actions = NULL;
+ struct mlx5_flow *dev_flow = NULL;
+ uint32_t qrss_id = 0;
+ size_t act_size;
+ int actions_n;
+ int ret;
+
+ /* Check whether extensive metadata feature is engaged. */
+ if (!config->dv_flow_en ||
+ config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev))
+ return flow_create_split_inner(dev, flow, NULL, attr, items,
+ actions, external, error);
+ actions_n = flow_parse_qrss_action(actions, &qrss);
+ if (qrss) {
+ /* Exclude hairpin flows from splitting. */
+ if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue;
+
+ queue = qrss->conf;
+ if (mlx5_rxq_get_type(dev, queue->index) ==
+ MLX5_RXQ_TYPE_HAIRPIN)
+ qrss = NULL;
+ } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss;
+
+ rss = qrss->conf;
+ if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
+ MLX5_RXQ_TYPE_HAIRPIN)
+ qrss = NULL;
+ }
+ }
+ if (qrss) {
+ /*
+ * Q/RSS action on NIC Rx should be split in order to pass by
+ * the mreg copy table (RX_CP_TBL) and then it jumps to the
+ * action table (RX_ACT_TBL) which has the split Q/RSS action.
+ */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+ sizeof(struct rte_flow_action_set_tag) +
+ sizeof(struct rte_flow_action_jump);
+ ext_actions = rte_zmalloc(__func__, act_size, 0);
+ if (!ext_actions)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no memory to split "
+ "metadata flow");
+ /*
+ * Create the new actions list with the Q/RSS action removed
+ * and set tag plus jump to the register copy table
+ * (RX_CP_TBL) appended. The unique tag ID is preallocated
+ * here, because the set tag action needs it.
+ */
+ qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
+ qrss, actions_n, error);
+ if (!qrss_id) {
+ ret = -rte_errno;
+ goto exit;
+ }
+ } else if (attr->egress && !attr->transfer) {
+ /*
+ * All the actions on NIC Tx should have a metadata register
+ * copy action to copy reg_a from WQE to reg_c[meta].
+ */
+ act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
+ sizeof(struct mlx5_flow_action_copy_mreg);
+ ext_actions = rte_zmalloc(__func__, act_size, 0);
+ if (!ext_actions)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no memory to split "
+ "metadata flow");
+ /* Create the action list appended with copy register. */
+ ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
+ actions_n, error);
+ if (ret < 0)
+ goto exit;
+ }
+ /* Add the unmodified original or prefix subflow. */
+ ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
+ ext_actions ? ext_actions : actions,
+ external, error);
+ if (ret < 0)
+ goto exit;
+ assert(dev_flow);
+ if (qrss_id) {
+ const struct rte_flow_attr q_attr = {
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ .ingress = 1,
+ };
+ /* Internal PMD action to set register. */
+ struct mlx5_rte_flow_item_tag q_tag_spec = {
+ .data = qrss_id,
+ .id = 0,
+ };
+ struct rte_flow_item q_items[] = {
+ {
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &q_tag_spec,
+ .last = NULL,
+ .mask = NULL,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action q_actions[] = {
+ {
+ .type = qrss->type,
+ .conf = qrss->conf,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ uint64_t hash_fields = dev_flow->hash_fields;
+ /*
+ * Put the unique id in the prefix flow: the id is released only
+ * after the prefix flow is destroyed, i.e. when there are no
+ * actual flows left with this id, and identifier reallocation
+ * becomes possible (for example, for other flows in other threads).
+ */
+ dev_flow->qrss_id = qrss_id;
+ qrss_id = 0;
+ dev_flow = NULL;
+ ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
+ if (ret < 0)
+ goto exit;
+ q_tag_spec.id = ret;
+ /* Add suffix subflow to execute Q/RSS. */
+ ret = flow_create_split_inner(dev, flow, &dev_flow,
+ &q_attr, q_items, q_actions,
+ external, error);
+ if (ret < 0)
+ goto exit;
+ assert(dev_flow);
+ dev_flow->hash_fields = hash_fields;
+ }
+
+exit:
+ /*
+ * We do not destroy the partially created sub_flows in case of error.
+ * These ones are included in the parent flow list and will be destroyed
+ * by flow_drv_destroy.
+ */
+ flow_qrss_free_id(dev, qrss_id);
+ rte_free(ext_actions);
+ return ret;
+}
+
+/**
+ * Split the flow into a subflow set. The splitters might be linked
+ * in a chain, like this:
+ * flow_create_split_outer() calls:
+ * flow_create_split_meter() calls:
+ * flow_create_split_metadata(meter_subflow_0) calls:
+ * flow_create_split_inner(metadata_subflow_0)
+ * flow_create_split_inner(metadata_subflow_1)
+ * flow_create_split_inner(metadata_subflow_2)
+ * flow_create_split_metadata(meter_subflow_1) calls:
+ * flow_create_split_inner(metadata_subflow_0)
+ * flow_create_split_inner(metadata_subflow_1)
+ * flow_create_split_inner(metadata_subflow_2)
+ *
+ * This provides a flexible way to add new levels of flow splitting.
+ * All successfully created subflows are included in the
+ * parent flow dev_flow list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Parent flow structure pointer.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @return
+ * 0 on success, negative value otherwise
+ */
+static int
+flow_create_split_outer(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = flow_create_split_metadata(dev, flow, attr, items,
+ actions, external, error);
+ assert(ret <= 0);
+ return ret;
+}
+
+/**
+ * Create a flow and add it to @p list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param list
+ * Pointer to a TAILQ flow list. If this parameter is NULL,
+ * no list insertion occurs: the flow is just created,
+ * and it is the caller's responsibility to track the
+ * created flow.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ struct mlx5_flow *dev_flow;
+ const struct rte_flow_action_rss *rss;
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ union {
+ struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+ uint8_t buffer[2048];
+ } actions_rx;
+ union {
+ struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+ uint8_t buffer[2048];
+ } actions_hairpin_tx;
+ union {
+ struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
+ uint8_t buffer[2048];
+ } items_tx;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ const struct rte_flow_action *p_actions_rx = actions;
+ int ret;
+ uint32_t i;
+ uint32_t flow_size;
+ int hairpin_flow = 0;
+ uint32_t hairpin_id = 0;
+ struct rte_flow_attr attr_tx = { .priority = 0 };
+
+ hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+ if (hairpin_flow > 0) {
+ if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ flow_hairpin_split(dev, actions, actions_rx.actions,
+ actions_hairpin_tx.actions, items_tx.items,
+ &hairpin_id);
+ p_actions_rx = actions_rx.actions;
+ }
+ ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
+ error);
+ if (ret < 0)
+ goto error_before_flow;
+ flow_size = sizeof(struct rte_flow);
+ rss = flow_get_rss_action(p_actions_rx);
+ if (rss)
+ flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ else
+ flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+ flow = rte_calloc(__func__, 1, flow_size, 0);
+ if (!flow) {
+ rte_errno = ENOMEM;
+ goto error_before_flow;
+ }
+ flow->drv_type = flow_get_drv_type(dev, attr);
+ if (hairpin_id != 0)
+ flow->hairpin_flow_id = hairpin_id;
+ assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ flow->rss.queue = (void *)(flow + 1);
+ if (rss) {
+ /*
+ * The following information is required by
+ * mlx5_flow_hashfields_adjust() in advance.
+ */
+ flow->rss.level = rss->level;
+ /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+ flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+ }
+ LIST_INIT(&flow->dev_flows);
+ if (rss && rss->types) {
+ unsigned int graph_root;
+
+ graph_root = find_graph_root(items, rss->level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ items, rss->types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
for (i = 0; i < buf->entries; ++i) {
- dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
- actions, error);
+ /*
+ * The splitter may create multiple dev_flows,
+ * depending on configuration. In the simplest
+ * case it just creates the unmodified original flow.
+ */
+ ret = flow_create_split_outer(dev, flow, attr,
+ buf->entry[i].pattern,
+ p_actions_rx, external,
+ error);
+ if (ret < 0)
+ goto error;
+ }
+ /* Create the Tx flow. */
+ if (hairpin_flow) {
+ attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
+ attr_tx.ingress = 0;
+ attr_tx.egress = 1;
+ dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+ actions_hairpin_tx.actions, error);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
- dev_flow->external = external;
+ dev_flow->external = 0;
LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
- ret = flow_drv_translate(dev, dev_flow, attr,
- buf->entry[i].pattern,
- actions, error);
+ ret = flow_drv_translate(dev, dev_flow, &attr_tx,
+ items_tx.items,
+ actions_hairpin_tx.actions, error);
if (ret < 0)
goto error;
}
+ /*
+ * Update the metadata register copy table. If the extensive
+ * metadata feature is enabled and registers are supported
+ * we might create an extra rte_flow for each unique
+ * MARK/FLAG action ID.
+ *
+ * The table is updated for ingress Flows only, because
+ * egress Flows belong to a different device and the
+ * copy table should be updated in the peer NIC Rx domain.
+ */
+ if (attr->ingress &&
+ (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
+ ret = flow_mreg_update_copy_table(dev, flow, actions, error);
+ if (ret)
+ goto error;
+ }
if (dev->data->dev_started) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
}
- TAILQ_INSERT_TAIL(list, flow, next);
+ if (list)
+ TAILQ_INSERT_TAIL(list, flow, next);
flow_rxq_flags_set(dev, flow);
return flow;
+error_before_flow:
+ if (hairpin_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ hairpin_id);
+ return NULL;
error:
+ flow_mreg_del_copy_action(dev, flow);
ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (flow->hairpin_flow_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ flow->hairpin_flow_id);
assert(flow);
flow_drv_destroy(dev, flow);
rte_free(flow);
* @param dev
* Pointer to Ethernet device.
* @param list
- * Pointer to a TAILQ flow list.
+ * Pointer to a TAILQ flow list. If this parameter is NULL,
+ * the flow is not removed from the list.
* @param[in] flow
* Flow to destroy.
*/
flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct rte_flow *flow)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
*/
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
+ if (flow->hairpin_flow_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ flow->hairpin_flow_id);
flow_drv_destroy(dev, flow);
- TAILQ_REMOVE(list, flow, next);
+ if (list)
+ TAILQ_REMOVE(list, flow, next);
+ flow_mreg_del_copy_action(dev, flow);
rte_free(flow->fdir);
rte_free(flow);
}
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
flow_drv_remove(dev, flow);
+ flow_mreg_stop_copy_action(dev, flow);
+ }
+ flow_mreg_del_default_copy_action(dev);
flow_rxq_flags_clear(dev);
}
struct rte_flow_error error;
int ret = 0;
+ /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+ ret = flow_mreg_add_default_copy_action(dev, &error);
+ if (ret < 0)
+ return -rte_errno;
+ /* Apply Flows created by application. */
TAILQ_FOREACH(flow, list, next) {
+ ret = flow_mreg_start_copy_action(dev, flow);
+ if (ret < 0)
+ goto error;
ret = flow_drv_apply(dev, flow, &error);
if (ret < 0)
goto error;
return ret;
}
+/**
+ * Enable default hairpin egress flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queue
+ * The queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
+ uint32_t queue)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .egress = 1,
+ .priority = 0,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_spec = {
+ .queue = queue,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ .queue = UINT32_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .spec = &queue_spec,
+ .last = NULL,
+ .mask = &queue_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_jump jump = {
+ .group = MLX5_HAIRPIN_TX_TABLE,
+ };
+ struct rte_flow_action actions[2];
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
+ actions[0].conf = &jump;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+ flow = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, false, &error);
+ if (!flow) {
+ DRV_LOG(DEBUG,
+ "Failed to create ctrl flow: rte_errno(%d),"
+ " type(%d), message(%s)",
+ rte_errno, error.type,
+ error.message ? error.message : " (no stated reason)");
+ return -rte_errno;
+ }
+ return 0;
+}
+
/**
* Enable a control flow configured from the control plane.
*
return 0;
}
+/**
+ * Create the needed meter and suffix tables.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to table set on success, NULL otherwise.
+ */
+struct mlx5_meter_domains_infos *
+mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_mtr_tbls(dev);
+}
+
+/**
+ * Destroy the meter table set.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] tbl
+ * Pointer to the meter table set.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
+ struct mlx5_meter_domains_infos *tbls)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->destroy_mtr_tbls(dev, tbls);
+}
+
+/**
+ * Create policer rules.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] fm
+ * Pointer to flow meter structure.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter *fm,
+ const struct rte_flow_attr *attr)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->create_policer_rules(dev, fm, attr);
+}
+
+/**
+ * Destroy policer rules.
+ *
+ * @param[in] fm
+ * Pointer to flow meter structure.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter *fm,
+ const struct rte_flow_attr *attr)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+ return fops->destroy_policer_rules(dev, fm, attr);
+}
+
#define MLX5_POOL_QUERY_FREQ_US 1000000
/**
cont = MLX5_CNT_CONTAINER(sh, 1, 0);
pools_n += rte_atomic16_read(&cont->n_valid);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
- DRV_LOG(DEBUG, "Set alarm for %u pools each %u us\n", pools_n, us);
+ DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
sh->cmng.query_thread_on = 0;
- DRV_LOG(ERR, "Cannot reinitialize query alarm\n");
+ DRV_LOG(ERR, "Cannot reinitialize query alarm");
} else {
sh->cmng.query_thread_on = 1;
}
(uint64_t)(uintptr_t)pool);
if (ret) {
DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
- " %d\n", pool->min_dcs->id);
+ " %d", pool->min_dcs->id);
pool->raw_hw = NULL;
goto set_alarm;
}
}
return 0;
}
+
+/**
+ * Discover availability of metadata reg_c's.
+ *
+ * Iteratively use test flows to check availability.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ enum modify_reg idx;
+ int n = 0;
+
+ /* reg_c[0] and reg_c[1] are reserved. */
+ config->flow_mreg_c[n++] = REG_C_0;
+ config->flow_mreg_c[n++] = REG_C_1;
+ /* Discover availability of other reg_c's. */
+ for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
+ struct rte_flow_attr attr = {
+ .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+ .priority = MLX5_FLOW_PRIO_RSVD,
+ .ingress = 1,
+ };
+ struct rte_flow_item items[] = {
+ [0] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ [0] = {
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = &(struct mlx5_flow_action_copy_mreg){
+ .src = REG_C_1,
+ .dst = idx,
+ },
+ },
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &(struct rte_flow_action_jump){
+ .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+ },
+ },
+ [2] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ if (!config->dv_flow_en)
+ break;
+ /* Create internal flow, validation skips copy action. */
+ flow = flow_list_create(dev, NULL, &attr, items,
+ actions, false, &error);
+ if (!flow)
+ continue;
+ if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
+ config->flow_mreg_c[n++] = idx;
+ flow_list_destroy(dev, NULL, flow);
+ }
+ for (; n < MLX5_MREG_C_NUM; ++n)
+ config->flow_mreg_c[n] = REG_NONE;
+ return 0;
+}
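+
+/*
+ * Example outcome (illustrative, assuming MLX5_MREG_C_NUM == 8): if the
+ * test flows above succeed only for reg_c[2]..reg_c[4], the result is
+ * flow_mreg_c[] = { REG_C_0, REG_C_1, REG_C_2, REG_C_3, REG_C_4,
+ * REG_NONE, REG_NONE, REG_NONE }.
+ */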