[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+ [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
#endif
[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
return false;
}
+/**
+ * Network Service Header (NSH) and its next protocol values
+ * are described in RFC-8393.
+ */
+static enum rte_flow_item_type
+mlx5_nsh_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case RTE_VXLAN_GPE_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_VXLAN_GPE_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ case RTE_VXLAN_GPE_TYPE_ETH:
+ type = RTE_FLOW_ITEM_TYPE_ETH;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
+static enum rte_flow_item_type
+mlx5_inet_proto_to_item_type(uint8_t proto_spec, uint8_t proto_mask)
+{
+ enum rte_flow_item_type type;
+
+ switch (proto_mask & proto_spec) {
+ case IPPROTO_UDP:
+ type = RTE_FLOW_ITEM_TYPE_UDP;
+ break;
+ case IPPROTO_TCP:
+ type = RTE_FLOW_ITEM_TYPE_TCP;
+ break;
+ case IPPROTO_IP:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
+
+static enum rte_flow_item_type
+mlx5_ethertype_to_item_type(rte_be16_t type_spec,
+ rte_be16_t type_mask, bool is_tunnel)
+{
+ enum rte_flow_item_type type;
+
+ switch (rte_be_to_cpu_16(type_spec & type_mask)) {
+ case RTE_ETHER_TYPE_TEB:
+ type = is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_ETH : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ type = !is_tunnel ?
+ RTE_FLOW_ITEM_TYPE_VLAN : RTE_FLOW_ITEM_TYPE_END;
+ break;
+ case RTE_ETHER_TYPE_IPV4:
+ type = RTE_FLOW_ITEM_TYPE_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ type = RTE_FLOW_ITEM_TYPE_IPV6;
+ break;
+ default:
+ type = RTE_FLOW_ITEM_TYPE_END;
+ }
+ return type;
+}
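
Taken together, these three helpers replace the open-coded switch blocks
removed below. A standalone sketch of the expected mappings (hypothetical
test harness, not part of the patch; it assumes the helpers are made
visible to a unit test, e.g. by dropping static):

	#include <assert.h>
	#include <netinet/in.h>
	#include <rte_byteorder.h>
	#include <rte_ether.h>
	#include <rte_flow.h>
	#include <rte_vxlan.h>

	static void
	expansion_helper_examples(void)
	{
		/* A fully masked TCP next protocol expands to a TCP item. */
		assert(mlx5_inet_proto_to_item_type(IPPROTO_TCP, 0xff) ==
		       RTE_FLOW_ITEM_TYPE_TCP);
		/* TEB is meaningful only as a tunnel payload (GENEVE/GRE). */
		assert(mlx5_ethertype_to_item_type(RTE_BE16(RTE_ETHER_TYPE_TEB),
						   RTE_BE16(0xffff), true) ==
		       RTE_FLOW_ITEM_TYPE_ETH);
		/* An empty mask cannot identify a protocol: expansion ends. */
		assert(mlx5_nsh_proto_to_item_type(RTE_VXLAN_GPE_TYPE_IPV4, 0) ==
		       RTE_FLOW_ITEM_TYPE_END);
	}
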
+
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
- enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
- uint16_t ether_type = 0;
- uint16_t ether_type_m;
- uint8_t ip_next_proto = 0;
- uint8_t ip_next_proto_m;
+#define MLX5_XSET_ITEM_MASK_SPEC(type, fld) \
+ do { \
+ const void *m = item->mask; \
+ const void *s = item->spec; \
+ mask = m ? \
+ ((const struct rte_flow_item_##type *)m)->fld : \
+ rte_flow_item_##type##_mask.fld; \
+ spec = ((const struct rte_flow_item_##type *)s)->fld; \
+ } while (0)
+
+ enum rte_flow_item_type ret;
+ uint16_t spec, mask;
if (item == NULL || item->spec == NULL)
- return ret;
+ return RTE_FLOW_ITEM_TYPE_VOID;
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_eth *)
- (item->mask))->type;
- else
- ether_type_m = rte_flow_item_eth_mask.type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_eth *)
- (item->spec))->type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(eth, type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- if (item->mask)
- ether_type_m = ((const struct rte_flow_item_vlan *)
- (item->mask))->inner_type;
- else
- ether_type_m = rte_flow_item_vlan_mask.inner_type;
- if (ether_type_m != RTE_BE16(0xFFFF))
- break;
- ether_type = ((const struct rte_flow_item_vlan *)
- (item->spec))->inner_type;
- if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
- ret = RTE_FLOW_ITEM_TYPE_VLAN;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(vlan, inner_type);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_ethertype_to_item_type(spec, mask, false);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
- (item->mask))->hdr.next_proto_id;
- else
- ip_next_proto_m =
- rte_flow_item_ipv4_mask.hdr.next_proto_id;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv4 *)
- (item->spec))->hdr.next_proto_id;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv4, hdr.next_proto_id);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- if (item->mask)
- ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
- (item->mask))->hdr.proto;
- else
- ip_next_proto_m =
- rte_flow_item_ipv6_mask.hdr.proto;
- if (ip_next_proto_m != 0xFF)
- break;
- ip_next_proto = ((const struct rte_flow_item_ipv6 *)
- (item->spec))->hdr.proto;
- if (ip_next_proto == IPPROTO_UDP)
- ret = RTE_FLOW_ITEM_TYPE_UDP;
- else if (ip_next_proto == IPPROTO_TCP)
- ret = RTE_FLOW_ITEM_TYPE_TCP;
- else if (ip_next_proto == IPPROTO_IP)
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- else if (ip_next_proto == IPPROTO_IPV6)
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- else
- ret = RTE_FLOW_ITEM_TYPE_END;
+ MLX5_XSET_ITEM_MASK_SPEC(ipv6, hdr.proto);
+ if (!mask)
+ return RTE_FLOW_ITEM_TYPE_VOID;
+ ret = mlx5_inet_proto_to_item_type(spec, mask);
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- ether_type_m = item->mask ?
- ((const struct rte_flow_item_geneve *)
- (item->mask))->protocol :
- rte_flow_item_geneve_mask.protocol;
- ether_type = ((const struct rte_flow_item_geneve *)
- (item->spec))->protocol;
- ether_type_m = rte_be_to_cpu_16(ether_type_m);
- ether_type = rte_be_to_cpu_16(ether_type);
- switch (ether_type_m & ether_type) {
- case RTE_ETHER_TYPE_TEB:
- ret = RTE_FLOW_ITEM_TYPE_ETH;
- break;
- case RTE_ETHER_TYPE_IPV4:
- ret = RTE_FLOW_ITEM_TYPE_IPV4;
- break;
- case RTE_ETHER_TYPE_IPV6:
- ret = RTE_FLOW_ITEM_TYPE_IPV6;
- break;
- default:
- ret = RTE_FLOW_ITEM_TYPE_END;
- }
+ MLX5_XSET_ITEM_MASK_SPEC(geneve, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ MLX5_XSET_ITEM_MASK_SPEC(gre, protocol);
+ ret = mlx5_ethertype_to_item_type(spec, mask, true);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ MLX5_XSET_ITEM_MASK_SPEC(vxlan_gpe, protocol);
+ ret = mlx5_nsh_proto_to_item_type(spec, mask);
break;
default:
ret = RTE_FLOW_ITEM_TYPE_VOID;
break;
}
return ret;
+#undef MLX5_XSET_ITEM_MASK_SPEC
}
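
For readers tracing the new macro, MLX5_XSET_ITEM_MASK_SPEC(eth, type)
hand-expands to the following (shown for illustration only; item, mask and
spec are the enclosing function's variables):

	/* Use the user-supplied mask when present, otherwise the item's
	 * default mask, and read the same field from the spec (non-NULL
	 * is already guaranteed by the early return above).
	 */
	do {
		const void *m = item->mask;
		const void *s = item->spec;

		mask = m ? ((const struct rte_flow_item_eth *)m)->type :
			   rte_flow_item_eth_mask.type;
		spec = ((const struct rte_flow_item_eth *)s)->type;
	} while (0);
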
static int
mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
const struct rte_flow_item_flex_handle *handle,
struct rte_flow_error *error);
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *err);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.get_restore_info = mlx5_flow_tunnel_get_restore_info,
.flex_item_create = mlx5_flow_flex_item_create,
.flex_item_release = mlx5_flow_flex_item_release,
+ .info_get = mlx5_flow_info_get,
+ .configure = mlx5_flow_port_configure,
};
/* Tunnel information. */
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
enum modify_reg start_reg;
bool skip_mtr_reg = false;
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
* flow.
*
* @param[in] dev
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
 * this must always be enabled (metadata may arrive
 * from other ports - not from local flows only).
*/
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
if (tunnel) {
unsigned int j;
}
}
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (priv->mark_enabled)
+ return;
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ rxq_ctrl->rxq.mark = 1;
+ }
+ priv->mark_enabled = 1;
+}
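
The mark flag now lives in the per-thread flow workspace rather than in
each device handle; a minimal sketch of the producer side (hypothetical
helper mirroring what the translation paths do):

	/* A translation path that meets a MARK/FLAG action records it once
	 * in the per-thread workspace; flow_rxq_flags_set() then calls
	 * flow_rxq_mark_flag_set() to enable marking on all Rx queues in a
	 * single pass.
	 */
	static void
	example_record_mark_action(void)
	{
		struct mlx5_flow_workspace *wks =
			mlx5_flow_get_thread_workspace();

		MLX5_ASSERT(wks);
		wks->mark = 1;
	}
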
+
/**
* Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
+ if (wks->mark)
+ flow_rxq_mark_flag_set(dev);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, dev_handle);
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
MLX5_ASSERT(rxq_ctrl != NULL);
if (rxq_ctrl == NULL)
continue;
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
if (tunnel) {
unsigned int j;
if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq->ctrl->flow_mark_n = 0;
rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
rxq->ctrl->flow_tunnels_n[j] = 0;
rxq->ctrl->rxq.tunnel = 0;
}
+ priv->mark_enabled = 0;
}
/**
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->key_len,
"RSS hash key too large");
- if (rss->queue_num > priv->config.ind_table_max_size)
+ if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&rss->queue_num,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"egress is not supported");
- if (attributes->transfer && !priv->config.dv_esw_en)
+ if (attributes->transfer && !priv->sh->config.dv_esw_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
NULL, "transfer is not supported");
uint8_t vni[4];
} id = { .vlan_id = 0, };
- if (!priv->config.l3_vxlan_en)
+ if (!priv->sh->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 VXLAN is not enabled by device"
const struct rte_flow_item_geneve *mask = item->mask;
int ret;
uint16_t gbhdr;
- uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
const struct rte_flow_item_geneve nic_mask = {
.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
.protocol = RTE_BE16(UINT16_MAX),
};
- if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 Geneve is not enabled by device"
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
- struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
uint8_t data_max_supported =
hca_attr->max_geneve_tlv_option_data_len * 4;
- struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_item_geneve *geneve_spec;
const struct rte_flow_item_geneve *geneve_mask;
const struct rte_flow_item_geneve_opt *spec = item->spec;
if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Geneve TLV opt length exceeeds the limit (31)");
+ "Geneve TLV opt length exceeds the limit (31)");
/* Check if class type and length masks are full. */
if (full_mask.option_class != mask->option_class ||
full_mask.option_type != mask->option_type ||
"Geneve TLV opt class/type/length masks must be full");
/* Check if length is supported */
if ((uint32_t)spec->option_len >
- config->hca_attr.max_geneve_tlv_option_data_len)
+ hca_attr->max_geneve_tlv_option_data_len)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt length not supported");
- if (config->hca_attr.max_geneve_tlv_options > 1)
+ if (hca_attr->max_geneve_tlv_options > 1)
DRV_LOG(DEBUG,
"max_geneve_tlv_options supports more than 1 option");
/* Check GENEVE item preceding. */
"Data mask is of unsupported size");
}
/* Check GENEVE option is supported in NIC. */
- if (!config->hca_attr.geneve_tlv_opt)
+ if (!hca_attr->geneve_tlv_opt)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"Geneve TLV opt not supported");
struct mlx5_priv *priv = dev->data->dev_private;
int ret;
- if (!priv->config.mpls_en)
+ if (!priv->sh->dev_cap.mpls_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"MPLS not supported or"
if (type != MLX5_FLOW_TYPE_MAX)
return type;
+ /*
+ * Currently when dv_flow_en == 2, only HW steering engine is
+ * supported. New engines can also be chosen here if ready.
+ */
+ if (priv->sh->config.dv_flow_en == 2)
+ return MLX5_FLOW_TYPE_HW;
/* If no OS specific type - continue with DV/VERBS selection */
- if (attr->transfer && priv->config.dv_esw_en)
+ if (attr->transfer && priv->sh->config.dv_esw_en)
type = MLX5_FLOW_TYPE_DV;
if (!attr->transfer)
- type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
- MLX5_FLOW_TYPE_VERBS;
+ type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+ MLX5_FLOW_TYPE_VERBS;
return type;
}
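
On the user side, the new branch is reached by probing the port with the
existing dv_flow_en devarg set to 2. A hedged sketch (the PCI address is a
placeholder):

	#include <rte_eal.h>

	int
	main(int argc __rte_unused, char **argv)
	{
		/* dv_flow_en=2 selects the HW steering engine at probe. */
		char *args[] = { argv[0], "-a", "0000:08:00.0,dv_flow_en=2" };

		return rte_eal_init(3, args) < 0 ? -1 : 0;
	}
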
* subflow.
*
* @param[in] dev_flow
- * Pointer the created preifx subflow.
+ * Pointer to the created prefix subflow.
*
* @return
* The layers get from prefix subflow.
return true;
case RTE_FLOW_ACTION_TYPE_FLAG:
case RTE_FLOW_ACTION_TYPE_MARK:
- if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
return true;
else
return false;
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- /* Fill the register fileds in the flow. */
+ /* Fill the register fields in the flow. */
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
/*
 * The copy Flows are not included in any list. These
* ones are referenced from other Flows and can not
- * be applied, removed, deleted in ardbitrary order
+ * be applied, removed, deleted in arbitrary order
* by list traversing.
*/
mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
uint32_t mark_id;
/* Check whether extensive metadata feature is engaged. */
- if (!priv->config.dv_flow_en ||
- priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ if (!priv->sh->config.dv_flow_en ||
+ priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev) ||
!priv->sh->dv_regc0_mask)
return 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
struct mlx5_flow_mreg_copy_resource *mcp_res;
const struct rte_flow_action_mark *mark;
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
flow_split_info->flow_idx, error);
/*
 * If dev_flow is one of the suffix flows, some actions in suffix
* flow may need some user defined item layer flags, and pass the
- * Metadate rxq mark flag to suffix flow as well.
+ * Metadata rxq mark flag to suffix flow as well.
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
- if (flow_split_info->prefix_mark)
- dev_flow->handle->mark = 1;
+ if (flow_split_info->prefix_mark) {
+ MLX5_ASSERT(wks);
+ wks->mark = 1;
+ }
if (sub_flow)
*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
uint32_t tag_id = 0;
struct rte_flow_item *vlan_item_dst = NULL;
const struct rte_flow_item *vlan_item_src = NULL;
+ const struct rte_flow_item *orig_items = items;
struct rte_flow_action *hw_mtr_action;
struct rte_flow_action *action_pre_head = NULL;
int32_t flow_src_port = priv->representor_id;
if (!fm->def_policy) {
sub_policy = get_meter_sub_policy(dev, flow, wks,
- attr, items, error);
+ attr, orig_items,
+ error);
if (!sub_policy)
return -rte_errno;
} else {
* @param[out] error
* Perform verbose error reporting if not NULL.
* @param[in] encap_idx
- * The encap action inndex.
+ * The encap action index.
*
* @return
* 0 on success, negative value otherwise
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_sh_config *config = &priv->sh->config;
const struct rte_flow_action *qrss = NULL;
struct rte_flow_action *ext_actions = NULL;
struct mlx5_flow *dev_flow = NULL;
/* Add suffix subflow to execute Q/RSS. */
flow_split_info->prefix_layers = layers;
flow_split_info->prefix_mark = 0;
+ flow_split_info->table_id = 0;
ret = flow_create_split_inner(dev, flow, &dev_flow,
&q_attr, mtr_sfx ? items :
q_items, q_actions,
goto exit;
}
/* Add the prefix subflow. */
- flow_split_info->prefix_mark = 0;
skip_scale_restore = flow_split_info->skip_scale;
flow_split_info->skip_scale |=
1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark = dev_flow->handle->mark;
+ flow_split_info->prefix_mark |= wks->mark;
flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
size_t act_size;
size_t item_size;
* When reg_c_preserve is set, metadata registers Cx preserve
* their value even through packet duplication.
*/
- add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+ add_tag = (!fdb_tx ||
+ priv->sh->cdev->config.hca_attr.reg_c_preserve);
if (add_tag)
sfx_items = (struct rte_flow_item *)((char *)sfx_actions
+ act_size);
}
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark = dev_flow->handle->mark;
+ MLX5_ASSERT(wks);
+ flow_split_info->prefix_mark |= wks->mark;
	/* Suffix group level has already been scaled with factor, set
* MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
* again in translation.
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q creation not supported");
+ return NULL;
+ }
/*
 * If the device is not started yet, it is not allowed to create a
* flow from application. PMD default flows and traffic control flows
* @param type
* Flow type to be flushed.
* @param active
- * If flushing is called avtively.
+ * If flushing is called actively.
*/
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
return data;
err:
- if (data->rss_desc.queue)
- free(data->rss_desc.queue);
+ free(data->rss_desc.queue);
free(data);
return NULL;
}
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q destruction not supported");
flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
(uintptr_t)(void *)flow);
return 0;
struct rte_flow_error *error)
{
int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow non-Q query not supported");
ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
error);
if (ret < 0)
*/
int
mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
- bool clear, uint64_t *pkts, uint64_t *bytes)
+ bool clear, uint64_t *pkts, uint64_t *bytes, void **action)
{
const struct mlx5_flow_driver_ops *fops;
struct rte_flow_attr attr = { .transfer = 0 };
if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->counter_query(dev, cnt, clear, pkts, bytes);
+ return fops->counter_query(dev, cnt, clear, pkts,
+ bytes, action);
}
DRV_LOG(ERR,
"port %u counter query is not supported.",
return -ENOTSUP;
}
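
A hedged caller-side sketch of the extended signature (dev and cnt_idx are
assumed inputs); the returned DR action pointer is what the dump code
below records as the counter ID:

	#include <inttypes.h>
	#include <stdio.h>

	static void
	example_counter_read(struct rte_eth_dev *dev, uint32_t cnt_idx)
	{
		uint64_t pkts = 0, bytes = 0;
		void *action = NULL;	/* DR action handle (new out-param) */

		if (!mlx5_counter_query(dev, cnt_idx, false, &pkts, &bytes,
					&action) && action)
			printf("counter %p: %" PRIu64 " pkts, %" PRIu64
			       " bytes\n", action, pkts, bytes);
	}
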
+/**
+ * Get information about HWS pre-configurable resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[out] port_info
+ * Pointer to port information.
+ * @param[out] queue_info
+ * Pointer to queue information.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "info get with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->info_get(dev, port_info, queue_info, error);
+}
+
+/**
+ * Configure port HWS resources.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] port_attr
+ * Port configuration attributes.
+ * @param[in] nb_queue
+ * Number of queue.
+ * @param[in] queue_attr
+ * Array that holds attributes for each flow queue.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_port_configure(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port configure with incorrect steering mode");
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
+}
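
Both ops are reached through the generic rte_flow API before any template
operations are attempted; a minimal application-side sketch (the sizing
values are illustrative only):

	#include <rte_flow.h>

	static int
	example_hws_configure(uint16_t port_id)
	{
		struct rte_flow_port_info port_info;
		struct rte_flow_queue_info queue_info;
		struct rte_flow_error error;
		const struct rte_flow_port_attr port_attr = {
			.nb_counters = 64,	/* illustrative sizing */
		};
		const struct rte_flow_queue_attr qattr = { .size = 128 };
		const struct rte_flow_queue_attr *qattrs[] = { &qattr };
		int ret;

		ret = rte_flow_info_get(port_id, &port_info, &queue_info,
					&error);
		if (ret)
			return ret;
		return rte_flow_configure(port_id, &port_attr, 1, qattrs,
					  &error);
	}
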
+
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
struct rte_flow *flow;
struct rte_flow_error error;
- if (!priv->config.dv_flow_en)
+ if (!priv->sh->config.dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
"invalid flow handle");
}
handle_idx = flow->dev_handles;
+ /* query counter */
+ if (flow->counter &&
+ (!mlx5_counter_query(dev, flow->counter, false,
+ &count.hits, &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+ }
+
while (handle_idx) {
dh = mlx5_ipool_get(priv->sh->ipool
[MLX5_IPOOL_MLX5_FLOW], handle_idx);
continue;
handle_idx = dh->next.next;
- /* query counter */
- type = DR_DUMP_REC_TYPE_PMD_COUNTER;
- flow_dv_query_count_ptr(dev, flow->counter,
- &action, error);
- if (action) {
- id = (uint64_t)(uintptr_t)action;
- if (!mlx5_flow_query_counter(dev, flow, &count, error))
- save_dump_file(NULL, 0, type,
- id, (void *)&count, file);
- }
/* Get modify_hdr and encap_decap buf from ipools. */
encap_decap = NULL;
modify_hdr = dh->dvh.modify_hdr;
*/
static int
mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
- FILE *file, struct rte_flow_error *error)
+ FILE *file, struct rte_flow_error *error __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
max = MLX5_COUNTERS_PER_POOL * cmng->n_valid;
for (j = 1; j <= max; j++) {
action = NULL;
- flow_dv_query_count_ptr(dev, j, &action, error);
- if (action) {
- if (!flow_dv_query_count(dev, j, &count, error)) {
- type = DR_DUMP_REC_TYPE_PMD_COUNTER;
- id = (uint64_t)(uintptr_t)action;
- save_dump_file(NULL, 0, type,
- id, (void *)&count, file);
- }
+ if ((!mlx5_counter_query(dev, j, false, &count.hits,
+ &count.bytes, &action)) && action) {
+ id = (uint64_t)(uintptr_t)action;
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
}
}
return 0;
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
struct mlx5_flow_handle *dh;
struct rte_flow *flow;
- if (!priv->config.dv_flow_en) {
+ if (!sh->config.dv_flow_en) {
if (fputs("device dv flow disabled\n", file) <= 0)
return -errno;
return -ENOTSUP;
}
/**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
*/
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!priv->config.dv_flow_en)
+ if (!priv->sh->config.dv_flow_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"flow DV interface is off");
type = mlx5_flow_os_get_type();
if (type == MLX5_FLOW_TYPE_MAX) {
type = MLX5_FLOW_TYPE_VERBS;
- if (priv->sh->devx && priv->config.dv_flow_en)
+ if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
type = MLX5_FLOW_TYPE_DV;
}
fops = flow_get_drv_ops(type);