#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
+#include <rte_gtp.h>
#include "mlx5.h"
#include "mlx5_defs.h"
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
+#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
+ sizeof(struct rte_flow_item_ipv4))
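+/*
+ * Illustrative note: a raw encap/decap buffer is classified by its size.
+ * Anything shorter than an Ethernet item plus an IPv4 item (14 + 20 = 34
+ * bytes, assuming unpadded item layouts) can only hold the L2 header used
+ * to rebuild the packet after an L3 tunnel is stripped, so it implies an
+ * L3 decap; a larger buffer carries a full tunnel stack and implies an
+ * L3 encap.
+ */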
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
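+/* E.g. a TCI of 0xa005 carries PCP (0xa005 & MLX5DV_FLOW_VLAN_PCP_MASK) >>
+ * MLX5DV_FLOW_VLAN_PCP_SHIFT = 5.
+ */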
/**
* Initialize flow attributes structure according to flow items' types.
*
+ * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel
+ * mode. In tunnel mode, the items to be modified are the outermost ones.
+ *
* @param[in] item
* Pointer to item specification.
* @param[out] attr
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_IPV4:
- attr->ipv4 = 1;
+ if (!attr->ipv6)
+ attr->ipv4 = 1;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- attr->ipv6 = 1;
+ if (!attr->ipv4)
+ attr->ipv6 = 1;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- attr->udp = 1;
+ if (!attr->tcp)
+ attr->udp = 1;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- attr->tcp = 1;
+ if (!attr->udp)
+ attr->tcp = 1;
break;
default:
break;
}
struct field_modify_info modify_ipv4[] = {
+ {1, 1, MLX5_MODI_OUT_IP_DSCP},
{1, 8, MLX5_MODI_OUT_IPV4_TTL},
{4, 12, MLX5_MODI_OUT_SIPV4},
{4, 16, MLX5_MODI_OUT_DIPV4},
};
struct field_modify_info modify_ipv6[] = {
+ {1, 0, MLX5_MODI_OUT_IP_DSCP},
{1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
{4, 8, MLX5_MODI_OUT_SIPV6_127_96},
{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
if (reg < 0)
return reg;
assert(reg > 0);
+ if (reg == REG_C_0) {
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
+ data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+ mask = rte_cpu_to_be_32(mask) & msk_c0;
+ mask = rte_cpu_to_be_32(mask << shl_c0);
+ }
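+ /*
+ * Worked example with a hypothetical mask: if dv_regc0_mask is
+ * 0xffff0000, then shl_c0 is 16 and a CPU-order value 0x00001234 is
+ * shifted to 0x12340000, landing in the half of C0 available to the
+ * application while the other half stays untouched.
+ */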
reg_c_x[0].id = reg_to_field[reg];
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
+/**
+ * Convert modify-header set IPv4 DSCP action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_ipv4_dscp
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_dscp *conf =
+ (const struct rte_flow_action_set_dscp *)(action->conf);
+ struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv4 ipv4_mask;
+
+ memset(&ipv4, 0, sizeof(ipv4));
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
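+ /*
+ * As with the IPv6 variant below, rdma-core expects the 6 DSCP bits
+ * byte aligned at bits 0-5, so conf->dscp is used unshifted and the
+ * mask is RTE_IPV4_HDR_DSCP_MASK >> 2 (0xfc >> 2 = 0x3f).
+ */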
+ ipv4.hdr.type_of_service = conf->dscp;
+ ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
+ item.spec = &ipv4;
+ item.mask = &ipv4_mask;
+ return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header set IPv6 DSCP action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_ipv6_dscp
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_dscp *conf =
+ (const struct rte_flow_action_set_dscp *)(action->conf);
+ struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_ipv6 ipv6_mask;
+
+ memset(&ipv6, 0, sizeof(ipv6));
+ memset(&ipv6_mask, 0, sizeof(ipv6_mask));
+ /*
+ * Even though the DSCP bit offset in IPv6 is not byte aligned,
+ * rdma-core only accepts the DSCP bits byte aligned, starting from
+ * bit 0 to bit 5, so as to be compatible with IPv4. There is no need
+ * to shift the bits in the IPv6 case as rdma-core requires a
+ * byte-aligned value.
+ */
+ ipv6.hdr.vtc_flow = conf->dscp;
+ ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
+ item.spec = &ipv6;
+ item.mask = &ipv6_mask;
+ return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
/**
* Validate MARK item.
*
return 0;
}
+/**
+ * Validate GTP item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
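+ *
+ * A GTP item is accepted only on top of an outer UDP item, e.g. this
+ * illustrative testpmd-style pattern:
+ *   pattern eth / ipv4 / udp / gtp teid is 1234 / end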
+ */
+static int
+flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_gtp *mask = item->mask;
+ const struct rte_flow_item_gtp nic_mask = {
+ .msg_type = 0xff,
+ .teid = RTE_BE32(0xffffffff),
+ };
+
+ if (!priv->config.hca_attr.tunnel_stateless_gtp)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GTP support is not enabled");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_gtp_mask;
+ return mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gtp),
+ error);
+}
+
/**
* Validate the pop VLAN action.
*
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct rte_flow_action_raw_decap *decap = action->conf;
+
if (action_flags & MLX5_FLOW_ACTION_DROP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't drop and decap in same flow");
if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can only have a single decap"
" action in a flow");
- if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have decap action after"
- " modify action");
/* decap action is valid on egress only if it is followed by encap */
- if (attr->egress) {
- for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
- action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
- action++) {
- }
- if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL, "decap action not supported"
- " for egress");
+ if (attr->egress && decap &&
+ decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "decap action not supported"
+ " for egress");
+ } else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
+ (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't have decap action "
+ "after modify action");
}
return 0;
}
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
+ /*
+ * Depending on rdma_core version the glue routine calls
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
+ */
cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_vport
+ mlx5_glue->dr_create_flow_action_dest_port
(priv->sh->fdb_domain, resource->port_id);
if (!cache_resource->action) {
rte_free(cache_resource);
encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
res.size = encap_data->size;
memcpy(res.buf, encap_data->data, res.size);
- res.reformat_type = attr->egress ?
- MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
- MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
if (attr->transfer)
res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
else
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_meter *am = action->conf;
- struct mlx5_flow_meter *fm = mlx5_flow_meter_find(priv, am->mtr_id);
+ struct mlx5_flow_meter *fm;
+
+ if (!am)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "meter action conf is NULL");
if (action_flags & MLX5_FLOW_ACTION_METER)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"meter action not supported");
+ fm = mlx5_flow_meter_find(priv, am->mtr_id);
if (!fm)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
return 0;
}
+/**
+ * Validate the modify-header IPv4 DSCP actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
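+ *
+ * The pattern must contain an IPv4 item for this action to be valid,
+ * e.g. an illustrative testpmd-style rule:
+ *   pattern eth / ipv4 / end actions set_ipv4_dscp dscp 10 / queue index 0 / end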
+ */
+static int
+flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no ipv4 item in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Validate the modify-header IPv6 DSCP actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no ipv6 item in pattern");
+ }
+ return ret;
+}
+
/**
* Find existing modify-header resource or create and register a new one.
*
error);
if (ret < 0)
return ret;
- last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
ret = mlx5_flow_validate_item_mpls(dev, items,
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
break;
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ ret = flow_dv_validate_item_gtp(dev, items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GTP;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
action_flags |= MLX5_FLOW_ACTION_METER;
++actions_n;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ ret = flow_dv_validate_action_modify_ipv4_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ ret = flow_dv_validate_action_modify_ipv6_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
void *misc2_v =
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+ uint32_t temp;
data &= mask;
switch (reg_type) {
MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
break;
case REG_C_0:
- MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
- MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, data);
+ /*
+ * The metadata register C0 field might be divided into
+ * source vport index and META item value, we should set
+ * this field according to specified mask, not as whole one.
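+ * E.g. (hypothetical split): if the source vport match owns the lower
+ * 16 bits and the META item the upper 16 bits, OR-ing the masks and
+ * merging the values keeps both matches intact.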
+ */
+ temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
+ temp |= mask;
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
+ temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
+ temp &= ~mask;
+ temp |= data;
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
break;
case REG_C_1:
MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
/* Get the metadata register index for the mark. */
reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
assert(reg > 0);
+ if (reg == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
+ mask &= msk_c0;
+ mask <<= shl_c0;
+ value <<= shl_c0;
+ }
flow_dv_match_meta_reg(matcher, key, reg, value, mask);
}
}
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
uint32_t shl_c0 = rte_bsf32(msk_c0);
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
- msk_c0 = rte_cpu_to_be_32(msk_c0);
+ value >>= shr_c0;
+ mask >>= shr_c0;
+#endif
value <<= shl_c0;
mask <<= shl_c0;
assert(msk_c0);
/**
* Add tag item to matcher
*
+ * @param[in] dev
+ * The device to configure through.
* @param[in, out] matcher
* Flow matcher.
* @param[in, out] key
* Flow pattern to translate.
*/
static void
-flow_dv_translate_mlx5_item_tag(void *matcher, void *key,
+flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
+ void *matcher, void *key,
const struct rte_flow_item *item)
{
const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
+ uint32_t mask, value;
assert(tag_v);
- flow_dv_match_meta_reg(matcher, key, tag_v->id, tag_v->data,
- tag_m ? tag_m->data : UINT32_MAX);
+ value = tag_v->data;
+ mask = tag_m ? tag_m->data : UINT32_MAX;
+ if (tag_v->id == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
+ mask &= msk_c0;
+ mask <<= shl_c0;
+ value <<= shl_c0;
+ }
+ flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}
/**
icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
}
+/**
+ * Add GTP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gtp(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_gtp *gtp_m = item->mask;
+ const struct rte_flow_item_gtp *gtp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ uint16_t dport = RTE_GTPU_UDP_PORT;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!gtp_v)
+ return;
+ if (!gtp_m)
+ gtp_m = &rte_flow_item_gtp_mask;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
+ gtp_v->msg_type & gtp_m->msg_type);
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
+ rte_be_to_cpu_32(gtp_m->teid));
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
+ rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
+}
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
.direction = !!egress,
}
};
- struct mlx5_hlist_entry *pos;
+ struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+ table_key.v64);
struct mlx5_flow_tbl_data_entry *tbl_data;
-
-#ifdef HAVE_MLX5DV_DR
int ret;
void *domain;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
tbl = &tbl_data->tbl;
- if (!tbl->obj) {
- rte_flow_error_set(error, ENOKEY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot find created table");
- return NULL;
- }
rte_atomic32_inc(&tbl->refcnt);
return tbl;
}
}
rte_atomic32_inc(&tbl->refcnt);
return tbl;
-#else
- /* Just to make the compiling pass when no HAVE_MLX5DV_DR defined. */
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- tbl = &tbl_data->tbl;
- if (!tbl->obj) {
- rte_flow_error_set(error, ENOKEY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot find created table");
- return NULL;
- }
- rte_atomic32_inc(&tbl->refcnt);
- return tbl;
- }
- return NULL;
-#endif
}
/**
rte_atomic32_inc(&cache_matcher->refcnt);
dev_flow->dv.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
-#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
-#endif
return 0;
}
}
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
if (!cache_matcher) {
-#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
-#endif
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate matcher memory");
*
* @param dev[in, out]
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to tag resource.
+ * @param[in, out] tag_be24
+ * Tag value in big-endian order, right-shifted by 8 bits (a 24-bit value).
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
static int
flow_dv_tag_resource_register
(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_tag_resource *resource,
+ uint32_t tag_be24,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
+ struct mlx5_hlist_entry *entry;
/* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->tags, next) {
- if (resource->tag == cache_resource->tag) {
- DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
- return 0;
- }
+ entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+ if (entry) {
+ cache_resource = container_of
+ (entry, struct mlx5_flow_dv_tag_resource, entry);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.tag_resource = cache_resource;
+ DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
}
/* Register new resource. */
cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag
- (resource->tag);
+ cache_resource->entry.key = (uint64_t)tag_be24;
+ cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
if (!cache_resource->action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
+ if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
+ mlx5_glue->destroy_flow_action(cache_resource->action);
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot insert tag");
+ }
dev_flow->dv.tag_resource = cache_resource;
- DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
+ DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
flow_dv_tag_release(struct rte_eth_dev *dev,
struct mlx5_flow_dv_tag_resource *tag)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+
assert(tag);
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action(tag->action));
- LIST_REMOVE(tag, next);
+ mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
rte_free(tag);
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"No eswitch info was found for port");
- if (priv->vport_meta_mask)
- *dst_port_id = priv->vport_meta_tag;
- else
- *dst_port_id = priv->vport_id;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ /*
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_ib_port().
+ */
+ *dst_port_id = priv->ibv_port;
+#else
+ /*
+ * Legacy mode, no LAG configurations is supported.
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_vport().
+ */
+ *dst_port_id = priv->vport_id;
+#endif
return 0;
}
mlx5_txq_release(dev, queue_v->queue);
}
+/**
+ * Set the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
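+ *
+ * For example (illustrative): an outer eth / ipv4 / udp pattern with RSS
+ * types ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY selects hashing on both IPv4
+ * addresses (MLX5_IPV4_IBV_RX_HASH) plus the UDP source port only
+ * (IBV_RX_HASH_SRC_PORT_UDP).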
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+{
+ struct rte_flow *flow = dev_flow->flow;
+ uint64_t items = dev_flow->layers;
+ int rss_inner = 0;
+ uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+
+ dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (flow->rss.level >= 2) {
+ dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ rss_inner = 1;
+ }
+#endif
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ else
+ dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ else
+ dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ }
+ }
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_UDP;
+ else
+ dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_TCP;
+ else
+ dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ }
+ }
+}
+
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
MLX5DV_FLOW_TABLE_TYPE_NIC_RX
};
union flow_dv_attr flow_attr = { .attr = 0 };
- struct mlx5_flow_dv_tag_resource tag_resource;
+ uint32_t tag_be;
union mlx5_flow_tbl_key tbl_key;
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
break;
}
- tag_resource.tag =
- mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
/* Fall-through */
case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
/* Legacy (non-extensive) MARK action. */
- tag_resource.tag = mlx5_flow_mark_set
+ tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
flow->meter->mfts->meter_action;
action_flags |= MLX5_FLOW_ACTION_METER;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ if (flow_dv_convert_action_modify_ipv4_dscp(&mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ if (flow_dv_convert_action_modify_ipv6_dscp(&mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res.actions_num) {
items, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV4_LAYER_TYPES,
- MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
items, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV6_LAYER_TYPES,
- MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
flow_dv_translate_item_tcp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_TCP,
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
flow_dv_translate_item_udp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_UDP,
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
last_item = MLX5_FLOW_ITEM_TAG;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
- flow_dv_translate_mlx5_item_tag(match_mask,
+ flow_dv_translate_mlx5_item_tag(dev, match_mask,
match_value, items);
last_item = MLX5_FLOW_ITEM_TAG;
break;
items);
last_item = MLX5_FLOW_ITEM_TX_QUEUE;
break;
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ flow_dv_translate_item_gtp(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_GTP;
+ break;
default:
break;
}
assert(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
dev_flow->layers = item_flags;
+ if (action_flags & MLX5_FLOW_ACTION_RSS)
+ flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);