#include <string.h>
#include <unistd.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
+#include <rte_eal_paging.h>
+#include <rte_mpls.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
+#include "mlx5_common_os.h"
#include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_resource *tbl);
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
+
+static int
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ uint32_t encap_decap_idx);
+
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+ uint32_t port_id);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
}
static enum mlx5_modification_field reg_to_field[] = {
- [REG_NONE] = MLX5_MODI_OUT_NONE,
+ [REG_NON] = MLX5_MODI_OUT_NONE,
[REG_A] = MLX5_MODI_META_DATA_REG_A,
[REG_B] = MLX5_MODI_META_DATA_REG_B,
[REG_C_0] = MLX5_MODI_META_REG_C_0,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
- MLX5_ASSERT(conf->id != REG_NONE);
+ MLX5_ASSERT(conf->id != REG_NON);
MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
actions[i] = (struct mlx5_modification_cmd) {
.action_type = MLX5_MODIFICATION_TYPE_SET,
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
reg_type = reg_to_field[ret];
MLX5_ASSERT(reg_type > 0);
.mask = &mask,
};
struct field_modify_info reg_c_x[] = {
- {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
- {0, 0, 0},
+ [1] = {0, 0, 0},
};
int reg;
mask = rte_cpu_to_be_32(mask) & msk_c0;
mask = rte_cpu_to_be_32(mask << shl_c0);
}
- reg_c_x[0].id = reg_to_field[reg];
+ reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
"isn't supported");
if (reg != REG_A)
nic_mask.data = priv->sh->dv_meta_mask;
+ } else if (attr->transfer) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "extended metadata feature "
+ "should be enabled when "
+ "meta item is requested "
+					  "with e-switch mode");
}
if (!mask)
mask = &rte_flow_item_meta_mask;
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
return 0;
}
return 0;
}
+/**
+ * Validate VLAN item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Ethernet device flow is being created on.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(UINT16_MAX),
+ .inner_type = RTE_BE16(UINT16_MAX),
+ };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+ const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
+ const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple VLAN layers not supported");
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (ret)
+ return ret;
+ if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->vmwa_context) {
+ /*
+			 * A non-NULL context means a virtual machine with
+			 * SR-IOV is enabled; a VLAN interface must be
+			 * created so that the hypervisor sets up the
+			 * E-Switch vport context correctly. We avoid
+			 * creating multiple VLAN interfaces, so a VLAN tag
+			 * mask cannot be supported.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN tag mask is not"
+ " supported in virtual"
+ " environment");
+ }
+ }
+ return 0;
+}
+
+/*
+ * GTP flags are contained in 1 byte of the format:
+ * -------------------------------------------
+ * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
+ * |-----------------------------------------|
+ * | value | Version | PT | Res | E | S | PN |
+ * -------------------------------------------
+ *
+ * Matching is supported only for GTP flags E, S, PN.
+ */
+#define MLX5_GTP_FLAGS_MASK 0x07
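+
+/*
+ * Illustrative note (not part of the original patch): a GTPv1 header
+ * with the S flag set carries 0x32 in this byte (version = 1, PT = 1,
+ * S = 1). A spec of 0x02 matches on the S flag, while a spec of 0x32
+ * is rejected by the validation below because it sets version/PT bits
+ * outside MLX5_GTP_FLAGS_MASK.
+ */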
+
/**
* Validate GTP item.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_gtp *spec = item->spec;
const struct rte_flow_item_gtp *mask = item->mask;
const struct rte_flow_item_gtp nic_mask = {
+ .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
.msg_type = 0xff,
.teid = RTE_BE32(0xffffffff),
};
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_gtp_mask;
+ if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Match is supported for GTP"
+ " flags only");
return mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no support for multiple VLAN "
"actions");
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ /* Pop VLAN with preceding Decap requires inner header with VLAN. */
+ if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan after decap without "
+ "match on inner vlan in the flow");
+ /* Pop VLAN without preceding Decap requires outer header with VLAN. */
+ if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
+ /* If VLAN item in pattern doesn't contain data, return here. */
+ if (!vlan_v)
+ return;
if (!vlan_m)
vlan_m = &nic_mask;
/* Only full match values are accepted */
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
- if (!attr->transfer && attr->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "push VLAN action not supported for "
- "ingress");
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"invalid vlan ethertype");
- if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "no support for multiple VLAN "
- "actions");
if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
{
const struct mlx5_priv *priv = dev->data->dev_private;
+ if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
+ !priv->config.decap_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "decap is not enabled");
if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
return 0;
}
+/**
+ * Match encap_decap resource.
+ *
+ * @param entry
+ *   Pointer to the existing resource entry object.
+ * @param ctx
+ * Pointer to new encap_decap resource.
+ *
+ * @return
+ * 0 on matching, -1 otherwise.
+ */
+static int
+flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
+{
+ struct mlx5_flow_dv_encap_decap_resource *resource;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+ resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_encap_decap_resource,
+ entry);
+ if (resource->entry.key == cache_resource->entry.key &&
+ resource->reformat_type == cache_resource->reformat_type &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
+ resource->size == cache_resource->size &&
+ !memcmp((const void *)resource->buf,
+ (const void *)cache_resource->buf,
+ resource->size))
+ return 0;
+ return -1;
+}
+
/**
* Find existing encap/decap resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- uint32_t idx = 0;
+ struct mlx5_hlist_entry *entry;
+ union mlx5_flow_encap_decap_key encap_decap_key = {
+ {
+ .ft_type = resource->ft_type,
+ .refmt_type = resource->reformat_type,
+ .buf_size = resource->size,
+ .table_level = !!dev_flow->dv.group,
+ .cksum = 0,
+ }
+ };
+ int ret;
resource->flags = dev_flow->dv.group ? 0 : 1;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
+ encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
+ resource->size, 0);
+ resource->entry.key = encap_decap_key.v64;
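+	/*
+	 * The 64-bit key packs ft_type, reformat type, buffer size, table
+	 * level and a checksum of the encap buffer; different buffers may
+	 * still collide on it, so the lookup below re-checks full equality
+	 * through flow_dv_encap_decap_resource_match().
+	 */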
/* Lookup a matching resource from cache. */
- ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
- cache_resource, next) {
- if (resource->reformat_type == cache_resource->reformat_type &&
- resource->ft_type == cache_resource->ft_type &&
- resource->flags == cache_resource->flags &&
- resource->size == cache_resource->size &&
- !memcmp((const void *)resource->buf,
- (const void *)cache_resource->buf,
- resource->size)) {
- DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.rix_encap_decap = idx;
- dev_flow->dv.encap_decap = cache_resource;
- return 0;
- }
+ entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
+ flow_dv_encap_decap_resource_match,
+ (void *)resource);
+ if (entry) {
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_encap_decap_resource, entry);
+ DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
+ dev_flow->dv.encap_decap = cache_resource;
+ return 0;
}
/* Register new encap/decap resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_packet_reformat
- (sh->ctx, cache_resource->reformat_type,
- cache_resource->ft_type, domain, cache_resource->flags,
- cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL));
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
+ cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
+ ret = mlx5_flow_os_create_flow_action_packet_reformat
+ (sh->ctx, domain, cache_resource,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
- dev_flow->handle->dvh.rix_encap_decap, cache_resource,
- next);
+ if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
+ flow_dv_encap_decap_resource_match,
+ (void *)cache_resource)) {
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ cache_resource->idx);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "action exist");
+ }
dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
{
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
- int cnt;
+ int cnt, ret;
MLX5_ASSERT(tbl);
cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
if (!cnt) {
- tbl_data->jump.action =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (tbl->obj);
- if (!tbl_data->jump.action)
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (tbl->obj, &tbl_data->jump.action);
+ if (ret)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create jump action");
return 0;
}
+/**
+ * Find existing default miss resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+ int cnt = rte_atomic32_read(&cache_resource->refcnt);
+
+ if (!cnt) {
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_default_miss();
+ if (!cache_resource->action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss action");
+ DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
+ (void *)cache_resource->action, cnt);
+ }
+ rte_atomic32_inc(&cache_resource->refcnt);
+ return 0;
+}
+
/**
* Find existing table port ID resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- /*
- * Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
- * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
- */
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_port
- (priv->sh->fdb_domain, resource->port_id);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_dest_port
+ (priv->sh->fdb_domain, resource->port_id,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
domain = sh->rx_domain;
else
domain = sh->tx_domain;
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_push_vlan(domain,
- resource->vlan_tag);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_push_vlan
+ (domain, resource->vlan_tag,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
return 0;
}
/**
- * Get the size of specific rte_flow_item_type
+ * Get the header size of a specific rte_flow_item_type
*
* @param[in] item_type
* Tested rte_flow_item_type.
 *   Size of the item header, 0 if void or irrelevant.
*/
static size_t
-flow_dv_get_item_len(const enum rte_flow_item_type item_type)
+flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
size_t retval;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- retval = sizeof(struct rte_flow_item_eth);
+ retval = sizeof(struct rte_ether_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- retval = sizeof(struct rte_flow_item_vlan);
+ retval = sizeof(struct rte_vlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- retval = sizeof(struct rte_flow_item_ipv4);
+ retval = sizeof(struct rte_ipv4_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- retval = sizeof(struct rte_flow_item_ipv6);
+ retval = sizeof(struct rte_ipv6_hdr);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- retval = sizeof(struct rte_flow_item_udp);
+ retval = sizeof(struct rte_udp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- retval = sizeof(struct rte_flow_item_tcp);
+ retval = sizeof(struct rte_tcp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- retval = sizeof(struct rte_flow_item_vxlan);
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ retval = sizeof(struct rte_vxlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- retval = sizeof(struct rte_flow_item_gre);
- break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- retval = sizeof(struct rte_flow_item_nvgre);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- retval = sizeof(struct rte_flow_item_vxlan_gpe);
+ retval = sizeof(struct rte_gre_hdr);
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- retval = sizeof(struct rte_flow_item_mpls);
+ retval = sizeof(struct rte_mpls_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
default:
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "invalid empty data");
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- len = flow_dv_get_item_len(items->type);
+ len = flow_dv_get_item_hdr_len(items->type);
if (len + temp_size > MLX5_ENCAP_MAX_LEN)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(dev, &res, dev_flow, error);
}
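+
+/*
+ * Latched by flow_dv_validate_action_sample(): non-zero while the flow
+ * under validation mirrors packets on the E-Switch (FDB); checked by
+ * the modify-header and jump validations below.
+ */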
+static int fdb_mirror;
+
/**
* Validate the modify-header actions.
*
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have encap action before"
" modify action");
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't support sample action before"
+ " modify action for E-Switch"
+ " mirroring");
return 0;
}
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"jump with meter not support");
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "E-Switch mirroring can't support"
+ " Sample action and jump action in"
+ " same flow now");
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
}
/**
- * Find existing modify-header resource or create and register a new one.
+ * Match modify-header resource.
*
- * @param dev[in, out]
- * Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to modify-header resource.
- * @parm[in, out] dev_flow
- * Pointer to the dev_flow.
- * @param[out] error
- * pointer to error structure.
+ * @param entry
+ *   Pointer to the existing resource entry object.
+ * @param ctx
+ * Pointer to new modify-header resource.
*
* @return
- * 0 on success otherwise -errno and errno is set.
+ * 0 on matching, -1 otherwise.
*/
static int
-flow_dv_modify_hdr_resource_register
- (struct rte_eth_dev *dev,
- struct mlx5_flow_dv_modify_hdr_resource *resource,
- struct mlx5_flow *dev_flow,
- struct rte_flow_error *error)
+flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_dv_modify_hdr_resource *resource;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
- struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- resource->flags = dev_flow->dv.group ? 0 :
- MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
- if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
- resource->flags))
- return rte_flow_error_set(error, EOVERFLOW,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "too many modify header items");
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
- ns = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
- ns = sh->tx_domain;
- else
- ns = sh->rx_domain;
- /* Lookup a matching resource from cache. */
+ resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx;
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_modify_hdr_resource,
+ entry);
actions_len = resource->actions_num * sizeof(resource->actions[0]);
- LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
- if (resource->ft_type == cache_resource->ft_type &&
- resource->actions_num == cache_resource->actions_num &&
- resource->flags == cache_resource->flags &&
- !memcmp((const void *)resource->actions,
- (const void *)cache_resource->actions,
- actions_len)) {
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- return 0;
- }
- }
- /* Register new modify-header resource. */
- cache_resource = rte_calloc(__func__, 1,
- sizeof(*cache_resource) + actions_len, 0);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- rte_memcpy(cache_resource->actions, resource->actions, actions_len);
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_modify_header
- (sh->ctx, cache_resource->ft_type, ns,
- cache_resource->flags, actions_len,
- (uint64_t *)cache_resource->actions);
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- return 0;
+ if (resource->entry.key == cache_resource->entry.key &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->actions_num == cache_resource->actions_num &&
+ resource->flags == cache_resource->flags &&
+ !memcmp((const void *)resource->actions,
+ (const void *)cache_resource->actions,
+ actions_len))
+ return 0;
+ return -1;
}
/**
- * Get DV flow counter by index.
+ * Validate the sample action.
*
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the sample action.
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] idx
- * mlx5 flow counter index in the container.
- * @param[out] ppool
- * mlx5 flow counter pool in the container,
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * Pointer to the counter, NULL otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter *
-flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
- uint32_t idx,
- struct mlx5_flow_counter_pool **ppool)
+static int
+flow_dv_validate_action_sample(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont;
- struct mlx5_flow_counter_pool *pool;
- uint32_t batch = 0, age = 0;
-
- idx--;
- age = MLX_CNT_IS_AGE(idx);
- idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
- if (idx >= MLX5_CNT_BATCH_OFFSET) {
- idx -= MLX5_CNT_BATCH_OFFSET;
- batch = 1;
- }
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
- MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
- pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
- MLX5_ASSERT(pool);
- if (ppool)
- *ppool = pool;
- return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
-}
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ const struct rte_flow_action_sample *sample = action->conf;
+ const struct rte_flow_action *act;
+ uint64_t sub_action_flags = 0;
+ uint16_t queue_index = 0xFFFF;
+ int actions_n = 0;
+ int ret;
+
+	fdb_mirror = 0;
-/**
- * Get a pool by devx counter ID.
- *
+ if (!sample)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be NULL");
+ if (sample->ratio == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "ratio value starts from 1");
+ if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "sample action not supported");
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Multiple sample actions not "
+ "supported");
+ if (action_flags & MLX5_FLOW_ACTION_METER)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, meter should "
+ "be after sample action");
+ if (action_flags & MLX5_FLOW_ACTION_JUMP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, jump should "
+ "be after sample action");
+ act = sample->actions;
+ for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "too many actions");
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(act,
+ sub_action_flags,
+ dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
+ sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, act,
+ sub_action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ else
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ ret = flow_dv_validate_action_port_id(dev,
+ sub_action_flags,
+ act,
+ attr,
+ error);
+ if (ret)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, act->conf, attr, &sub_action_flags,
+ &actions_n, error);
+ if (ret < 0)
+ return ret;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Doesn't support optional "
+ "action");
+ }
+ }
+ if (attr->ingress && !attr->transfer) {
+ if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+						  "Ingress must have a dest "
+ "QUEUE for Sample");
+ } else if (attr->egress && !attr->transfer) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+					  "Sample only supports ingress "
+ "or E-Switch");
+ } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
+ MLX5_ASSERT(attr->transfer);
+ if (sample->ratio > 1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "E-Switch doesn't support "
+ "any optional action "
+ "for sampling");
+ fdb_mirror = 1;
+ if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+						  "E-Switch must have a dest "
+ "port for mirroring");
+ }
+	/* Continue validation for Xcap actions. */
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't "
+ "supported");
+ if (!attr->transfer && attr->ingress && (sub_action_flags &
+ MLX5_FLOW_ACTION_ENCAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ }
+ return 0;
+}
+
+/**
+ * Find existing modify-header resource or create and register a new one.
+ *
+ * @param dev[in, out]
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to modify-header resource.
+ * @parm[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_modify_hdr_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_domain *ns;
+ uint32_t actions_len;
+ struct mlx5_hlist_entry *entry;
+ union mlx5_flow_modify_hdr_key hdr_mod_key = {
+ {
+ .ft_type = resource->ft_type,
+ .actions_num = resource->actions_num,
+ .group = dev_flow->dv.group,
+ .cksum = 0,
+ }
+ };
+ int ret;
+
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
+ resource->flags))
+ return rte_flow_error_set(error, EOVERFLOW,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "too many modify header items");
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ ns = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ ns = sh->tx_domain;
+ else
+ ns = sh->rx_domain;
+ /* Lookup a matching resource from cache. */
+ actions_len = resource->actions_num * sizeof(resource->actions[0]);
+ hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
+ resource->entry.key = hdr_mod_key.v64;
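+	/*
+	 * As with encap/decap resources, the key packs ft_type, the number
+	 * of actions, the group and a checksum of the action array;
+	 * flow_dv_modify_hdr_resource_match() resolves hash collisions by
+	 * comparing the full action list.
+	 */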
+ entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key,
+ flow_dv_modify_hdr_resource_match,
+ (void *)resource);
+ if (entry) {
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_modify_hdr_resource,
+ entry);
+ DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
+ return 0;
+	}
+ /* Register new modify-header resource. */
+ cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*cache_resource) + actions_len, 0,
+ SOCKET_ID_ANY);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ rte_memcpy(cache_resource->actions, resource->actions, actions_len);
+ ret = mlx5_flow_os_create_flow_action_modify_header
+ (sh->ctx, ns, cache_resource,
+ actions_len, &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
+ flow_dv_modify_hdr_resource_match,
+ (void *)cache_resource)) {
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_free(cache_resource);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "action exist");
+ }
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
+ DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Get DV flow counter by index.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * mlx5 flow counter index in the container.
+ * @param[out] ppool
+ * mlx5 flow counter pool in the container,
+ *
+ * @return
+ * Pointer to the counter, NULL otherwise.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
+ uint32_t idx,
+ struct mlx5_flow_counter_pool **ppool)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_pool *pool;
+ uint32_t batch = 0, age = 0;
+
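+	/*
+	 * Decode the 1-based counter index: an age flag and a batch offset
+	 * are folded into the upper part of the index; the remainder
+	 * selects the pool (idx / MLX5_COUNTERS_PER_POOL) and the counter
+	 * slot within it.
+	 */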
+ idx--;
+ age = MLX_CNT_IS_AGE(idx);
+ idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
+ if (idx >= MLX5_CNT_BATCH_OFFSET) {
+ idx -= MLX5_CNT_BATCH_OFFSET;
+ batch = 1;
+ }
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
+ MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
+ pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
+ MLX5_ASSERT(pool);
+ if (ppool)
+ *ppool = pool;
+ return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
+}
+
+/**
+ * Check the devx counter belongs to the pool.
+ *
+ * @param[in] pool
+ * Pointer to the counter pool.
+ * @param[in] id
+ * The counter devx ID.
+ *
+ * @return
+ * True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+ int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+ MLX5_COUNTERS_PER_POOL;
+
+ if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+ return true;
+ return false;
+}
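+
+/*
+ * Example (hypothetical IDs, assuming MLX5_COUNTERS_PER_POOL is 512):
+ * for min_dcs->id = 0x205 the base rounds down to 0x200, so devx
+ * counter IDs 0x200..0x3ff are considered to belong to this pool.
+ */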
+
+/**
+ * Get a pool by devx counter ID.
+ *
* @param[in] cont
* Pointer to the counter container.
* @param[in] id
flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
{
uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
- for (i = 0; i < n_valid; i++) {
+ /* Check last used pool. */
+ if (cont->last_pool_idx != POOL_IDX_INVALID &&
+ flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+ return cont->pools[cont->last_pool_idx];
+ /* ID out of range means no suitable pool in the container. */
+ if (id > cont->max_id || id < cont->min_id)
+ return NULL;
+ /*
+	 * Search the pools from the end of the container, since counter
+	 * IDs are mostly allocated in increasing sequence and the last
+	 * pool is usually the needed one.
+ */
+ i = rte_atomic16_read(&cont->n_valid);
+ while (i--) {
struct mlx5_flow_counter_pool *pool = cont->pools[i];
- int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
- MLX5_COUNTERS_PER_POOL;
- if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
- /*
- * Move the pool to the head, as counter allocate
- * always gets the first pool in the container.
- */
- if (pool != TAILQ_FIRST(&cont->pool_list)) {
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- }
+ if (flow_dv_is_counter_in_pool(pool, id))
return pool;
- }
}
return NULL;
}
static struct mlx5_counter_stats_mem_mng *
flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
{
- struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
- (dev->data->dev_private))->sh;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
MLX5_COUNTERS_PER_POOL +
sizeof(struct mlx5_counter_stats_raw)) * raws_n +
sizeof(struct mlx5_counter_stats_mem_mng);
- uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
+	size_t pgsize = rte_mem_page_size();
+	uint8_t *mem;
	int i;
+
+	if (pgsize == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
if (!mem) {
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
- mkey_attr.umem_id = mem_mng->umem->umem_id;
+ mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
mkey_attr.pd = sh->pdn;
mkey_attr.log_entity_size = 0;
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 1;
+ if (priv->config.hca_attr.relaxed_ordering_write &&
+ priv->config.hca_attr.relaxed_ordering_read &&
+ !haswell_broadwell_cpu)
+ mkey_attr.relaxed_ordering = 1;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
* Whether the pool is for Aging counter.
*
* @return
- * The new container pointer on success, otherwise NULL and rte_errno is set.
+ * 0 on success, otherwise negative errno value and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static int
flow_dv_container_resize(struct rte_eth_dev *dev,
uint32_t batch, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont =
- MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
- struct mlx5_pools_container *new_cont =
- MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0, age);
+ struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
+ age);
struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
+ void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
- int i;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
- /* Fallback mode has no background thread. Skip the check. */
- if (!priv->counter_fallback &&
- cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1, age)) {
- /* The last resize still hasn't detected by the host thread. */
- rte_errno = EAGAIN;
- return NULL;
- }
- new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
- if (!new_cont->pools) {
+ if (!pools) {
rte_errno = ENOMEM;
- return NULL;
+ return -ENOMEM;
}
- if (cont->n)
- memcpy(new_cont->pools, cont->pools, cont->n *
- sizeof(struct mlx5_flow_counter_pool *));
+ if (old_pools)
+ memcpy(pools, old_pools, cont->n *
+ sizeof(struct mlx5_flow_counter_pool *));
/*
* Fallback mode query the counter directly, no background query
* resources are needed.
*/
if (!priv->counter_fallback) {
+ int i;
+
mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
- MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+ MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
if (!mem_mng) {
- rte_free(new_cont->pools);
- return NULL;
+ mlx5_free(pools);
+ return -ENOMEM;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
mem_mng->raws +
MLX5_CNT_CONTAINER_RESIZE +
i, next);
- } else {
- /*
- * Release the old container pools directly as no background
- * thread helps that.
- */
- rte_free(cont->pools);
}
- new_cont->n = resize;
- rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
- TAILQ_INIT(&new_cont->pool_list);
- TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
- new_cont->init_mem_mng = mem_mng;
- rte_cio_wmb();
- /* Flip the master container. */
- priv->sh->cmng.mhi[batch][age] ^= (uint8_t)1;
- return new_cont;
+ rte_spinlock_lock(&cont->resize_sl);
+ cont->n = resize;
+ cont->mem_mng = mem_mng;
+ cont->pools = pools;
+ rte_spinlock_unlock(&cont->resize_sl);
+ if (old_pools)
+ mlx5_free(old_pools);
+ return 0;
}
/**
* @return
* The pool container pointer on success, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
uint32_t batch, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0, age);
+ age);
int16_t n_valid = rte_atomic16_read(&cont->n_valid);
uint32_t size = sizeof(*pool);
- if (cont->n == n_valid) {
- cont = flow_dv_container_resize(dev, batch, age);
- if (!cont)
- return NULL;
- }
+ if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
+ return NULL;
size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
- pool = rte_calloc(__func__, 1, size, 0);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
pool->min_dcs = dcs;
if (!priv->counter_fallback)
- pool->raw = cont->init_mem_mng->raws + n_valid %
- MLX5_CNT_CONTAINER_RESIZE;
+ pool->raw = cont->mem_mng->raws + n_valid %
+ MLX5_CNT_CONTAINER_RESIZE;
pool->raw_hw = NULL;
pool->type = 0;
pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
+ pool->query_gen = 0;
rte_spinlock_init(&pool->sl);
- /*
- * The generation of the new allocated counters in this pool is 0, 2 in
- * the pool generation makes all the counters valid for allocation.
- * The start and end query generation protect the counters be released
- * between the query and update gap period will not be reallocated
- * without the last query finished and stats updated to the memory.
- */
- rte_atomic64_set(&pool->start_query_gen, 0x2);
- /*
- * There's no background query thread for fallback mode, set the
- * end_query_gen to the maximum value since no need to wait for
- * statistics update.
- */
- rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
- INT64_MAX : 0x2);
- TAILQ_INIT(&pool->counters);
+ TAILQ_INIT(&pool->counters[0]);
+ TAILQ_INIT(&pool->counters[1]);
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
pool->index = n_valid;
cont->pools[n_valid] = pool;
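+	/*
+	 * For single-counter (non-batch) pools, track the devx ID range
+	 * covered by this container so that flow_dv_find_pool_by_id()
+	 * can reject out-of-range IDs without scanning the pool array.
+	 */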
+ if (!batch) {
+ int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+ if (base < cont->min_id)
+ cont->min_id = base;
+ if (base > cont->max_id)
+ cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+ cont->last_pool_idx = pool->index;
+ }
/* Pool initialization must be updated before host thread access. */
- rte_cio_wmb();
+ rte_io_wmb();
rte_atomic16_add(&cont->n_valid, 1);
- return cont;
+ return pool;
}
/**
- * Update the minimum dcs-id for aged or no-aged counter pool.
+ * Restore skipped counters in the pool.
+ *
+ * As the counter pool batch query requires the first counter
+ * dcs ID to be 4-aligned, the counters of a pool whose min_dcs
+ * ID is not 4-aligned are skipped.
+ * Once another min_dcs whose ID is lower than these skipped
+ * counters' dcs IDs appears, the skipped counters become safe
+ * to use.
+ * Should be called when min_dcs is updated.
*
- * @param[in] dev
- * Pointer to the Ethernet device structure.
* @param[in] pool
* Current counter pool.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
- * @param[in] age
- * Whether the counter is for aging.
+ * @param[in] last_min_dcs
+ * Last min_dcs.
*/
static void
-flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
- struct mlx5_flow_counter_pool *pool,
- uint32_t batch, uint32_t age)
+flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool,
+ struct mlx5_devx_obj *last_min_dcs)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *other;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_ext *cnt_ext;
+ uint32_t offset, new_offset;
+ uint32_t skip_cnt = 0;
+ uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, (age ^ 0x1));
- other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
- if (!other)
+ if (!pool->skip_cnt)
return;
- if (pool->min_dcs->id < other->min_dcs->id) {
- rte_atomic64_set(&other->a64_dcs,
- rte_atomic64_read(&pool->a64_dcs));
- } else {
- rte_atomic64_set(&pool->a64_dcs,
- rte_atomic64_read(&other->a64_dcs));
+ /*
+	 * If the last min_dcs is not valid, skipped counters may exist
+	 * even after it, so extend the offset to cover the whole pool.
+ */
+ if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+ offset = MLX5_COUNTERS_PER_POOL;
+ else
+ offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL;
+ new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL;
+ /*
+	 * Check the counters from offset 1 up to the last_min_dcs offset.
+	 * Counters before the new min_dcs mean the pool still has skipped
+	 * counters; counters skipped after the new min_dcs become ready
+	 * to use. The counter at offset 0 must be empty or be min_dcs
+	 * itself, so start from 1.
+ */
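+	/*
+	 * Hypothetical example: offset = 7 (from last_min_dcs) and
+	 * new_offset = 3; skipped counters at offsets 4..6 are released
+	 * to the free list, while any skipped ones at 1..3 stay skipped.
+	 */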
+ for (i = 1; i < offset; i++) {
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ if (cnt_ext->skipped) {
+ if (i > new_offset) {
+ cnt_ext->skipped = 0;
+ TAILQ_INSERT_TAIL
+ (&pool->counters[pool->query_gen],
+ MLX5_POOL_GET_CNT(pool, i), next);
+ } else {
+ skip_cnt++;
+ }
+ }
}
+ if (!skip_cnt)
+ pool->skip_cnt = 0;
}
+
/**
* Prepare a new counter and/or a new counter pool.
*
* Whether the pool is for counter that was allocated for aging.
*
* @return
- * The counter container pointer and @p cnt_free is set on success,
+ * The counter pool pointer and @p cnt_free is set on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_counter **cnt_free,
uint32_t batch, uint32_t age)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
+ struct mlx5_counters tmp_tq;
+ struct mlx5_devx_obj *last_min_dcs;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
+ uint32_t add2other;
uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
if (!batch) {
+retry:
+ add2other = 0;
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
if (!dcs)
return NULL;
pool = flow_dv_find_pool_by_id(cont, dcs->id);
+		/* Check if the counter belongs to an existing pool ID range. */
if (!pool) {
- cont = flow_dv_pool_create(dev, dcs, batch, age);
- if (!cont) {
- mlx5_devx_cmd_destroy(dcs);
- return NULL;
+ pool = flow_dv_find_pool_by_id
+ (MLX5_CNT_CONTAINER
+ (priv->sh, batch, (age ^ 0x1)), dcs->id);
+ /*
+			 * Pool exists; the counter will be added to the
+			 * other container and reallocated later.
+ */
+ if (pool) {
+ add2other = 1;
+ } else {
+ pool = flow_dv_pool_create(dev, dcs, batch,
+ age);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
+ }
}
- pool = TAILQ_FIRST(&cont->pool_list);
- } else if (dcs->id < pool->min_dcs->id) {
+ }
+ if ((dcs->id < pool->min_dcs->id ||
+ pool->min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) &&
+ !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) {
+ /*
+		 * Update the pool min_dcs only if the current dcs is
+		 * valid and the existing min_dcs is either not valid or
+		 * greater than the new dcs.
+ */
+ last_min_dcs = pool->min_dcs;
rte_atomic64_set(&pool->a64_dcs,
(int64_t)(uintptr_t)dcs);
+ /*
+ * Restore any skipped counters if the new min_dcs
+ * ID is smaller or min_dcs is not valid.
+ */
+ if (dcs->id < last_min_dcs->id ||
+ last_min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+ flow_dv_counter_restore(pool, last_min_dcs);
}
- flow_dv_counter_update_min_dcs(dev,
- pool, batch, age);
i = dcs->id % MLX5_COUNTERS_PER_POOL;
cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ /*
+		 * If min_dcs is not valid, the newly allocated dcs also
+		 * failed to become the valid min_dcs, so just skip it.
+		 * Likewise, if min_dcs is valid and the new dcs ID is
+		 * smaller but did not become the min_dcs, skip it too.
+ */
+ if (pool->min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) ||
+ dcs->id < pool->min_dcs->id) {
+ MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1;
+ pool->skip_cnt = 1;
+ goto retry;
+ }
+ if (add2other) {
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
+ cnt, next);
+ goto retry;
+ }
*cnt_free = cnt;
- return cont;
+ return pool;
}
/* bulk_bitmap is in 128 counters units. */
if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
rte_errno = ENODATA;
return NULL;
}
- cont = flow_dv_pool_create(dev, dcs, batch, age);
- if (!cont) {
+ pool = flow_dv_pool_create(dev, dcs, batch, age);
+ if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- pool = TAILQ_FIRST(&cont->pool_list);
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ TAILQ_INIT(&tmp_tq);
+ for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
+ TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
}
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
+ rte_spinlock_unlock(&cont->csl);
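+	/* Counters 1..N-1 went to the free list; counter 0 is handed to the caller. */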
*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
- return cont;
+ (*cnt_free)->pool = pool;
+ return pool;
}
/**
 * Search for an existing shared counter.
*
- * @param[in] cont
- * Pointer to the relevant counter pool container.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] id
* The shared counter ID to search.
* @param[out] ppool
* NULL if not existed, otherwise pointer to the shared extend counter.
*/
static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
struct mlx5_flow_counter_pool **ppool)
{
- static struct mlx5_flow_counter_ext *cnt;
- struct mlx5_flow_counter_pool *pool;
- uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
-
- for (i = 0; i < n_valid; i++) {
- pool = cont->pools[i];
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
- if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
- if (ppool)
- *ppool = cont->pools[i];
- return cnt;
- }
- }
- }
- return NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ union mlx5_l3t_data data;
+ uint32_t cnt_idx;
+
+ if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
+ return NULL;
+ cnt_idx = data.dword;
+ /*
+	 * Shared counters don't have age info. The counter extend is right
+	 * after the counter data structure.
+ */
+ return (struct mlx5_flow_counter_ext *)
+ ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
}
/**
*/
uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0, age);
+ age);
uint32_t cnt_idx;
if (!priv->config.devx) {
return 0;
}
if (shared) {
- cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+ cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
if (cnt_ext) {
if (cnt_ext->ref_cnt + 1 == 0) {
rte_errno = E2BIG;
return cnt_idx;
}
}
- /* Pools which has a free counters are in the start. */
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
- /*
- * The free counter reset values must be updated between the
- * counter release to the counter allocation, so, at least one
- * query must be done in this time. ensure it by saving the
- * query generation in the release time.
- * The free list is sorted according to the generation - so if
- * the first one is not updated, all the others are not
- * updated too.
- */
- cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen <
- rte_atomic64_read(&pool->end_query_gen))
- break;
- cnt_free = NULL;
- }
- if (!cnt_free) {
- cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
- if (!cont)
- return 0;
- pool = TAILQ_FIRST(&cont->pool_list);
- }
+ /* Get free counters from container. */
+ rte_spinlock_lock(&cont->csl);
+ cnt_free = TAILQ_FIRST(&cont->counters);
+ if (cnt_free)
+ TAILQ_REMOVE(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
+ batch, age))
+ goto err;
+ pool = cnt_free->pool;
if (!batch)
cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
struct mlx5_devx_obj *dcs;
+ int ret;
if (batch) {
offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
offset = 0;
dcs = cnt_ext->dcs;
}
- cnt_free->action = mlx5_glue->dv_create_flow_action_counter
- (dcs->obj, offset);
- if (!cnt_free->action) {
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
+ &cnt_free->action);
+ if (ret) {
rte_errno = errno;
- return 0;
+ goto err;
}
}
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
- return 0;
+ goto err;
if (cnt_ext) {
cnt_ext->shared = shared;
cnt_ext->ref_cnt = 1;
cnt_ext->id = id;
+ if (shared) {
+ union mlx5_l3t_data data;
+
+ data.dword = cnt_idx;
+ if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
+ return 0;
+ }
}
if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
- TAILQ_REMOVE(&pool->counters, cnt_free, next);
- if (TAILQ_EMPTY(&pool->counters)) {
- /* Move the pool to the end of the container pool list. */
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
- }
return cnt_idx;
+err:
+ if (cnt_free) {
+ cnt_free->pool = pool;
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ }
+ return 0;
}
/**
static void
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
struct mlx5_flow_counter_ext *cnt_ext = NULL;
MLX5_ASSERT(pool);
if (counter < MLX5_CNT_BATCH_OFFSET) {
cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (cnt_ext && --cnt_ext->ref_cnt)
- return;
+ if (cnt_ext) {
+ if (--cnt_ext->ref_cnt)
+ return;
+ if (cnt_ext->shared)
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+ cnt_ext->id);
+ }
}
if (IS_AGE_POOL(pool))
flow_dv_counter_remove_from_age(dev, counter, cnt);
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ cnt->pool = pool;
/*
- * Counters released between query trigger and handler need
- * to wait the next round of query. Since the packets arrive
- * in the gap period will not be taken into account to the
- * old counter.
+	 * Put the counter back to a list to be updated in non-fallback mode.
+	 * Two lists are used alternately: while one is being queried, freed
+	 * counters are added to the other one, selected by the pool
+	 * query_gen value. After a query finishes, the counters on its list
+	 * are moved to the global container counter list. Since the lists
+	 * switch when a query starts, no lock is needed: the query callback
+	 * and this release function always operate on different lists.
*/
- cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
+ if (!priv->counter_fallback)
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
+ else
+ TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
+ (priv->sh, 0, 0))->counters),
+ cnt, next);
}
/**
.hop_limits = 0xff,
},
};
+ const struct rte_flow_item_ecpri nic_ecpri_mask = {
+ .hdr = {
+ .common = {
+ .u32 =
+ RTE_BE32(((const struct rte_ecpri_common_hdr) {
+ .type = 0xFF,
+ }).u32),
+ },
+ .dummy[0] = 0xffffffff,
+ },
+ };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
uint16_t queue_index = 0xFFFF;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
+ if (!mlx5_flow_os_item_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_validate_item_vlan(items, item_flags,
- dev, error);
+ ret = flow_dv_validate_item_vlan(items, item_flags,
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
return ret;
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ /* Capacity will be checked in the translate stage. */
+ ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ecpri_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count(dev, error);
if (ret < 0)
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ ret = flow_dv_validate_action_sample(action_flags,
+ actions, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ ++actions_n;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no fate action is found");
}
- /* Continue validation for Xcap actions.*/
- if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+	/* Continue validation for Xcap and VLAN actions. */
+ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
+ MLX5_FLOW_VLAN_ACTIONS)) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap and decap "
"combination aren't supported");
- if (!attr->transfer && attr->ingress && (action_flags &
- MLX5_FLOW_ACTION_ENCAP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "encap is not supported"
- " for ingress traffic");
+ if (!attr->transfer && attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
}
/* Hairpin flow will add one more TAG action. */
if (hairpin > 0)
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
dev_flow->handle_idx = handle_idx;
- dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+	 * Some old rdma-core releases check the length of the matching
+	 * parameter before continuing, so the length without the misc4
+	 * param is used by default here. If the flow uses misc4, the
+	 * length is adjusted accordingly later. Each param member is
+	 * naturally aligned on a 64B boundary.
+ */
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
/*
	 * The matching value needs to be cleared to 0 before use. In the
	 * past, it was automatically cleared when using rte_*alloc
}
#endif
+/**
+ * Add match of ip_version.
+ *
+ * @param[in] group
+ * Flow group.
+ * @param[in] headers_v
+ * Values header pointer.
+ * @param[in] headers_m
+ * Masks header pointer.
+ * @param[in] ip_version
+ * The IP version to set.
+ */
+static inline void
+flow_dv_set_match_ip_version(uint32_t group,
+ void *headers_v,
+ void *headers_m,
+ uint8_t ip_version)
+{
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
+ ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
+}
+
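+/*
+ * Illustration: matching IPv4 on the root table (group 0) programs the
+ * ip_version mask as 0xf and the value as 4, while on non-root tables
+ * mask and value are both 4. In either case the ethertype match is
+ * cleared, since ip_version already implies it.
+ */
+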
/**
* Add Ethernet item to matcher and to the value.
*
*/
static void
flow_dv_translate_item_eth(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
{
const struct rte_flow_item_eth *eth_m = item->mask;
const struct rte_flow_item_eth *eth_v = item->spec;
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
* Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+	 * eCPRI over an Ethernet layer uses the type value 0xAEFE.
*/
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ }
}
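+/*
+ * Illustration: an ETH item with type 0x0800 and mask 0xFFFF is now
+ * translated into ip_version 4 rather than an ethertype match; any
+ * other type/mask combination still programs the ethertype field.
+ */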
/**
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
uint16_t tci_m;
uint16_t tci_v;
- if (!vlan_v)
- return;
- if (!vlan_m)
- vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->handle->vf_vlan.tag =
- rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ if (vlan_v)
+ dev_flow->handle->vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
- tci_m = rte_be_to_cpu_16(vlan_m->tci);
- tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+ /*
+ * When VLAN item exists in flow, mark packet as tagged,
+ * even if TCI is not specified.
+ */
MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &rte_flow_item_vlan_mask;
+ tci_m = rte_be_to_cpu_16(vlan_m->tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type &
+ vlan_v->inner_type));
+ }
}
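+/*
+ * Illustration: a VLAN item with a NULL spec still sets cvlan_tag in
+ * both mask and value above, so a pattern like "eth / vlan" matches
+ * any tagged packet without constraining the TCI or the inner type.
+ */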
/**
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
/*
	 * On outer header (which must contain L2), or inner header with L2,
* set cvlan_tag mask bit to mark this packet as untagged.
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
/*
	 * On outer header (which must contain L2), or inner header with L2,
* set cvlan_tag mask bit to mark this packet as untagged.
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- const char *tni_flow_id_m = (const char *)nvgre_m->tni;
- const char *tni_flow_id_v = (const char *)nvgre_v->tni;
+ const char *tni_flow_id_m;
+ const char *tni_flow_id_v;
char *gre_key_m;
char *gre_key_v;
int size;
return;
if (!nvgre_m)
nvgre_m = &rte_flow_item_nvgre_mask;
+ tni_flow_id_m = (const char *)nvgre_m->tni;
+ tni_flow_id_v = (const char *)nvgre_v->tni;
size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
{
const struct rte_flow_item_icmp *icmp_m = item->mask;
const struct rte_flow_item_icmp *icmp_v = item->spec;
+ uint32_t icmp_header_data_m = 0;
+ uint32_t icmp_header_data_v = 0;
void *headers_m;
void *headers_v;
void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
icmp_m->hdr.icmp_code);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+ icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+ icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+ if (icmp_header_data_m) {
+ icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+ icmp_header_data_v |=
+ rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+ icmp_header_data_m);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+ icmp_header_data_v & icmp_header_data_m);
+ }
}
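+/*
+ * Illustration: matching only icmp_ident with mask 0xFFFF yields an
+ * icmp_header_data mask of 0xFFFF0000: the identifier occupies the
+ * upper 16 bits of the header-data dword and the sequence number the
+ * lower 16 bits.
+ */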
/**
return;
if (!gtp_m)
gtp_m = &rte_flow_item_gtp_mask;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
+ gtp_m->v_pt_rsv_flags);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
+ gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
gtp_v->msg_type & gtp_m->msg_type);
rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+ const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+ void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_4);
+ void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+ uint32_t *samples;
+ void *dw_m;
+ void *dw_v;
+
+ if (!ecpri_v)
+ return;
+ if (!ecpri_m)
+ ecpri_m = &rte_flow_item_ecpri_mask;
+ /*
+	 * At most four DW samples are supported in a single matching now.
+	 * Two of them are used for eCPRI matching:
+	 * 1. Type: one byte, mask should be 0x00ff0000 in network order.
+	 * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
+	 *    if any.
+ */
+ if (!ecpri_m->hdr.common.u32)
+ return;
+ samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ /* Need to take the whole DW as the mask to fill the entry. */
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_0);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_0);
+ /* Already big endian (network order) in the header. */
+ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
+ *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
+ /* Sample#0, used for matching type, offset 0. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_0, samples[0]);
+	/* Masking the sample ID itself makes no sense; set the same ID in both. */
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_0, samples[0]);
+ /*
+ * Checking if message body part needs to be matched.
+ * Some wildcard rules only matching type field should be supported.
+ */
+ if (ecpri_m->hdr.dummy[0]) {
+ switch (ecpri_v->hdr.common.type) {
+ case RTE_ECPRI_MSG_TYPE_IQ_DATA:
+ case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
+ case RTE_ECPRI_MSG_TYPE_DLY_MSR:
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_1);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_1);
+ *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
+ *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
+ /* Sample#1, to match message body, offset 4. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_1, samples[1]);
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_1, samples[1]);
+ break;
+ default:
+ /* Others, do not match any sample ID. */
+ break;
+ }
+ }
+}
+
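+/*
+ * Illustration: a rule on type RTE_ECPRI_MSG_TYPE_IQ_DATA that also
+ * matches the message ID programs sample #0 with the common header
+ * dword (type mask 0x00ff0000) and sample #1 with dummy[0]; a
+ * type-only wildcard rule programs sample #0 alone.
+ */
+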
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
return match_criteria_enable;
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
union mlx5_flow_tbl_key table_key = {
{
domain = sh->tx_domain;
else
domain = sh->rx_domain;
- tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
- if (!tbl->obj) {
+ ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
+ if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create flow table object");
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot insert flow table data entry");
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
}
rte_atomic32_inc(&tbl->refcnt);
struct mlx5_flow_tbl_resource *tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
struct mlx5_hlist_entry *pos = &tbl_data->entry;
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
/* remove the entry from the hash list and free memory. */
mlx5_hlist_remove(sh->flow_tbls, pos);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
};
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_flow_tbl_data_entry *tbl_data;
+ int ret;
tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
key->domain, error);
}
}
/* Register new matcher. */
- cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+ cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+ SOCKET_ID_ANY);
if (!cache_matcher) {
flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
dv_attr.priority = matcher->priority;
if (key->direction)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
- if (!cache_matcher->matcher_object) {
- rte_free(cache_matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &cache_matcher->matcher_object);
+ if (ret) {
+ mlx5_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
#endif
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
+ int ret;
/* Lookup a matching resource from cache. */
entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
cache_resource->entry.key = (uint64_t)tag_be24;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
- mlx5_glue->destroy_flow_action(cache_resource->action);
- rte_free(cache_resource);
+ mlx5_flow_os_destroy_flow_action(cache_resource->action);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, EEXIST,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
uint32_t tag_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *tag;
tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action(tag->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
#else
/*
	 * Legacy mode, no LAG configuration is supported.
}
/**
- * Set the hash fields according to the @p flow information.
+ * Set the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ uint64_t items = dev_flow->handle->layers;
+ int rss_inner = 0;
+ uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
+
+ dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss_desc->level >= 2) {
+ dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ rss_inner = 1;
+ }
+#endif
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ else
+ dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ else
+ dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ }
+ }
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_UDP;
+ else
+ dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_TCP;
+ else
+ dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ }
+ }
+}
+
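+/*
+ * Illustration: for an outer UDP flow with ETH_RSS_UDP and
+ * ETH_RSS_L4_SRC_ONLY requested, the L4 branch above contributes only
+ * IBV_RX_HASH_SRC_PORT_UDP; without the SRC/DST_ONLY hints the full
+ * MLX5_UDP_IBV_RX_HASH source and destination port pair is used.
+ */
+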
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hrxq_idx
+ * Hash Rx queue index.
+ *
+ * @return
+ *   The pointer to the hash Rx queue object, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_hrxq *
+flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint32_t *hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_handle *dh = dev_flow->handle;
+ struct mlx5_hrxq *hrxq;
+
+ MLX5_ASSERT(rss_desc->queue_num);
+ *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num);
+ if (!*hrxq_idx) {
+ *hrxq_idx = mlx5_hrxq_new
+ (dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!*hrxq_idx)
+ return NULL;
+ }
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ *hrxq_idx);
+ return hrxq;
+}
+
+/**
+ * Find existing sample resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] resource
+ * Pointer to sample resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[in, out] sample_dv_actions
+ * Pointer to sample actions list.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno (errno is set).
+ */
+static int
+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct mlx5_flow_dv_sample_resource *resource,
+ struct mlx5_flow *dev_flow,
+ void **sample_dv_actions,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5dv_dr_flow_sampler_attr sampler_attr;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_resource *tbl;
+ uint32_t idx = 0;
+ const uint32_t next_ft_step = 1;
+ uint32_t next_ft_id = resource->ft_id + next_ft_step;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
+ idx, cache_resource, next) {
+ if (resource->ratio == cache_resource->ratio &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->ft_id == cache_resource->ft_id &&
+ resource->set_action == cache_resource->set_action &&
+ !memcmp((void *)&resource->sample_act,
+ (void *)&cache_resource->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list))) {
+ DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->handle->dvh.rix_sample = idx;
+ dev_flow->dv.sample_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new sample resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
+ &dev_flow->handle->dvh.rix_sample);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ /* Create normal path table level */
+ tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
+ attr->egress, attr->transfer, error);
+ if (!tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "fail to create normal path table "
+ "for sample");
+ goto error;
+ }
+ cache_resource->normal_path_tbl = tbl;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ cache_resource->default_miss =
+ mlx5_glue->dr_create_flow_action_default_miss();
+ if (!cache_resource->default_miss) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create default miss "
+ "action");
+ goto error;
+ }
+ sample_dv_actions[resource->sample_act.actions_num++] =
+ cache_resource->default_miss;
+ }
+ /* Create a DR sample action */
+ sampler_attr.sample_ratio = cache_resource->ratio;
+ sampler_attr.default_next_table = tbl->obj;
+ sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+ sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
+ &sample_dv_actions[0];
+ sampler_attr.action = cache_resource->set_action;
+ cache_resource->verbs_action =
+ mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
+ if (!cache_resource->verbs_action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create sample action");
+ goto error;
+ }
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
+ dev_flow->handle->dvh.rix_sample, cache_resource,
+ next);
+ dev_flow->dv.sample_res = cache_resource;
+ DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ return 0;
+error:
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->default_miss));
+ } else {
+ if (cache_resource->sample_idx.rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ cache_resource->sample_idx.rix_hrxq))
+ cache_resource->sample_idx.rix_hrxq = 0;
+ if (cache_resource->sample_idx.rix_tag &&
+ !flow_dv_tag_release(dev,
+ cache_resource->sample_idx.rix_tag))
+ cache_resource->sample_idx.rix_tag = 0;
+ if (cache_resource->sample_idx.cnt) {
+ flow_dv_counter_release(dev,
+ cache_resource->sample_idx.cnt);
+ cache_resource->sample_idx.cnt = 0;
+ }
+ }
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(dev,
+ cache_resource->normal_path_tbl);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
+ dev_flow->handle->dvh.rix_sample);
+ dev_flow->handle->dvh.rix_sample = 0;
+ return -rte_errno;
+}
+
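+/*
+ * Note: two flows sampling with the same ratio, table type/id,
+ * set_action and an identical sub-action list share one cached
+ * sampler object above; the second registration only increments
+ * the reference counter.
+ */
+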
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] resource
+ * Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno (errno is set).
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+ struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ sh->dest_array_list,
+ idx, cache_resource, next) {
+ if (resource->num_of_dest == cache_resource->num_of_dest &&
+ resource->ft_type == cache_resource->ft_type &&
+ !memcmp((void *)cache_resource->sample_act,
+ (void *)resource->sample_act,
+ (resource->num_of_dest *
+ sizeof(struct mlx5_flow_sub_actions_list)))) {
+ DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->handle->dvh.rix_dest_array = idx;
+ dev_flow->dv.dest_array_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new destination array resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &dev_flow->handle->dvh.rix_dest_array);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ if (attr->transfer)
+ domain = sh->fdb_domain;
+ else if (attr->ingress)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5dv_dr_action_dest_attr),
+ 0, SOCKET_ID_ANY);
+ if (!dest_attr[idx]) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ goto error;
+ }
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+ sample_act = &resource->sample_act[idx];
+ if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+ dest_attr[idx]->dest = sample_act->dr_queue_action;
+ } else if (sample_act->action_flags ==
+ (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+ dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+ dest_attr[idx]->dest_reformat->reformat =
+ sample_act->dr_encap_action;
+ dest_attr[idx]->dest_reformat->dest =
+ sample_act->dr_port_id_action;
+ } else if (sample_act->action_flags ==
+ MLX5_FLOW_ACTION_PORT_ID) {
+ dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ }
+ }
+	/* Create a dest array action. */
+ cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+ (domain,
+ cache_resource->num_of_dest,
+ dest_attr);
+ if (!cache_resource->action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create destination array action");
+ goto error;
+ }
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &sh->dest_array_list,
+ dev_flow->handle->dvh.rix_dest_array, cache_resource,
+ next);
+ dev_flow->dv.dest_array_res = cache_resource;
+ DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ mlx5_free(dest_attr[idx]);
+ return 0;
+error:
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ struct mlx5_flow_sub_actions_idx *act_res =
+ &cache_resource->sample_idx[idx];
+ if (act_res->rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ act_res->rix_hrxq))
+ act_res->rix_hrxq = 0;
+ if (act_res->rix_encap_decap &&
+ !flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap))
+ act_res->rix_encap_decap = 0;
+ if (act_res->rix_port_id_action &&
+ !flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action))
+ act_res->rix_port_id_action = 0;
+ if (dest_attr[idx])
+ mlx5_free(dest_attr[idx]);
+ }
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ dev_flow->handle->dvh.rix_dest_array);
+ dev_flow->handle->dvh.rix_dest_array = 0;
+ return -rte_errno;
+}
+
+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ *   Pointer to the number of destinations.
+ * @param[in, out] sample_actions
+ * Pointer to sample actions list.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_action_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t *num_of_dest,
+ void **sample_actions,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_sample *sample_action;
+ const struct rte_flow_action *sub_actions;
+ const struct rte_flow_action_queue *queue;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5_flow_sub_actions_idx *sample_idx;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint64_t action_flags = 0;
+
+ sample_act = &res->sample_act;
+ sample_idx = &res->sample_idx;
+ sample_action = (const struct rte_flow_action_sample *)action->conf;
+ res->ratio = sample_action->ratio;
+ sub_actions = sample_action->actions;
+ for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
+ int type = sub_actions->type;
+ uint32_t pre_rix = 0;
+ void *pre_r;
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ queue = sub_actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ uint32_t tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (sub_actions->conf))->id);
+
+ dev_flow->handle->mark = 1;
+ pre_rix = dev_flow->handle->dvh.rix_tag;
+ /* Save the mark resource before sample */
+ pre_r = dev_flow->dv.tag_resource;
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ sample_act->dr_tag_action =
+ dev_flow->dv.tag_resource->action;
+ sample_idx->rix_tag =
+ dev_flow->handle->dvh.rix_tag;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_tag_action;
+ /* Recover the mark resource after sample */
+ dev_flow->dv.tag_resource = pre_r;
+ dev_flow->handle->dvh.rix_tag = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ uint32_t counter;
+
+ counter = flow_dv_translate_create_counter(dev,
+ dev_flow, sub_actions->conf, 0);
+ if (!counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ sample_idx->cnt = counter;
+ sample_act->dr_cnt_action =
+ (flow_dv_counter_get_by_idx(dev,
+ counter, NULL))->action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_cnt_action;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+						  "unsupported action in sample");
+ }
+ }
+ sample_act->action_flags = action_flags;
+ res->ft_id = dev_flow->dv.group;
+ if (attr->transfer) {
+ union {
+ uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
+ uint64_t set_action;
+ } action_ctx = { .set_action = 0 };
+
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ MLX5_SET(set_action_in, action_ctx.action_in, action_type,
+ MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, action_ctx.action_in, field,
+ MLX5_MODI_META_REG_C_0);
+ MLX5_SET(set_action_in, action_ctx.action_in, data,
+ priv->vport_meta_tag);
+ res->set_action = action_ctx.set_action;
+ } else if (attr->ingress) {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ }
+ return 0;
+}
+
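+/*
+ * Note: only QUEUE and PORT_ID sub-actions count as destinations above;
+ * MARK, COUNT and RAW_ENCAP add sample-path actions without touching
+ * num_of_dest. On transfer flows, set_action is prepared to write the
+ * vport metadata tag into REG_C_0.
+ */
+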
+/**
+ * Create the sample action and register the related resources.
*
- * @param[in] dev_flow
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
* Pointer to the mlx5_flow.
- * @param[in] rss_desc
- * Pointer to the mlx5_flow_rss_desc.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] num_of_dest
+ *   The number of destinations.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc)
+static int
+flow_dv_create_action_sample(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
{
- uint64_t items = dev_flow->handle->layers;
- int rss_inner = 0;
- uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
-
- dev_flow->hash_fields = 0;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss_desc->level >= 2) {
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
- rss_inner = 1;
- }
-#endif
- if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
- if (rss_types & MLX5_IPV4_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
- else
- dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
- }
- } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
- if (rss_types & MLX5_IPV6_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
- else
- dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ struct mlx5_priv *priv = dev->data->dev_private;
+	/* Update the normal path action resource into the last index of the array. */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ if (num_of_dest > 1) {
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring */
+ hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
}
- }
- if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
- if (rss_types & ETH_RSS_UDP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_UDP;
- else
- dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
}
- } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
- if (rss_types & ETH_RSS_TCP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_TCP;
- else
- dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
}
+ sample_act->actions_num = normal_idx;
+		/* Update the sample action resource into the first index of the array. */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
+ if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
+ sample_actions, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
}
+ return 0;
}
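+/*
+ * Note: with more than one destination (mirroring) the normal-path
+ * queue/encap/port-id resources are packed into the last dest-array
+ * slot and the sample-path ones copied into slot 0 above; a single
+ * destination registers a plain sampler with its normal-path table.
+ */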
/**
uint64_t priority = attr->priority;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
- .size = sizeof(matcher.mask.buf),
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
},
};
int actions_n = 0;
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+	/* Update the normal path action resource into the last index of the array. */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
!!priv->fdb_def_rule, &table, error);
if (ret)
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
const uint8_t *rss_key;
- const struct rte_flow_action_jump *jump_data;
const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
uint32_t port_id = 0;
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
struct mlx5_flow_meter *fm = NULL;
+ uint32_t jump_group = 0;
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
- memset(&port_id_resource, 0, sizeof(port_id_resource));
port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- MLX5_ASSERT(!handle->rix_port_id_action);
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
rss_desc->queue[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = actions->conf;
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- jump_data = action->conf;
+ jump_group = ((const struct rte_flow_action_jump *)
+ action->conf)->group;
+ if (dev_flow->external && jump_group <
+ MLX5_MAX_TABLES_EXTERNAL)
+ jump_group *= MLX5_FLOW_TABLE_FACTOR;
ret = mlx5_flow_group_to_table(attr, dev_flow->external,
- jump_data->group,
+ jump_group,
!!priv->fdb_def_rule,
&table, error);
if (ret)
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
+ break;
case RTE_FLOW_ACTION_TYPE_METER:
mtr = actions->conf;
if (!flow->meter) {
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ sample_act_pos = actions_n;
+ ret = flow_dv_translate_action_sample(dev,
+ actions,
+ dev_flow, attr,
+ &num_of_dest,
+ sample_actions,
+ &sample_res,
+ error);
+ if (ret < 0)
+ return ret;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+			/* Put the encap action into the group if it works with port id. */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- handle->dvh.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->action;
}
if (action_flags & MLX5_FLOW_ACTION_COUNT) {
flow->counter =
NULL,
"cannot create counter"
" object.");
- dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.actions[actions_n] =
(flow_dv_counter_get_by_idx(dev,
flow->counter, NULL))->action;
+ actions_n++;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ ret = flow_dv_create_action_sample(dev,
+ dev_flow, attr,
+ num_of_dest,
+ &sample_res,
+ &mdest_res,
+ sample_actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create sample action");
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.sample_res->verbs_action;
+ }
}
break;
default:
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
+ /*
+	 * For multiple destinations (sample action with ratio=1), the encap
+	 * action and the port id action will be combined into a group
+	 * action. Therefore the original actions must be removed from the
+	 * flow and only the sample action is used instead.
+ */
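+	/*
+	 * E.g., for [encap, port_id, sample(ratio=1)] the standalone encap
+	 * and port id handles are filtered out below and only the combined
+	 * dest-array action at sample_act_pos remains in dv.actions.
+	 */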
+ if (num_of_dest > 1 && sample_act->dr_port_id_action) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
dev_flow->dv.actions_n = actions_n;
dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
+ if (!mlx5_flow_os_item_supported(item_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id(dev, match_mask,
break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
+ items, tunnel,
+ dev_flow->dv.group);
+ matcher.priority = action_flags &
+ MLX5_FLOW_ACTION_DEFAULT_MISS &&
+ !dev_flow->external ?
+ MLX5_PRIORITY_MAP_L3 :
+ MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(dev_flow,
match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ if (!mlx5_flex_parser_ecpri_exist(dev)) {
+ /* Create it only the first time to be used. */
+ ret = mlx5_flex_parser_ecpri_alloc(dev);
+ if (ret)
+ return rte_flow_error_set
+ (error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "cannot create eCPRI parser");
+ }
+ /* Adjust the matcher mask and device flow value sizes. */
+ matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ dev_flow->dv.value.size =
+ MLX5_ST_SZ_BYTES(fte_match_param);
+ flow_dv_translate_item_ecpri(dev, match_mask,
+ match_value, items);
+ /* No other protocol should follow eCPRI layer. */
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
break;
}
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
struct mlx5_hrxq *drop_hrxq;
- drop_hrxq = mlx5_hrxq_drop_new(dev);
+ drop_hrxq = mlx5_drop_action_create(dev);
if (!drop_hrxq) {
rte_flow_error_set
(error, errno,
}
/*
* Drop queues will be released by the
- * mlx5_hrxq_drop_release() function. Assign
+ * mlx5_drop_action_destroy() function. Assign
* the special index to hrxq to mark that the
* queue has been allocated.
*/
dh->rix_hrxq = UINT32_MAX;
dv->actions[n++] = drop_hrxq->action;
}
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+ !dv_h->rix_sample && !dv_h->rix_dest_array) {
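+ /*
+ * When a sample or dest array action is present it
+ * embeds its own queue fate, so no hrxq is attached
+ * to the handle here.
+ */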
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
struct mlx5_flow_rss_desc *rss_desc =
if (!hrxq_idx) {
hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
}
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
}
dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
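+ /*
+ * The default miss action is a single reference-counted
+ * resource shared through the device context.
+ */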
+ if (flow_dv_default_miss_resource_register
+ (dev, error)) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss resource");
+ goto error_default_miss;
+ }
+ dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
+ dv->actions[n++] = priv->sh->default_miss.action;
}
- dh->ib_flow =
- mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
- (void *)&dv->value, n,
- dv->actions);
- if (!dh->ib_flow) {
+ err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
+ (void *)&dv->value, n,
+ dv->actions, &dh->drv_flow);
+ if (err) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
}
return 0;
error:
+ if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
+ flow_dv_default_miss_resource_release(dev);
+error_default_miss:
err = rte_errno; /* Save rte_errno before cleanup. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
/* hrxq is union, don't clear it if the flag is not set. */
if (dh->rix_hrxq) {
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
- mlx5_hrxq_drop_release(dev);
+ mlx5_drop_action_destroy(dev);
dh->rix_hrxq = 0;
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, dh->rix_hrxq);
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
/* table ref-- in release interface. */
flow_dv_tbl_resource_release(dev, matcher->tbl);
- rte_free(matcher);
+ mlx5_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
return 0;
*
* @param dev
* Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param encap_decap_idx
+ * Index of the encap/decap resource.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t encap_decap_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_encap_decap;
+ uint32_t idx = encap_decap_idx;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
idx);
if (!cache_resource)
return 0;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &priv->sh->encaps_decaps, idx,
- cache_resource, next);
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_hlist_remove(priv->sh->encaps_decaps,
+ &cache_resource->entry);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
DRV_LOG(DEBUG, "encap/decap resource %p: removed",
(void *)cache_resource);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
/* jump action memory free is inside the table release. */
flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
DRV_LOG(DEBUG, "jump table resource %p: removed",
return 1;
}
+/**
+ * Release a default miss resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
+ (void *)cache_resource->action,
+ rte_atomic32_read(&cache_resource->refcnt));
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
+ DRV_LOG(DEBUG, "default miss resource %p: removed",
+ (void *)cache_resource->action);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Release a modify-header resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param handle
* Pointer to mlx5_flow_handle.
*
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
+flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
handle->dvh.modify_hdr;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
- LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_hlist_remove(priv->sh->modify_cmds,
+ &cache_resource->entry);
+ mlx5_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource);
return 0;
*/
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t port_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = port_id;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
&priv->sh->port_id_action_list, idx,
cache_resource, next);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
&priv->sh->push_vlan_action_list, idx,
cache_resource, next);
{
if (!handle->rix_fate)
return;
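+ /* The fate resource indices share a union; dispatch on the fate type. */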
- if (handle->fate_action == MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
+ switch (handle->fate_action) {
+ case MLX5_FLOW_FATE_DROP:
+ mlx5_drop_action_destroy(dev);
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
mlx5_hrxq_release(dev, handle->rix_hrxq);
- else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
+ break;
+ case MLX5_FLOW_FATE_JUMP:
flow_dv_jump_tbl_resource_release(dev, handle);
- else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
- flow_dv_port_id_action_resource_release(dev, handle);
- else
+ break;
+ case MLX5_FLOW_FATE_PORT_ID:
+ flow_dv_port_id_action_resource_release(dev,
+ handle->rix_port_id_action);
+ break;
+ case MLX5_FLOW_FATE_DEFAULT_MISS:
+ flow_dv_default_miss_resource_release(dev);
+ break;
+ default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+ break;
+ }
handle->rix_fate = 0;
}
+/**
+ * Release a sample resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_sample_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = handle->dvh.rix_sample;
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->verbs_action);
+ DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->verbs_action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->verbs_action));
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->default_miss));
+ }
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(dev,
+ cache_resource->normal_path_tbl);
+ }
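+ /*
+ * The sub-resources referenced by the sample action keep
+ * their own reference counts; each index is cleared only
+ * once the sub-resource is actually freed.
+ */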
+ if (cache_resource->sample_idx.rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ cache_resource->sample_idx.rix_hrxq))
+ cache_resource->sample_idx.rix_hrxq = 0;
+ if (cache_resource->sample_idx.rix_tag &&
+ !flow_dv_tag_release(dev,
+ cache_resource->sample_idx.rix_tag))
+ cache_resource->sample_idx.rix_tag = 0;
+ if (cache_resource->sample_idx.cnt) {
+ flow_dv_counter_release(dev,
+ cache_resource->sample_idx.cnt);
+ cache_resource->sample_idx.cnt = 0;
+ }
+ if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ &priv->sh->sample_action_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
+ DRV_LOG(DEBUG, "sample resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_sub_actions_idx *mdest_act_res;
+ uint32_t idx = handle->dvh.rix_dest_array;
+ uint32_t i = 0;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
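+ /* Walk all destinations and drop their sub-action references. */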
+ for (; i < cache_resource->num_of_dest; i++) {
+ mdest_act_res = &cache_resource->sample_idx[i];
+ if (mdest_act_res->rix_hrxq) {
+ mlx5_hrxq_release(dev,
+ mdest_act_res->rix_hrxq);
+ mdest_act_res->rix_hrxq = 0;
+ }
+ if (mdest_act_res->rix_encap_decap) {
+ flow_dv_encap_decap_resource_release(dev,
+ mdest_act_res->rix_encap_decap);
+ mdest_act_res->rix_encap_decap = 0;
+ }
+ if (mdest_act_res->rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ mdest_act_res->rix_port_id_action);
+ mdest_act_res->rix_port_id_action = 0;
+ }
+ if (mdest_act_res->rix_tag) {
+ flow_dv_tag_release(dev,
+ mdest_act_res->rix_tag);
+ mdest_act_res->rix_tag = 0;
+ }
+ }
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &priv->sh->dest_array_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
+ DRV_LOG(DEBUG, "destination array resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keep it in memory.
* Lock free (the mutex should be acquired by the caller).
handle_idx);
if (!dh)
return;
- if (dh->ib_flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
- dh->ib_flow = NULL;
+ if (dh->drv_flow) {
+ claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
+ dh->drv_flow = NULL;
}
if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+ dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
+ dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
flow_dv_fate_resource_release(dev, dh);
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_sample)
+ flow_dv_sample_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_dest_array)
+ flow_dv_dest_array_resource_release(dev, dev_handle);
if (dev_handle->dvh.rix_encap_decap)
- flow_dv_encap_decap_resource_release(dev, dev_handle);
+ flow_dv_encap_decap_resource_release(dev,
+ dev_handle->dvh.rix_encap_decap);
if (dev_handle->dvh.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_handle);
+ flow_dv_modify_hdr_resource_release(dev, dev_handle);
if (dev_handle->dvh.rix_push_vlan)
flow_dv_push_vlan_action_resource_release(dev,
dev_handle);
if (!mtd || !priv->config.dv_flow_en)
return 0;
if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.color_matcher));
if (mtd->egress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.any_matcher));
if (mtd->egress.tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
if (mtd->egress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
if (mtd->ingress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.color_matcher));
if (mtd->ingress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
if (mtd->ingress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
if (mtd->transfer.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.color_matcher));
if (mtd->transfer.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
if (mtd->transfer.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
- claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
- rte_free(mtd);
+ claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
+ mlx5_free(mtd);
return 0;
}
uint32_t color_reg_c_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_match_params mask = {
.size = sizeof(mask.buf),
};
struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
int i = 0;
+ int ret;
if (transfer)
dtb = &mtb->transfer;
/* Create matchers, Any and Color. */
dv_attr.priority = 3;
dv_attr.match_criteria_enable = 0;
- dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->any_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->any_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter"
" policer default matcher.");
goto error_exit;
1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->color_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->color_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer color matcher.");
goto error_exit;
}
actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
actions[i++] = mtb->drop_actn;
/* Default rule: lowest priority, match any, actions: drop. */
- dtb->policer_rules[RTE_MTR_DROPPED] =
- mlx5_glue->dv_create_flow(dtb->any_matcher,
- (void *)&value, i, actions);
- if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+ ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
+ actions,
+ &dtb->policer_rules[RTE_MTR_DROPPED]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer drop rule.");
goto error_exit;
}
rte_errno = ENOTSUP;
return NULL;
}
- mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+ mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
if (!mtb) {
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;
mtb->count_actns[i] = cnt->action;
}
/* Create drop action. */
- mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
- if (!mtb->drop_actn) {
+ ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create drop action.");
goto error_exit;
}
for (i = 0; i < RTE_MTR_DROPPED; i++) {
if (dt->policer_rules[i]) {
- claim_zero(mlx5_glue->dv_destroy_flow
- (dt->policer_rules[i]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (dt->policer_rules[i]));
dt->policer_rules[i] = NULL;
}
}
if (dt->jump_actn) {
- claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
dt->jump_actn = NULL;
}
}
struct mlx5_meter_domains_infos *mtb = fm->mfts;
void *actions[METER_ACTIONS];
int i;
+ int ret = 0;
/* Create jump action. */
if (!dtb->jump_actn)
- dtb->jump_actn =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (dtb->sfx_tbl->obj);
- if (!dtb->jump_actn) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dtb->sfx_tbl->obj, &dtb->jump_actn);
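+ /* ret stays 0 when the jump action already exists. */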
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
}
actions[j++] = mtb->drop_actn;
else
actions[j++] = dtb->jump_actn;
- dtb->policer_rules[i] =
- mlx5_glue->dv_create_flow(dtb->color_matcher,
- (void *)&value,
- j, actions);
- if (!dtb->policer_rules[i]) {
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher,
+ (void *)&value, j, actions,
+ &dtb->policer_rules[i]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer rule.");
goto error;
}
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+