#include <string.h>
#include <unistd.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
+#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
+#include <rte_eal_paging.h>
+#include <rte_mpls.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
+#include "mlx5_common_os.h"
#include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
};
static int
-flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
struct mlx5_flow_tbl_resource *tbl);
+static int
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ uint32_t encap_decap_idx);
+
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+ uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
+
+static int
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ uint32_t rix_jump);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
}
}
-/**
- * Acquire the synchronizing object to protect multithreaded access
- * to shared dv context. Lock occurs only if context is actually
- * shared, i.e. we have multiport IB device and representors are
- * created.
- *
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- */
-static void
-flow_dv_shared_lock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
-
- if (sh->dv_refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_lock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
-static void
-flow_dv_shared_unlock(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
-
- if (sh->dv_refcnt > 1) {
- int ret;
-
- ret = pthread_mutex_unlock(&sh->dv_mutex);
- MLX5_ASSERT(!ret);
- (void)ret;
- }
-}
-
/* Update VLAN's VID/PCP based on input rte_flow_action.
*
* @param[in] action
/* Fetch variable byte size mask from the array. */
mask = flow_dv_fetch_field((const uint8_t *)item->mask +
field->offset, field->size);
- MLX5_ASSERT(mask);
if (!mask) {
++field;
continue;
(int)dcopy->offset < 0 ? off_b : dcopy->offset;
/* Convert entire record to big-endian format. */
actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
+ ++dcopy;
} else {
MLX5_ASSERT(item->spec);
data = flow_dv_fetch_field((const uint8_t *)item->spec +
}
static enum mlx5_modification_field reg_to_field[] = {
- [REG_NONE] = MLX5_MODI_OUT_NONE,
+ [REG_NON] = MLX5_MODI_OUT_NONE,
[REG_A] = MLX5_MODI_META_DATA_REG_A,
[REG_B] = MLX5_MODI_META_DATA_REG_B,
[REG_C_0] = MLX5_MODI_META_REG_C_0,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
- MLX5_ASSERT(conf->id != REG_NONE);
- MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
+ MLX5_ASSERT(conf->id != REG_NON);
+ MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
actions[i] = (struct mlx5_modification_cmd) {
.action_type = MLX5_MODIFICATION_TYPE_SET,
.field = reg_to_field[conf->id],
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
reg_type = reg_to_field[ret];
MLX5_ASSERT(reg_type > 0);
.mask = &mask,
};
struct field_modify_info reg_c_x[] = {
- {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
- {0, 0, 0},
+ [1] = {0, 0, 0},
};
int reg;
mask = rte_cpu_to_be_32(mask) & msk_c0;
mask = rte_cpu_to_be_32(mask << shl_c0);
}
- reg_c_x[0].id = reg_to_field[reg];
+ reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
if (reg < 0)
return reg;
+ MLX5_ASSERT(reg != REG_NON);
/*
* In datapath code there are no endianness
* conversions for performance reasons, all
MLX5_MODIFICATION_TYPE_SET, error);
}
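+/**
+ * Convert a modify_field action field ID into an array of modification
+ * info entries, filling the big-endian mask and immediate value arrays.
+ * As used by flow_dv_convert_action_modify_field() below: for SET the
+ * mask describes the destination field, for COPY the source field.
+ *
+ * @param[in] data
+ * Pointer to the field description (ID, level and offset).
+ * @param[out] info
+ * Array of modification fields to fill.
+ * @param[out] mask
+ * Big-endian mask array to fill, NULL for the COPY destination.
+ * @param[out] value
+ * Immediate value array, filled for VALUE/POINTER sources only.
+ * @param[in] width
+ * Number of bits to modify.
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to the error structure.
+ */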
+static void
+mlx5_flow_field_id_to_modify_info
+ (const struct rte_flow_action_modify_data *data,
+ struct field_modify_info *info,
+ uint32_t *mask, uint32_t *value, uint32_t width,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ uint32_t idx = 0;
+ switch (data->field) {
+ case RTE_FLOW_FIELD_START:
+ /* not supported yet */
+ MLX5_ASSERT(false);
+ break;
+ case RTE_FLOW_FIELD_MAC_DST:
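+ /* 48-bit MAC is split into 47_16 and 15_0 modification fields. */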
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DMAC_47_16};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){2, 4 * idx,
+ MLX5_MODI_OUT_DMAC_15_0};
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DMAC_47_16};
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_DMAC_15_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_MAC_SRC:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SMAC_47_16};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){2, 4 * idx,
+ MLX5_MODI_OUT_SMAC_15_0};
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SMAC_47_16};
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_SMAC_15_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_FIRST_VID};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x00000fff >>
+ (12 - width));
+ break;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_ETHERTYPE};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IP_DSCP};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IPV4_TTL};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+ (8 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV4};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV4_DST:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV4};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IP_DSCP};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x000000ff >>
+ (8 - width));
+ break;
+ case RTE_FLOW_FIELD_IPV6_SRC:
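+ /* 128-bit address is handled as four 32-bit modification fields. */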
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_127_96};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 64) {
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_SIPV6_95_64};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 96) {
+ info[idx] = (struct field_modify_info){4,
+ 8 * idx,
+ MLX5_MODI_OUT_SIPV6_63_32};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){4, 12 * idx,
+ MLX5_MODI_OUT_SIPV6_31_0};
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_127_96};
+ if (data->offset < 64)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_95_64};
+ if (data->offset < 96)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_63_32};
+ if (data->offset < 128)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SIPV6_31_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_IPV6_DST:
+ if (mask) {
+ if (data->offset < 32) {
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_127_96};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 64) {
+ info[idx] = (struct field_modify_info){4,
+ 4 * idx,
+ MLX5_MODI_OUT_DIPV6_95_64};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ if (data->offset < 96) {
+ info[idx] = (struct field_modify_info){4,
+ 8 * idx,
+ MLX5_MODI_OUT_DIPV6_63_32};
+ if (width < 32) {
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ width = 0;
+ } else {
+ mask[idx] = RTE_BE32(0xffffffff);
+ width -= 32;
+ }
+ if (!width)
+ break;
+ ++idx;
+ }
+ info[idx] = (struct field_modify_info){4, 12 * idx,
+ MLX5_MODI_OUT_DIPV6_31_0};
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ } else {
+ if (data->offset < 32)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_127_96};
+ if (data->offset < 64)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_95_64};
+ if (data->offset < 96)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_63_32};
+ if (data->offset < 128)
+ info[idx++] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DIPV6_31_0};
+ }
+ break;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_TCP_SPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_TCP_DPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_TCP_SEQ_NUM};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_TCP_ACK_NUM};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ info[idx] = (struct field_modify_info){1, 0,
+ MLX5_MODI_OUT_TCP_FLAGS};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000003f >>
+ (6 - width));
+ break;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_UDP_SPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ info[idx] = (struct field_modify_info){2, 0,
+ MLX5_MODI_OUT_UDP_DPORT};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
+ (16 - width));
+ break;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ /* not supported yet */
+ break;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_GTP_TEID};
+ if (mask)
+ mask[idx] = rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ break;
+ case RTE_FLOW_FIELD_TAG:
+ {
+ int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
+ data->level, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_MARK:
+ {
+ int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
+ 0, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_META:
+ {
+ int reg = flow_dv_get_metadata_reg(dev, attr, error);
+ if (reg < 0)
+ return;
+ MLX5_ASSERT(reg != REG_NON);
+ MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
+ info[idx] = (struct field_modify_info){4, 0,
+ reg_to_field[reg]};
+ if (mask)
+ mask[idx] =
+ rte_cpu_to_be_32(0xffffffff >>
+ (32 - width));
+ }
+ break;
+ case RTE_FLOW_FIELD_POINTER:
+ for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+ if (mask[idx]) {
+ memcpy(&value[idx],
+ (void *)(uintptr_t)data->value, 32);
+ value[idx] = rte_cpu_to_be_32(value[idx]);
+ break;
+ }
+ }
+ break;
+ case RTE_FLOW_FIELD_VALUE:
+ for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
+ if (mask[idx]) {
+ value[idx] =
+ rte_cpu_to_be_32((uint32_t)data->value);
+ break;
+ }
+ }
+ break;
+ default:
+ MLX5_ASSERT(false);
+ break;
+ }
+}
+
+/**
+ * Convert modify_field action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_field
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_modify_field *conf =
+ (const struct rte_flow_action_modify_field *)(action->conf);
+ struct rte_flow_item item;
+ struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, 0} };
+ struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, 0} };
+ uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+ uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
+ uint32_t type;
+
+ if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
+ conf->src.field == RTE_FLOW_FIELD_VALUE) {
+ type = MLX5_MODIFICATION_TYPE_SET;
+ /* For SET fill the destination field (field) first. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
+ value, conf->width, dev, attr, error);
+ /* Then copy immediate value from source as per mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
+ value, conf->width, dev, attr, error);
+ item.spec = &value;
+ } else {
+ type = MLX5_MODIFICATION_TYPE_COPY;
+ /* For COPY fill the destination field (dcopy) without mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
+ value, conf->width, dev, attr, error);
+ /* Then construct the source field (field) with mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
+ value, conf->width, dev, attr, error);
+ }
+ item.mask = &mask;
+ return flow_dv_convert_modify_action(&item,
+ field, dcopy, resource, type, error);
+}
+
/**
* Validate MARK item.
*
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_mark),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return reg;
+ if (reg == REG_NON)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "unavalable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"isn't supported");
if (reg != REG_A)
nic_mask.data = priv->sh->dv_meta_mask;
+ } else {
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "extended metadata feature "
+ "should be enabled when "
+ "meta item is requested "
+ "with e-switch mode ");
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on metadata for ingress "
+ "is not supported in legacy "
+ "metadata mode");
}
if (!mask)
mask = &rte_flow_item_meta_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
return ret;
}
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_tag),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
if (mask->index != 0xff)
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
return 0;
}
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_port_id_mask,
sizeof(struct rte_flow_item_port_id),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
if (!spec)
return 0;
}
+/**
+ * Validate VLAN item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] dev
+ * Ethernet device flow is being created on.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(UINT16_MAX),
+ .inner_type = RTE_BE16(UINT16_MAX),
+ .has_more_vlan = 1,
+ };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int ret;
+ const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
+ const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple VLAN layers not supported");
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+ if (ret)
+ return ret;
+ if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->vmwa_context) {
+ /*
+ * Non-NULL context means we have a virtual machine
+ * and SR-IOV enabled, we have to create VLAN interface
+ * to make hypervisor to setup E-Switch vport
+ * context correctly. We avoid creating the multiple
+ * VLAN interfaces, so we cannot support VLAN tag mask.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN tag mask is not"
+ " supported in virtual"
+ " environment");
+ }
+ }
+ return 0;
+}
+
+/*
+ * GTP flags are contained in 1 byte of the format:
+ * -------------------------------------------
+ * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
+ * |-----------------------------------------|
+ * | value | Version | PT | Res | E | S | PN |
+ * -------------------------------------------
+ *
+ * Matching is supported only for GTP flags E, S, PN.
+ */
+#define MLX5_GTP_FLAGS_MASK 0x07
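+/* Within the masked bits: E = 0x04, S = 0x02, PN = 0x01. */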
+
/**
* Validate GTP item.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_gtp *spec = item->spec;
const struct rte_flow_item_gtp *mask = item->mask;
const struct rte_flow_item_gtp nic_mask = {
+ .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
.msg_type = 0xff,
.teid = RTE_BE32(0xffffffff),
};
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_gtp_mask;
- return mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_gtp),
- error);
+ if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Match is supported for GTP"
+ " flags only");
+ return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gtp),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
/**
- * Validate the pop VLAN action.
+ * Validate GTP PSC item.
*
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- * @param[in] action_flags
- * Holds the actions detected until now.
- * @param[in] action
- * Pointer to the pop vlan action.
- * @param[in] item_flags
- * The items found in this flow rule.
+ * @param[in] item
+ * Item specification.
+ * @param[in] last_item
+ * Previous validated item in the pattern items.
+ * @param[in] gtp_item
+ * Previous GTP item specification.
* @param[in] attr
* Pointer to flow attributes.
* @param[out] error
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
- uint64_t action_flags,
- const struct rte_flow_action *action,
- uint64_t item_flags,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
+ uint64_t last_item,
+ const struct rte_flow_item *gtp_item,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
- const struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_gtp *gtp_spec;
+ const struct rte_flow_item_gtp *gtp_mask;
+ const struct rte_flow_item_gtp_psc *spec;
+ const struct rte_flow_item_gtp_psc *mask;
+ const struct rte_flow_item_gtp_psc nic_mask = {
+ .pdu_type = 0xFF,
+ .qfi = 0xFF,
+ };
- (void)action;
- (void)attr;
- if (!priv->sh->pop_vlan_action)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "pop vlan action is not supported");
- if (attr->egress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "pop vlan action not supported for "
- "egress");
- if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "no support for multiple VLAN "
- "actions");
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot pop vlan without a "
- "match on (outer) vlan in the flow");
- if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "wrong action order, port_id should "
- "be after pop VLAN action");
- if (!attr->transfer && priv->representor)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "pop vlan action for VF representor "
- "not supported on NIC table");
- return 0;
+ if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GTP PSC item must be preceded with GTP item");
+ gtp_spec = gtp_item->spec;
+ gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
+ /* GTP spec is present and E flag is requested to match zero. */
+ if (gtp_spec &&
+ (gtp_mask->v_pt_rsv_flags &
+ ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GTP E flag must be 1 to match GTP PSC");
+ /* Check the flow is not created in group zero. */
+ if (!attr->transfer && !attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "GTP PSC is not supported for group 0");
+ /* GTP PSC spec is optional, no more checks if it is missing. */
+ if (!item->spec)
+ return 0;
+ spec = item->spec;
+ mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
+ if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "PDU type should be smaller than 16");
+ return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gtp_psc),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
/**
- * Get VLAN default info from vlan match info.
+ * Validate IPV4 item.
+ * Use the existing validation function mlx5_flow_validate_item_ipv4(),
+ * then add specific validation of the fragment_offset field.
*
- * @param[in] items
- * the list of item specifications.
- * @param[out] vlan
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ struct rte_flow_error *error)
+{
+ int ret;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *last = item->last;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ rte_be16_t fragment_offset_spec = 0;
+ rte_be16_t fragment_offset_last = 0;
+ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .fragment_offset = RTE_BE16(0xffff),
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+
+ ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
+ ether_type, &nic_ipv4_mask,
+ MLX5_ITEM_RANGE_ACCEPTED, error);
+ if (ret < 0)
+ return ret;
+ if (spec && mask)
+ fragment_offset_spec = spec->hdr.fragment_offset &
+ mask->hdr.fragment_offset;
+ if (!fragment_offset_spec)
+ return 0;
+ /*
+ * spec and mask are valid, enforce using full mask to make sure the
+ * complete value is used correctly.
+ */
+ if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ item, "must use full mask for"
+ " fragment_offset");
+ /*
+ * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
+ * indicating this is the 1st fragment of a fragmented packet.
+ * This is not yet supported in MLX5, return appropriate error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on first fragment not "
+ "supported");
+ if (fragment_offset_spec && !last)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "specified value not supported");
+ /* spec and last are valid, validate the specified range. */
+ fragment_offset_last = last->hdr.fragment_offset &
+ mask->hdr.fragment_offset;
+ /*
+ * Match on fragment_offset spec 0x2001 and last 0x3fff
+ * means MF is 1 and frag-offset is > 0.
+ * This packet is the 2nd or later fragment, excluding the last one.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
+ fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on following "
+ "fragments not supported");
+ /*
+ * Match on fragment_offset spec 0x0001 and last 0x1fff
+ * means MF is 0 and frag-offset is > 0.
+ * This packet is the last fragment of a fragmented packet.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(1) &&
+ fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on last "
+ "fragment not supported");
+ /*
+ * Match on fragment_offset spec 0x0001 and last 0x3fff
+ * means MF and/or frag-offset is not 0.
+ * This is a fragmented packet.
+ * Other range values are invalid and rejected.
+ */
+ if (!(fragment_offset_spec == RTE_BE16(1) &&
+ fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+ "specified range not supported");
+ return 0;
+}
+
+/**
+ * Validate IPV6 fragment extension item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
+ const struct rte_flow_item_ipv6_frag_ext *last = item->last;
+ const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
+ rte_be16_t frag_data_spec = 0;
+ rte_be16_t frag_data_last = 0;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret = 0;
+ struct rte_flow_item_ipv6_frag_ext nic_mask = {
+ .hdr = {
+ .next_header = 0xff,
+ .frag_data = RTE_BE16(0xffff),
+ },
+ };
+
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "ipv6 fragment extension item cannot "
+ "follow L4 item.");
+ if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "ipv6 fragment extension item must "
+ "follow ipv6 item");
+ if (spec && mask)
+ frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
+ if (!frag_data_spec)
+ return 0;
+ /*
+ * spec and mask are valid, enforce using full mask to make sure the
+ * complete value is used correctly.
+ */
+ if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
+ RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ item, "must use full mask for"
+ " frag_data");
+ /*
+ * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
+ * This is the 1st fragment of a fragmented packet.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on first fragment not "
+ "supported");
+ if (frag_data_spec && !last)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "specified value not supported");
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6_frag_ext),
+ MLX5_ITEM_RANGE_ACCEPTED, error);
+ if (ret)
+ return ret;
+ /* spec and last are valid, validate the specified range. */
+ frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
+ /*
+ * Match on frag_data spec 0x0009 and last 0xfff9
+ * means M is 1 and frag-offset is > 0.
+ * This packet is the 2nd or later fragment, excluding the last one.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
+ RTE_IPV6_EHDR_MF_MASK) &&
+ frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on following "
+ "fragments not supported");
+ /*
+ * Match on frag_data spec 0x0008 and last 0xfff8
+ * means M is 0 and frag-offset is > 0.
+ * This packet is the last fragment of a fragmented packet.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
+ frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on last "
+ "fragment not supported");
+ /* Other range values are invalid and rejected. */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+ "specified range not supported");
+}
+
+/**
+ * Validate the pop VLAN action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the pop vlan action.
+ * @param[in] item_flags
+ * The items found in this flow rule.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_action *action,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
+ (void)action;
+ (void)attr;
+ if (!priv->sh->pop_vlan_action)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "pop vlan action is not supported");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "pop vlan action not supported for "
+ "egress");
+ if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no support for multiple VLAN "
+ "actions");
+ /* Pop VLAN with preceding Decap requires inner header with VLAN. */
+ if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan after decap without "
+ "match on inner vlan in the flow");
+ /* Pop VLAN without preceding Decap requires outer header with VLAN. */
+ if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan without a "
+ "match on (outer) vlan in the flow");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after pop VLAN action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "pop vlan action for VF representor "
+ "not supported on NIC table");
+ return 0;
+}
+
+/**
+ * Get VLAN default info from vlan match info.
+ *
+ * @param[in] items
+ * the list of item specifications.
+ * @param[out] vlan
* pointer to VLAN info to fill.
*
* @return
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
+ /* If VLAN item in pattern doesn't contain data, return here. */
+ if (!vlan_v)
+ return;
if (!vlan_m)
vlan_m = &nic_mask;
/* Only full match values are accepted */
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
- if (!attr->transfer && attr->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "push VLAN action not supported for "
- "ingress");
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"invalid vlan ethertype");
- if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "no support for multiple VLAN "
- "actions");
if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
const struct rte_flow_action *action = actions;
const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
- if (conf->vlan_vid > RTE_BE16(0xFFE))
+ if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"VLAN VID value is too big");
const struct rte_flow_action_mark *mark = action->conf;
int ret;
+ if (is_tunnel_offload_active(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "no mark action "
+ "if tunnel offload active");
/* Fall back if no extended metadata register support. */
if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
return mlx5_flow_validate_action_mark(action, action_flags,
reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return reg;
+ if (reg == REG_NON)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "unavalable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
* @param[out] error
* Pointer to error structure.
*
*/
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint64_t action_flags,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_count *count;
if (!priv->config.devx)
goto notsup_err;
+ if (action_flags & MLX5_FLOW_ACTION_COUNT)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "duplicate count actions set");
+ count = (const struct rte_flow_action_count *)action->conf;
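+ /* Shared counter with the AGE action requires ASO flow-hit support. */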
+ if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+ !priv->sh->flow_hit_aso_en)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
return 0;
#endif
* Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct mlx5_priv *priv = dev->data->dev_private;
+ if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
+ !priv->config.decap_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "decap is not enabled");
if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"decap action for VF representor "
"not supported on NIC table");
+ if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
+ !(item_flags & MLX5_FLOW_LAYER_VXLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "VXLAN item should be present for VXLAN decap");
return 0;
}
* Holds the actions detected until now.
* @param[out] actions_n
* pointer to the number of actions counter.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[out] error
* Pointer to error structure.
*
const struct rte_flow_action_raw_decap *decap,
const struct rte_flow_action_raw_encap *encap,
const struct rte_flow_attr *attr, uint64_t *action_flags,
- int *actions_n, struct rte_flow_error *error)
+ int *actions_n, const struct rte_flow_action *action,
+ uint64_t item_flags, struct rte_flow_error *error)
{
const struct mlx5_priv *priv = dev->data->dev_private;
int ret;
"encap combination");
}
if (decap) {
- ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
- error);
+ ret = flow_dv_validate_action_decap(dev, *action_flags, action,
+ item_flags, attr, error);
if (ret < 0)
return ret;
*action_flags |= MLX5_FLOW_ACTION_DECAP;
return 0;
}
+/**
+ * Match encap_decap resource.
+ *
+ * @param list
+ * Pointer to the hash list.
+ * @param entry
+ * Pointer to existing resource entry object.
+ * @param key
+ * Key of the new entry.
+ * @param cb_ctx
+ * Pointer to the context containing the new encap_decap resource.
+ *
+ * @return
+ * 0 on matching, non-zero otherwise.
+ */
+int
+flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key __rte_unused, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_encap_decap_resource,
+ entry);
+ if (resource->reformat_type == cache_resource->reformat_type &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
+ resource->size == cache_resource->size &&
+ !memcmp((const void *)resource->buf,
+ (const void *)cache_resource->buf,
+ resource->size))
+ return 0;
+ return -1;
+}
+
+/**
+ * Allocate encap_decap resource.
+ *
+ * @param list
+ * Pointer to the hash list.
+ * @param key
+ * Key of the new entry.
+ * @param cb_ctx
+ * Pointer to the context containing the new encap_decap resource.
+ *
+ * @return
+ * Pointer to the new entry on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hlist_entry *
+flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
+ uint64_t key __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5dv_dr_domain *domain;
+ struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ uint32_t idx;
+ int ret;
+
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ /* Register new encap/decap resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &idx);
+ if (!cache_resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ *cache_resource = *resource;
+ cache_resource->idx = idx;
+ ret = mlx5_flow_os_create_flow_action_packet_reformat
+ (sh->ctx, domain, cache_resource,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ return NULL;
+ }
+
+ return &cache_resource->entry;
+}
+
/**
* Find existing encap/decap resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
- struct mlx5dv_dr_domain *domain;
- uint32_t idx = 0;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hlist_entry *entry;
+ union {
+ struct {
+ uint32_t ft_type:8;
+ uint32_t refmt_type:8;
+ /*
+ * Header reformat actions can be shared between
+ * non-root tables; one bit indicates whether the
+ * table is a non-root one.
+ */
+ uint32_t is_root:1;
+ uint32_t reserve:15;
+ };
+ uint32_t v32;
+ } encap_decap_key = {
+ {
+ .ft_type = resource->ft_type,
+ .refmt_type = resource->reformat_type,
+ .is_root = !!dev_flow->dv.group,
+ .reserve = 0,
+ }
+ };
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = resource,
+ };
+ uint64_t key64;
resource->flags = dev_flow->dv.group ? 0 : 1;
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
- domain = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
- domain = sh->rx_domain;
- else
- domain = sh->tx_domain;
- /* Lookup a matching resource from cache. */
- ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
- cache_resource, next) {
- if (resource->reformat_type == cache_resource->reformat_type &&
- resource->ft_type == cache_resource->ft_type &&
- resource->flags == cache_resource->flags &&
- resource->size == cache_resource->size &&
- !memcmp((const void *)resource->buf,
- (const void *)cache_resource->buf,
- resource->size)) {
- DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.rix_encap_decap = idx;
- dev_flow->dv.encap_decap = cache_resource;
- return 0;
- }
- }
- /* Register new encap/decap resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &dev_flow->handle->dvh.rix_encap_decap);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_packet_reformat
- (sh->ctx, cache_resource->reformat_type,
- cache_resource->ft_type, domain, cache_resource->flags,
- cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL));
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
- dev_flow->handle->dvh.rix_encap_decap, cache_resource,
- next);
- dev_flow->dv.encap_decap = cache_resource;
- DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
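+ /*
+ * Build the hash key from the type fields first; for reformat actions
+ * other than decap (L2_TUNNEL_TO_L2), hash the header buffer as well
+ * so that different headers yield distinct keys.
+ */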
+ key64 = __rte_raw_cksum(&encap_decap_key.v32,
+ sizeof(encap_decap_key.v32), 0);
+ if (resource->reformat_type !=
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
+ resource->size)
+ key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
+ entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
+ if (!entry)
+ return -rte_errno;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->dv.encap_decap = resource;
+ dev_flow->handle->dvh.rix_encap_decap = resource->idx;
return 0;
}
(struct rte_eth_dev *dev __rte_unused,
struct mlx5_flow_tbl_resource *tbl,
struct mlx5_flow *dev_flow,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
- int cnt;
MLX5_ASSERT(tbl);
- cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
- if (!cnt) {
- tbl_data->jump.action =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (tbl->obj);
- if (!tbl_data->jump.action)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create jump action");
- DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
- (void *)&tbl_data->jump, cnt);
- } else {
- /* old jump should not make the table ref++. */
- flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
- MLX5_ASSERT(tbl_data->jump.action);
- DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
- (void *)&tbl_data->jump, cnt);
- }
- rte_atomic32_inc(&tbl_data->jump.refcnt);
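+ /* The jump action is created together with the table and just reused. */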
+ MLX5_ASSERT(tbl_data->jump.action);
dev_flow->handle->rix_jump = tbl_data->idx;
dev_flow->dv.jump = &tbl_data->jump;
return 0;
}
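+/**
+ * Cache list match callback for the port_id action: compare the
+ * requested port_id with the cached one.
+ *
+ * @return
+ * 0 on matching, non-zero otherwise.
+ */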
+int
+flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
+ struct mlx5_flow_dv_port_id_action_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ return ref->port_id != res->port_id;
+}
+
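+/**
+ * Cache list create callback for the port_id action: allocate the
+ * resource from the indexed pool and create the destination port
+ * action on the FDB domain.
+ *
+ * @return
+ * Pointer to the new entry on success, NULL otherwise and rte_errno is set.
+ */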
+struct mlx5_cache_entry *
+flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
+ struct mlx5_flow_dv_port_id_action_resource *cache;
+ uint32_t idx;
+ int ret;
+
+ /* Register new port id action resource. */
+ cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
+ if (!cache) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate port_id action cache memory");
+ return NULL;
+ }
+ *cache = *ref;
+ ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
+ ref->port_id,
+ &cache->action);
+ if (ret) {
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create action");
+ return NULL;
+ }
+ cache->idx = idx;
+ return &cache->entry;
+}
+
/**
* Find existing table port ID resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = 0;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_dv_port_id_action_resource *cache;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = resource,
+ };
- /* Lookup a matching resource from cache. */
- ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
- idx, cache_resource, next) {
- if (resource->port_id == cache_resource->port_id) {
- DRV_LOG(DEBUG, "port id action resource resource %p: "
- "refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->rix_port_id_action = idx;
- dev_flow->dv.port_id_action = cache_resource;
- return 0;
- }
- }
- /* Register new port id action resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
- &dev_flow->handle->rix_port_id_action);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- /*
- * Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
- * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
- */
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_port
- (priv->sh->fdb_domain, resource->port_id);
- if (!cache_resource->action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
- dev_flow->handle->rix_port_id_action, cache_resource,
- next);
- dev_flow->dv.port_id_action = cache_resource;
- DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache = container_of(entry, typeof(*cache), entry);
+ dev_flow->dv.port_id_action = cache;
+ dev_flow->handle->rix_port_id_action = cache->idx;
return 0;
}
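+/**
+ * Cache list match callback for the push_vlan action: compare the
+ * VLAN tag and the flow table type.
+ *
+ * @return
+ * 0 on matching, non-zero otherwise.
+ */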
+int
+flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
+ struct mlx5_flow_dv_push_vlan_action_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
+}
+
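+/**
+ * Cache list create callback for the push_vlan action: allocate the
+ * resource and create the push VLAN action on the domain selected by
+ * the flow table type.
+ *
+ * @return
+ * Pointer to the new entry on success, NULL otherwise and rte_errno is set.
+ */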
+struct mlx5_cache_entry *
+flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx;
+ int ret;
+
+ /* Register new push_vlan action resource. */
+ cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
+ if (!cache) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate push_vlan action cache memory");
+ return NULL;
+ }
+ *cache = *ref;
+ if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
+ &cache->action);
+ if (ret) {
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create push vlan action");
+ return NULL;
+ }
+ cache->idx = idx;
+ return &cache->entry;
+}
+
/**
* Find existing push vlan resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
- struct mlx5dv_dr_domain *domain;
- uint32_t idx = 0;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = resource,
+ };
- /* Lookup a matching resource from cache. */
- ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- sh->push_vlan_action_list, idx, cache_resource, next) {
- if (resource->vlan_tag == cache_resource->vlan_tag &&
- resource->ft_type == cache_resource->ft_type) {
- DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
- "refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.rix_push_vlan = idx;
- dev_flow->dv.push_vlan_res = cache_resource;
- return 0;
- }
- }
- /* Register new push_vlan action resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- &dev_flow->handle->dvh.rix_push_vlan);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
- domain = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
- domain = sh->rx_domain;
- else
- domain = sh->tx_domain;
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_push_vlan(domain,
- resource->vlan_tag);
- if (!cache_resource->action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- &sh->push_vlan_action_list,
- dev_flow->handle->dvh.rix_push_vlan,
- cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
- DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache = container_of(entry, typeof(*cache), entry);
+
+ dev_flow->handle->dvh.rix_push_vlan = cache->idx;
+ dev_flow->dv.push_vlan_res = cache;
return 0;
}
+
/**
- * Get the size of specific rte_flow_item_type
+ * Get the header size of the specific rte_flow_item_type.
*
* @param[in] item_type
* Tested rte_flow_item_type.
* sizeof struct item_type, 0 if void or irrelevant.
*/
static size_t
-flow_dv_get_item_len(const enum rte_flow_item_type item_type)
+flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
size_t retval;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- retval = sizeof(struct rte_flow_item_eth);
+ retval = sizeof(struct rte_ether_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- retval = sizeof(struct rte_flow_item_vlan);
+ retval = sizeof(struct rte_vlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- retval = sizeof(struct rte_flow_item_ipv4);
+ retval = sizeof(struct rte_ipv4_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- retval = sizeof(struct rte_flow_item_ipv6);
+ retval = sizeof(struct rte_ipv6_hdr);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- retval = sizeof(struct rte_flow_item_udp);
+ retval = sizeof(struct rte_udp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- retval = sizeof(struct rte_flow_item_tcp);
+ retval = sizeof(struct rte_tcp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- retval = sizeof(struct rte_flow_item_vxlan);
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ retval = sizeof(struct rte_vxlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- retval = sizeof(struct rte_flow_item_gre);
- break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- retval = sizeof(struct rte_flow_item_nvgre);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- retval = sizeof(struct rte_flow_item_vxlan_gpe);
+ retval = sizeof(struct rte_gre_hdr);
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- retval = sizeof(struct rte_flow_item_mpls);
+ retval = sizeof(struct rte_mpls_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
default:
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "invalid empty data");
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- len = flow_dv_get_item_len(items->type);
+ len = flow_dv_get_item_hdr_len(items->type);
if (len + temp_size > MLX5_ENCAP_MAX_LEN)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
return ret;
}
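+
+/**
+ * Get the bit width of a MODIFY_FIELD operand.
+ *
+ * @param[in] field
+ * The rte_flow field to query.
+ *
+ * @return
+ * Width of the field in bits.
+ */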
+static int
+mlx5_flow_item_field_width(enum rte_flow_field_id field)
+{
+ switch (field) {
+ case RTE_FLOW_FIELD_START:
+ return 32;
+ case RTE_FLOW_FIELD_MAC_DST:
+ case RTE_FLOW_FIELD_MAC_SRC:
+ return 48;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ return 12;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ return 8;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ case RTE_FLOW_FIELD_IPV4_DST:
+ return 32;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ return 8;
+ case RTE_FLOW_FIELD_IPV6_SRC:
+ case RTE_FLOW_FIELD_IPV6_DST:
+ return 128;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ return 32;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ return 6;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ return 24;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ case RTE_FLOW_FIELD_TAG:
+ return 32;
+ case RTE_FLOW_FIELD_MARK:
+ return 24;
+ case RTE_FLOW_FIELD_META:
+ case RTE_FLOW_FIELD_POINTER:
+ case RTE_FLOW_FIELD_VALUE:
+ return 32;
+ default:
+ MLX5_ASSERT(false);
+ }
+ return 0;
+}
+
+/**
+ * Validate the generic modify field actions.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Number of header fields to modify (0 or more) on success,
+ * a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
+ const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ const struct rte_flow_action_modify_field *action_modify_field =
+ action->conf;
+ uint32_t dst_width =
+ mlx5_flow_item_field_width(action_modify_field->dst.field);
+ uint32_t src_width =
+ mlx5_flow_item_field_width(action_modify_field->src.field);
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (ret)
+ return ret;
+
+ if (action_modify_field->width == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no bits are requested to be modified");
+ else if (action_modify_field->width > dst_width ||
+ action_modify_field->width > src_width)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot modify more bits than"
+ " the width of a field");
+ if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
+ action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
+ if ((action_modify_field->dst.offset +
+ action_modify_field->width > dst_width) ||
+ (action_modify_field->dst.offset % 32))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "destination offset is too big"
+ " or not aligned to 4 bytes");
+ if (action_modify_field->dst.level &&
+ action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot modify inner headers");
+ }
+ if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
+ action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
+ if (!attr->transfer && !attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "modify field action "
+ "is not supported for group 0");
+ if ((action_modify_field->src.offset +
+ action_modify_field->width > src_width) ||
+ (action_modify_field->src.offset % 32))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "source offset is too big"
+ " or not aligned to 4 bytes");
+ if (action_modify_field->src.level &&
+ action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot copy from inner headers");
+ }
+ if (action_modify_field->dst.field ==
+ action_modify_field->src.field)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "source and destination fields"
+ " cannot be the same");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
+ action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "immediate value or a pointer to it"
+ " cannot be used as a destination");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_START)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of an arbitrary"
+ " place in a packet is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the 802.1Q Tag"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the VXLAN Network"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "modifications of the GENEVE Network"
+ " Identifier is not supported");
+ if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
+ action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
+ if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+ !mlx5_flow_ext_mreg_supported(dev))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "cannot modify mark without extended"
+ " metadata register support");
+ }
+ if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "add and sub operations"
+ " are not supported");
+ return (action_modify_field->width / 32) +
+ !!(action_modify_field->width % 32);
+}
+
/**
* Validate jump action.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_jump(const struct rte_flow_action *action,
+flow_dv_validate_action_jump(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
+ const struct rte_flow_action *action,
uint64_t action_flags,
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
uint32_t target_group, table;
int ret = 0;
-
+ struct flow_grp_info grp_info = {
+ .external = !!external,
+ .transfer = !!attributes->transfer,
+ .fdb_def_rule = 1,
+ .std_tbl_fix = 0
+ };
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
MLX5_FLOW_FATE_ESWITCH_ACTIONS))
return rte_flow_error_set(error, EINVAL,
NULL, "action configuration not set");
target_group =
((const struct rte_flow_action_jump *)action->conf)->group;
- ret = mlx5_flow_group_to_table(attributes, external, target_group,
- true, &table, error);
+ ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
+ &grp_info, error);
if (ret)
return ret;
- if (attributes->group == target_group)
+ if (attributes->group == target_group &&
+ !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
+ MLX5_FLOW_ACTION_TUNNEL_MATCH)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"target group must be other than"
return 0;
}
+/**
+ * Validate the age action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the age action.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_age(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_age *age = action->conf;
+
+ if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
+ !priv->sh->aso_age_mng))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "age action not supported");
+ if (!(action->conf))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be null");
+ if (!(age->timeout))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "invalid timeout value 0");
+ if (action_flags & MLX5_FLOW_ACTION_AGE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "duplicate age actions set");
+ return 0;
+}
+
/**
* Validate the modify-header IPv4 DSCP actions.
*
}
/**
- * Find existing modify-header resource or create and register a new one.
+ * Match modify-header resource.
*
- * @param dev[in, out]
- * Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to modify-header resource.
- * @parm[in, out] dev_flow
- * Pointer to the dev_flow.
- * @param[out] error
- * pointer to error structure.
+ * @param list
+ * Pointer to the hash list.
+ * @param entry
+ * Pointer to exist resource entry object.
+ * @param key
+ * Key of the new entry.
+ * @param ctx
+ * Pointer to new modify-header resource.
*
* @return
- * 0 on success otherwise -errno and errno is set.
+ * 0 on matching, non-zero otherwise.
*/
-static int
-flow_dv_modify_hdr_resource_register
- (struct rte_eth_dev *dev,
- struct mlx5_flow_dv_modify_hdr_resource *resource,
- struct mlx5_flow *dev_flow,
- struct rte_flow_error *error)
+int
+flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry,
+ uint64_t key __rte_unused, void *cb_ctx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ struct mlx5_flow_dv_modify_hdr_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+ uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
+
+ key_len += ref->actions_num * sizeof(ref->actions[0]);
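+ /* The key spans from ft_type to the end of the actions array. */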
+ return ref->actions_num != resource->actions_num ||
+ memcmp(&ref->ft_type, &resource->ft_type, key_len);
+}
+
+struct mlx5_hlist_entry *
+flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *ns;
- uint32_t actions_len;
+ struct mlx5_flow_dv_modify_hdr_resource *entry;
+ struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ int ret;
+ uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
+ uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
- resource->flags = dev_flow->dv.group ? 0 :
- MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
- if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
- resource->flags))
- return rte_flow_error_set(error, EOVERFLOW,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "too many modify header items");
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
+ SOCKET_ID_ANY);
+ if (!entry) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
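+ /*
+ * The fields from ft_type onward and the trailing actions array are
+ * contiguous, so a single copy covers both the key and the data.
+ */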
+ rte_memcpy(&entry->ft_type,
+ RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
+ key_len + data_len);
+ if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
ns = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
ns = sh->tx_domain;
else
ns = sh->rx_domain;
- /* Lookup a matching resource from cache. */
- actions_len = resource->actions_num * sizeof(resource->actions[0]);
- LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
- if (resource->ft_type == cache_resource->ft_type &&
- resource->actions_num == cache_resource->actions_num &&
- resource->flags == cache_resource->flags &&
- !memcmp((const void *)resource->actions,
- (const void *)cache_resource->actions,
- actions_len)) {
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- return 0;
- }
+ ret = mlx5_flow_os_create_flow_action_modify_header
+ (sh->ctx, ns, entry,
+ data_len, &entry->action);
+ if (ret) {
+ mlx5_free(entry);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create modification action");
+ return NULL;
}
- /* Register new modify-header resource. */
- cache_resource = rte_calloc(__func__, 1,
- sizeof(*cache_resource) + actions_len, 0);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- rte_memcpy(cache_resource->actions, resource->actions, actions_len);
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_modify_header
- (sh->ctx, cache_resource->ft_type, ns,
- cache_resource->flags, actions_len,
- (uint64_t *)cache_resource->actions);
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- return 0;
+ return &entry->entry;
}
/**
- * Get DV flow counter by index.
+ * Validate the sample action.
*
+ * @param[in, out] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the sample action.
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] idx
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[in] rss
+ * Pointer to the RSS action.
+ * @param[out] sample_rss
+ * Pointer to the RSS action in sample action list.
+ * @param[out] count
+ * Pointer to the COUNT action in sample action list.
+ * @param[out] fdb_mirror_limit
+ * Pointer to the FDB mirror limitation flag.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_sample(uint64_t *action_flags,
+ const struct rte_flow_action *action,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ uint64_t item_flags,
+ const struct rte_flow_action_rss *rss,
+ const struct rte_flow_action_rss **sample_rss,
+ const struct rte_flow_action_count **count,
+ int *fdb_mirror_limit,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ const struct rte_flow_action_sample *sample = action->conf;
+ const struct rte_flow_action *act;
+ uint64_t sub_action_flags = 0;
+ uint16_t queue_index = 0xFFFF;
+ int actions_n = 0;
+ int ret;
+
+ if (!sample)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be NULL");
+ if (sample->ratio == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "ratio value starts from 1");
+ if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "sample action not supported");
+ if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Multiple sample actions not "
+ "supported");
+ if (*action_flags & MLX5_FLOW_ACTION_METER)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, meter should "
+ "be after sample action");
+ if (*action_flags & MLX5_FLOW_ACTION_JUMP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, jump should "
+ "be after sample action");
+ act = sample->actions;
+ for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "too many actions");
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(act,
+ sub_action_flags,
+ dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
+ sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ *sample_rss = act->conf;
+ ret = mlx5_flow_validate_action_rss(act,
+ sub_action_flags,
+ dev, attr,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (rss && *sample_rss &&
+ ((*sample_rss)->level != rss->level ||
+ (*sample_rss)->types != rss->types))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Can't use the different RSS types "
+ "or level in the same flow");
+ if (*sample_rss != NULL && (*sample_rss)->queue_num)
+ queue_index = (*sample_rss)->queue[0];
+ sub_action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, act,
+ sub_action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ else
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_validate_action_count
+ (dev, act,
+ *action_flags | sub_action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ *count = act->conf;
+ sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
+ *action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ ret = flow_dv_validate_action_port_id(dev,
+ sub_action_flags,
+ act,
+ attr,
+ error);
+ if (ret)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, act->conf, attr, &sub_action_flags,
+ &actions_n, action, item_flags, error);
+ if (ret < 0)
+ return ret;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Doesn't support optional "
+ "action");
+ }
+ }
+ if (attr->ingress && !attr->transfer) {
+ if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Ingress must has a dest "
+ "QUEUE for Sample");
+ } else if (attr->egress && !attr->transfer) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Sample Only support Ingress "
+ "or E-Switch");
+ } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
+ MLX5_ASSERT(attr->transfer);
+ if (sample->ratio > 1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "E-Switch doesn't support "
+ "any optional action "
+ "for sampling");
+ if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "E-Switch must has a dest "
+ "port for mirroring");
+ if (!priv->config.hca_attr.reg_c_preserve &&
+ priv->representor_id != -1)
+ *fdb_mirror_limit = 1;
+ }
+ /* Continue validation for Xcap actions. */
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't "
+ "supported");
+ if (!attr->transfer && attr->ingress && (sub_action_flags &
+ MLX5_FLOW_ACTION_ENCAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ }
+ return 0;
+}
+
+/**
+ * Find existing modify-header resource or create and register a new one.
+ *
+ * @param dev[in, out]
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to modify-header resource.
+ * @parm[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_modify_hdr_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ uint32_t key_len = sizeof(*resource) -
+ offsetof(typeof(*resource), ft_type) +
+ resource->actions_num * sizeof(resource->actions[0]);
+ struct mlx5_hlist_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = resource,
+ };
+ uint64_t key64;
+
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
+ resource->flags))
+ return rte_flow_error_set(error, EOVERFLOW,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "too many modify header items");
+ key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
+ entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
+ if (!entry)
+ return -rte_errno;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->handle->dvh.modify_hdr = resource;
+ return 0;
+}
+
+/**
+ * Get DV flow counter by index.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
* mlx5 flow counter index in the container.
* @param[out] ppool
* mlx5 flow counter pool in the container,
struct mlx5_flow_counter_pool **ppool)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
struct mlx5_flow_counter_pool *pool;
- uint32_t batch = 0;
- idx--;
- if (idx >= MLX5_CNT_BATCH_OFFSET) {
- idx -= MLX5_CNT_BATCH_OFFSET;
- batch = 1;
- }
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
- MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
- pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
+ /* Decrease to original index and clear shared bit. */
+ idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
+ MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
+ pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
MLX5_ASSERT(pool);
if (ppool)
*ppool = pool;
- return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
+ return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
/**
- * Get a pool by devx counter ID.
+ * Check whether the devx counter belongs to the pool.
*
- * @param[in] cont
- * Pointer to the counter container.
+ * @param[in] pool
+ * Pointer to the counter pool.
* @param[in] id
* The counter devx ID.
*
* @return
- * The counter pool pointer if exists, NULL otherwise,
+ * True if counter belongs to the pool, false otherwise.
*/
-static struct mlx5_flow_counter_pool *
-flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
{
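+ /*
+ * The pool serves the MLX5_COUNTERS_PER_POOL-aligned ID range
+ * that contains its min_dcs object.
+ */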
- uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
-
- for (i = 0; i < n_valid; i++) {
- struct mlx5_flow_counter_pool *pool = cont->pools[i];
- int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
- MLX5_COUNTERS_PER_POOL;
+ int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+ MLX5_COUNTERS_PER_POOL;
- if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
- /*
- * Move the pool to the head, as counter allocate
- * always gets the first pool in the container.
- */
- if (pool != TAILQ_FIRST(&cont->pool_list)) {
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- }
- return pool;
- }
- }
- return NULL;
+ if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+ return true;
+ return false;
}
/**
- * Allocate a new memory for the counter values wrapped by all the needed
- * management.
+ * Get a pool by devx counter ID.
*
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] raws_n
- * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
- *
- * @return
- * The new memory management pointer on success, otherwise NULL and rte_errno
- * is set.
- */
-static struct mlx5_counter_stats_mem_mng *
-flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
-{
- struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
- (dev->data->dev_private))->sh;
- struct mlx5_devx_mkey_attr mkey_attr;
- struct mlx5_counter_stats_mem_mng *mem_mng;
- volatile struct flow_counter_stats *raw_data;
- int size = (sizeof(struct flow_counter_stats) *
- MLX5_COUNTERS_PER_POOL +
- sizeof(struct mlx5_counter_stats_raw)) * raws_n +
- sizeof(struct mlx5_counter_stats_mem_mng);
- uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
- int i;
+ * @param[in] cmng
+ * Pointer to the counter management.
+ * @param[in] id
+ * The counter devx ID.
+ *
+ * @return
+ * The counter pool pointer if it exists, NULL otherwise.
+ */
+static struct mlx5_flow_counter_pool *
+flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
+{
+ uint32_t i;
+ struct mlx5_flow_counter_pool *pool = NULL;
- if (!mem) {
- rte_errno = ENOMEM;
- return NULL;
- }
- mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
- size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- rte_free(mem);
- return NULL;
+ rte_spinlock_lock(&cmng->pool_update_sl);
+ /* Check last used pool. */
+ if (cmng->last_pool_idx != POOL_IDX_INVALID &&
+ flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
+ pool = cmng->pools[cmng->last_pool_idx];
+ goto out;
}
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mem_mng->umem->umem_id;
- mkey_attr.pd = sh->pdn;
- mkey_attr.log_entity_size = 0;
- mkey_attr.pg_access = 0;
- mkey_attr.klm_array = NULL;
- mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 1;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_glue->devx_umem_dereg(mem_mng->umem);
- rte_errno = errno;
- rte_free(mem);
- return NULL;
- }
- mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
- raw_data = (volatile struct flow_counter_stats *)mem;
- for (i = 0; i < raws_n; ++i) {
- mem_mng->raws[i].mem_mng = mem_mng;
- mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+ /* ID out of range means no suitable pool in the container. */
+ if (id > cmng->max_id || id < cmng->min_id)
+ goto out;
+ /*
+ * Search the pools from the end of the container: counter IDs mostly
+ * increase sequentially, so the last pool is the likeliest match.
+ */
+ i = cmng->n_valid;
+ while (i--) {
+ struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
+
+ if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
+ pool = pool_tmp;
+ break;
+ }
}
- LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
- return mem_mng;
+out:
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ return pool;
}
/**
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
*
* @return
- * The new container pointer on success, otherwise NULL and rte_errno is set.
+ * 0 on success, otherwise negative errno value and rte_errno is set.
*/
-static struct mlx5_pools_container *
-flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
+static int
+flow_dv_container_resize(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont =
- MLX5_CNT_CONTAINER(priv->sh, batch, 0);
- struct mlx5_pools_container *new_cont =
- MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
- struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
- uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ void *old_pools = cmng->pools;
+ uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
- int i;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
- /* Fallback mode has no background thread. Skip the check. */
- if (!priv->counter_fallback &&
- cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
- /* The last resize still hasn't detected by the host thread. */
- rte_errno = EAGAIN;
- return NULL;
- }
- new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
- if (!new_cont->pools) {
+ if (!pools) {
rte_errno = ENOMEM;
- return NULL;
- }
- if (cont->n)
- memcpy(new_cont->pools, cont->pools, cont->n *
- sizeof(struct mlx5_flow_counter_pool *));
- /*
- * Fallback mode query the counter directly, no background query
- * resources are needed.
- */
- if (!priv->counter_fallback) {
- mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
- MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
- if (!mem_mng) {
- rte_free(new_cont->pools);
- return NULL;
- }
- for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
- LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
- mem_mng->raws +
- MLX5_CNT_CONTAINER_RESIZE +
- i, next);
- } else {
- /*
- * Release the old container pools directly as no background
- * thread helps that.
- */
- rte_free(cont->pools);
- }
- new_cont->n = resize;
- rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
- TAILQ_INIT(&new_cont->pool_list);
- TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
- new_cont->init_mem_mng = mem_mng;
- rte_cio_wmb();
- /* Flip the master container. */
- priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
- return new_cont;
+ return -ENOMEM;
+ }
+ if (old_pools)
+ memcpy(pools, old_pools, cmng->n *
+ sizeof(struct mlx5_flow_counter_pool *));
+ cmng->n = resize;
+ cmng->pools = pools;
+ if (old_pools)
+ mlx5_free(old_pools);
+ return 0;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
int offset;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (counter < MLX5_CNT_BATCH_OFFSET) {
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (priv->counter_fallback)
- return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
+ if (priv->sh->cmng.counter_fallback)
+ return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
0, pkts, bytes, 0, NULL, NULL, 0);
- }
-
rte_spinlock_lock(&pool->sl);
- /*
- * The single counters allocation may allocate smaller ID than the
- * current allocated in parallel to the host reading.
- * In this case the new counter values must be reported as 0.
- */
- if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
+ if (!pool->raw) {
*pkts = 0;
*bytes = 0;
} else {
- offset = cnt - &pool->counters_raw[0];
+ offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
}
* Pointer to the Ethernet device structure.
* @param[out] dcs
* The devX counter handle.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the pool is for counters that were allocated for aging.
* @param[in/out] cont_cur
 * Pointer to the container pointer, it will be updated in pool resize.
*
* @return
* The pool container pointer on success, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
- uint32_t batch)
+ uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0);
- int16_t n_valid = rte_atomic16_read(&cont->n_valid);
- uint32_t size;
-
- if (cont->n == n_valid) {
- cont = flow_dv_container_resize(dev, batch);
- if (!cont)
- return NULL;
- }
- size = sizeof(*pool);
- if (!batch)
- size += MLX5_COUNTERS_PER_POOL *
- sizeof(struct mlx5_flow_counter_ext);
- pool = rte_calloc(__func__, 1, size, 0);
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ bool fallback = priv->sh->cmng.counter_fallback;
+ uint32_t size = sizeof(*pool);
+
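+ /* The counter array and optional age parameters trail the pool. */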
+ size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
+ size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
+ pool->raw = NULL;
+ pool->is_aged = !!age;
+ pool->query_gen = 0;
pool->min_dcs = dcs;
- if (!priv->counter_fallback)
- pool->raw = cont->init_mem_mng->raws + n_valid %
- MLX5_CNT_CONTAINER_RESIZE;
- pool->raw_hw = NULL;
rte_spinlock_init(&pool->sl);
- /*
- * The generation of the new allocated counters in this pool is 0, 2 in
- * the pool generation makes all the counters valid for allocation.
- * The start and end query generation protect the counters be released
- * between the query and update gap period will not be reallocated
- * without the last query finished and stats updated to the memory.
- */
- rte_atomic64_set(&pool->start_query_gen, 0x2);
- /*
- * There's no background query thread for fallback mode, set the
- * end_query_gen to the maximum value since no need to wait for
- * statistics update.
- */
- rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
- INT64_MAX : 0x2);
- TAILQ_INIT(&pool->counters);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- pool->index = n_valid;
- cont->pools[n_valid] = pool;
- /* Pool initialization must be updated before host thread access. */
- rte_cio_wmb();
- rte_atomic16_add(&cont->n_valid, 1);
- return cont;
+ rte_spinlock_init(&pool->csl);
+ TAILQ_INIT(&pool->counters[0]);
+ TAILQ_INIT(&pool->counters[1]);
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+ rte_spinlock_lock(&cmng->pool_update_sl);
+ pool->index = cmng->n_valid;
+ if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
+ mlx5_free(pool);
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ return NULL;
+ }
+ cmng->pools[pool->index] = pool;
+ cmng->n_valid++;
+ if (unlikely(fallback)) {
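+ /* Track the devx ID range to speed up pool lookups by ID. */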
+ int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+ if (base < cmng->min_id)
+ cmng->min_id = base;
+ if (base > cmng->max_id)
+ cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+ cmng->last_pool_idx = pool->index;
+ }
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ return pool;
}
/**
* Pointer to the Ethernet device structure.
* @param[out] cnt_free
* Where to put the pointer of a new counter.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
+ * @param[in] age
+ * Whether the pool is for counters that were allocated for aging.
*
* @return
- * The counter container pointer and @p cnt_free is set on success,
+ * The counter pool pointer and @p cnt_free is set on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_counter **cnt_free,
- uint32_t batch)
+ uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
struct mlx5_flow_counter_pool *pool;
+ struct mlx5_counters tmp_tq;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
+ bool fallback = priv->sh->cmng.counter_fallback;
uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
- if (!batch) {
+ if (fallback) {
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
if (!dcs)
return NULL;
- pool = flow_dv_find_pool_by_id(cont, dcs->id);
+ pool = flow_dv_find_pool_by_id(cmng, dcs->id);
if (!pool) {
- cont = flow_dv_pool_create(dev, dcs, batch);
- if (!cont) {
+ pool = flow_dv_pool_create(dev, dcs, age);
+ if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- pool = TAILQ_FIRST(&cont->pool_list);
- } else if (dcs->id < pool->min_dcs->id) {
- rte_atomic64_set(&pool->a64_dcs,
- (int64_t)(uintptr_t)dcs);
}
i = dcs->id % MLX5_COUNTERS_PER_POOL;
- cnt = &pool->counters_raw[i];
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
- MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ cnt = MLX5_POOL_GET_CNT(pool, i);
+ cnt->pool = pool;
+ cnt->dcs_when_free = dcs;
*cnt_free = cnt;
- return cont;
+ return pool;
}
- /* bulk_bitmap is in 128 counters units. */
- if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
}
- cont = flow_dv_pool_create(dev, dcs, batch);
- if (!cont) {
+ pool = flow_dv_pool_create(dev, dcs, age);
+ if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- pool = TAILQ_FIRST(&cont->pool_list);
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
- }
- *cnt_free = &pool->counters_raw[0];
- return cont;
-}
-
-/**
- * Search for existed shared counter.
- *
- * @param[in] cont
- * Pointer to the relevant counter pool container.
- * @param[in] id
- * The shared counter ID to search.
- * @param[out] ppool
- * mlx5 flow counter pool in the container,
- *
- * @return
- * NULL if not existed, otherwise pointer to the shared extend counter.
- */
-static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
- struct mlx5_flow_counter_pool **ppool)
-{
- static struct mlx5_flow_counter_ext *cnt;
- struct mlx5_flow_counter_pool *pool;
- uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
-
- for (i = 0; i < n_valid; i++) {
- pool = cont->pools[i];
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
- if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
- if (ppool)
- *ppool = cont->pools[i];
- return cnt;
- }
- }
+ TAILQ_INIT(&tmp_tq);
+ for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ cnt = MLX5_POOL_GET_CNT(pool, i);
+ cnt->pool = pool;
+ TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
}
- return NULL;
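+ /* Counter zero is handed back to the caller; publish the rest. */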
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
+ *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
+ (*cnt_free)->pool = pool;
+ return pool;
}
/**
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
- * @param[in] group
- * Counter flow group.
+ * @param[in] age
+ * Whether the counter was allocated for aging.
*
* @return
* Index to flow counter on success, 0 otherwise and rte_errno is set.
*/
static uint32_t
-flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
- uint16_t group)
+flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
- /*
- * Currently group 0 flow counter cannot be assigned to a flow if it is
- * not the first one in the batch counter allocation, so it is better
- * to allocate counters one by one for these flows in a separate
- * container.
- * A counter can be shared between different groups so need to take
- * shared counters from the single container.
- */
- uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- 0);
+ bool fallback = priv->sh->cmng.counter_fallback;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
if (!priv->config.devx) {
rte_errno = ENOTSUP;
return 0;
}
- if (shared) {
- cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
- if (cnt_ext) {
- if (cnt_ext->ref_cnt + 1 == 0) {
- rte_errno = E2BIG;
- return 0;
- }
- cnt_ext->ref_cnt++;
- cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
- (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
- + 1;
- return cnt_idx;
- }
- }
- /* Pools which has a free counters are in the start. */
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
- /*
- * The free counter reset values must be updated between the
- * counter release to the counter allocation, so, at least one
- * query must be done in this time. ensure it by saving the
- * query generation in the release time.
- * The free list is sorted according to the generation - so if
- * the first one is not updated, all the others are not
- * updated too.
- */
- cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen <
- rte_atomic64_read(&pool->end_query_gen))
- break;
- cnt_free = NULL;
- }
- if (!cnt_free) {
- cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
- if (!cont)
- return 0;
- pool = TAILQ_FIRST(&cont->pool_list);
- }
- if (!batch)
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
+ /* Get free counters from container. */
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
+ if (cnt_free)
+ TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
+ if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
+ goto err;
+ pool = cnt_free->pool;
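+ /* In fallback mode every counter carries its own devx object. */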
+ if (fallback)
+ cnt_free->dcs_when_active = cnt_free->dcs_when_free;
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
struct mlx5_devx_obj *dcs;
+ int ret;
- if (batch) {
- offset = cnt_free - &pool->counters_raw[0];
+ if (!fallback) {
+ offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
dcs = pool->min_dcs;
} else {
offset = 0;
- dcs = cnt_ext->dcs;
+ dcs = cnt_free->dcs_when_free;
}
- cnt_free->action = mlx5_glue->dv_create_flow_action_counter
- (dcs->obj, offset);
- if (!cnt_free->action) {
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
+ &cnt_free->action);
+ if (ret) {
rte_errno = errno;
- return 0;
+ goto err;
}
}
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
- (cnt_free - pool->counters_raw));
- cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
+ MLX5_CNT_ARRAY_IDX(pool, cnt_free));
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
- return 0;
- if (cnt_ext) {
- cnt_ext->shared = shared;
- cnt_ext->ref_cnt = 1;
- cnt_ext->id = id;
- }
- if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
+ goto err;
+ if (!fallback && !priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
- TAILQ_REMOVE(&pool->counters, cnt_free, next);
- if (TAILQ_EMPTY(&pool->counters)) {
- /* Move the pool to the end of the container pool list. */
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
- }
return cnt_idx;
+err:
+ if (cnt_free) {
+ cnt_free->pool = pool;
+ if (fallback)
+ cnt_free->dcs_when_free = cnt_free->dcs_when_active;
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
+ }
+ return 0;
+}
+
+/**
+ * Allocate a shared flow counter.
+ *
+ * @param[in] ctx
+ * Pointer to the shared counter configuration.
+ * @param[out] data
+ * Pointer to save the allocated counter index.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int32_t
+flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
+{
+ struct mlx5_shared_counter_conf *conf = ctx;
+ struct rte_eth_dev *dev = conf->dev;
+ struct mlx5_flow_counter *cnt;
+
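+ /* Tag the returned index as shared with the dedicated offset bit. */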
+ data->dword = flow_dv_counter_alloc(dev, 0);
+ data->dword |= MLX5_CNT_SHARED_OFFSET;
+ cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
+ cnt->shared_info.id = conf->id;
+ return 0;
+}
+
+/**
+ * Get a shared flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * Index to flow counter on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_counter_conf conf = {
+ .dev = dev,
+ .id = id,
+ };
+ union mlx5_l3t_data data = {
+ .dword = 0,
+ };
+
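+ /* Look the ID up in the three-level table; the callback allocates on miss. */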
+ mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
+ flow_dv_counter_alloc_shared_cb, &conf);
+ return data.dword;
+}
+
+/**
+ * Get age param from counter index.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] counter
+ * Index to the counter handler.
+ *
+ * @return
+ * The aging parameter specified for the counter index.
+ */
+static struct mlx5_age_param*
+flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
+ uint32_t counter)
+{
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_pool *pool = NULL;
+
+ flow_dv_counter_get_by_idx(dev, counter, &pool);
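+ /* Convert the 1-based global index into an offset inside the pool. */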
+ counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
+ cnt = MLX5_POOL_GET_CNT(pool, counter);
+ return MLX5_CNT_TO_AGE(cnt);
+}
+
+/**
+ * Remove a flow counter from aged counter list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] counter
+ * Index to the counter handler.
+ * @param[in] cnt
+ * Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
+ uint32_t counter, struct mlx5_flow_counter *cnt)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
+
+ age_info = GET_PORT_AGE_INFO(priv);
+ age_param = flow_dv_counter_idx_get_age(dev, counter);
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ /*
+ * We need the lock even on age timeout, since the counter
+ * may still be in use by the aging process.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ }
}
/**
* Index to the counter handler.
*/
static void
-flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
+flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
+ enum mlx5_counter_type cnt_type;
if (!counter)
return;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (counter < MLX5_CNT_BATCH_OFFSET) {
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (cnt_ext && --cnt_ext->ref_cnt)
- return;
- }
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ if (IS_SHARED_CNT(counter) &&
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
+ return;
+ if (pool->is_aged)
+ flow_dv_counter_remove_from_age(dev, counter, cnt);
+ cnt->pool = pool;
/*
- * Counters released between query trigger and handler need
- * to wait the next round of query. Since the packets arrive
- * in the gap period will not be taken into account to the
- * old counter.
+ * Put the counter back on a list to be updated, in non-fallback mode.
+ * Two lists are used alternately: while one is being queried, freed
+ * counters are added to the other, selected by the pool query_gen
+ * value. After a query finishes, its list is appended to the global
+ * container counter list. The lists are switched when a query starts,
+ * so no lock is needed: the query callback and the release function
+ * always operate on different lists.
*/
- cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
+ if (!priv->sh->cmng.counter_fallback) {
+ rte_spinlock_lock(&pool->csl);
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
+ rte_spinlock_unlock(&pool->csl);
+ } else {
+ cnt->dcs_when_free = cnt->dcs_when_active;
+ cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
+ rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
+ TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
+ cnt, next);
+ rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
+ }
}
/**
*/
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ const struct mlx5_flow_tunnel *tunnel,
const struct rte_flow_attr *attributes,
- bool external __rte_unused,
+ const struct flow_grp_info *grp_info,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
+ uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
int ret = 0;
#ifndef HAVE_MLX5DV_DR
+ RTE_SET_USED(tunnel);
+ RTE_SET_USED(grp_info);
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
#else
uint32_t table = 0;
- ret = mlx5_flow_group_to_table(attributes, external,
- attributes->group, !!priv->fdb_def_rule,
- &table, error);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
+ grp_info, error);
if (ret)
return ret;
if (!table)
ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
+ if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
+ attributes->priority > lowest_priority)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
uint16_t ether_type = 0;
int actions_n = 0;
uint8_t item_ipv6_proto = 0;
+ int fdb_mirror_limit = 0;
+ int modify_after_mirror = 0;
+ const struct rte_flow_item *geneve_item = NULL;
const struct rte_flow_item *gre_item = NULL;
+ const struct rte_flow_item *gtp_item = NULL;
const struct rte_flow_action_raw_decap *decap;
const struct rte_flow_action_raw_encap *encap;
- const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action_rss *rss = NULL;
+ const struct rte_flow_action_rss *sample_rss = NULL;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_count *sample_count = NULL;
const struct rte_flow_item_tcp nic_tcp_mask = {
.hdr = {
.tcp_flags = 0xFF,
.dst_port = RTE_BE16(UINT16_MAX),
}
};
- const struct rte_flow_item_ipv4 nic_ipv4_mask = {
- .hdr = {
- .src_addr = RTE_BE32(0xffffffff),
- .dst_addr = RTE_BE32(0xffffffff),
- .type_of_service = 0xff,
- .next_proto_id = 0xff,
- .time_to_live = 0xff,
- },
- };
const struct rte_flow_item_ipv6 nic_ipv6_mask = {
.hdr = {
.src_addr =
.proto = 0xff,
.hop_limits = 0xff,
},
+ .has_frag_ext = 1,
+ };
+ const struct rte_flow_item_ecpri nic_ecpri_mask = {
+ .hdr = {
+ .common = {
+ .u32 =
+ RTE_BE32(((const struct rte_ecpri_common_hdr) {
+ .type = 0xFF,
+ }).u32),
+ },
+ .dummy[0] = 0xffffffff,
+ },
};
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
uint16_t queue_index = 0xFFFF;
const struct rte_flow_item_vlan *vlan_m = NULL;
- int16_t rw_act_num = 0;
+ uint32_t rw_act_num = 0;
uint64_t is_root;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ };
+ const struct rte_eth_hairpin_conf *conf;
if (items == NULL)
return -1;
- ret = flow_dv_validate_attributes(dev, attr, external, error);
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ tunnel = flow_items_to_tunnel(items);
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_DECAP;
+ } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
+ tunnel = flow_actions_to_tunnel(actions);
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ } else {
+ tunnel = NULL;
+ }
+ if (tunnel && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "decap not supported "
+ "for VF representor");
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
if (ret < 0)
return ret;
is_root = (uint64_t)ret;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
+ if (!mlx5_flow_os_item_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (type) {
+ case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
+ if (items[0].type != (typeof(items[0].type))
+ MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "MLX5 private items "
+ "must be the first");
+ break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
- error);
+ true, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_validate_item_vlan(items, item_flags,
- dev, error);
+ ret = flow_dv_validate_item_vlan(items, item_flags,
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- last_item,
- ether_type,
- &nic_ipv4_mask,
- error);
+ ret = flow_dv_validate_item_ipv4(items, item_flags,
+ last_item, ether_type,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
next_protocol = 0xff;
}
break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ ret = flow_dv_validate_item_ipv6_frag_ext(items,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
(items, item_flags,
error);
if (ret < 0)
return ret;
+ geneve_item = items;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ ret = mlx5_flow_validate_item_geneve_opt(items,
+ last_item,
+ geneve_item,
+ dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
ret = mlx5_flow_validate_item_mpls(dev, items,
item_flags,
error);
if (ret < 0)
return ret;
+ gtp_item = items;
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ ret = flow_dv_validate_item_gtp_psc(items, last_item,
+ gtp_item, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ /* Capacity will be checked in the translate stage. */
+ ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ecpri_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
++actions_n;
action_flags |= MLX5_FLOW_ACTION_FLAG |
MLX5_FLOW_ACTION_MARK_EXT;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
+
} else {
action_flags |= MLX5_FLOW_ACTION_FLAG;
++actions_n;
++actions_n;
action_flags |= MLX5_FLOW_ACTION_MARK |
MLX5_FLOW_ACTION_MARK_EXT;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
} else {
action_flags |= MLX5_FLOW_ACTION_MARK;
++actions_n;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_META;
rw_act_num += MLX5_ACT_NUM_SET_META;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
error);
if (ret < 0)
return ret;
+ if (rss && sample_rss &&
+ (sample_rss->level != rss->level ||
+ sample_rss->types != rss->types))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Can't use the different RSS types "
+ "or level in the same flow");
if (rss != NULL && rss->queue_num)
queue_index = rss->queue[0];
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = flow_dv_validate_action_count(dev, error);
+ ret = flow_dv_validate_action_count(dev, actions,
+ action_flags,
+ error);
if (ret < 0)
return ret;
+ count = actions->conf;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
ret = flow_dv_validate_action_decap(dev, action_flags,
+ actions, item_flags,
attr, error);
if (ret < 0)
return ret;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
ret = flow_dv_validate_action_raw_encap_decap
(dev, NULL, actions->conf, attr, &action_flags,
- &actions_n, error);
+ &actions_n, actions, item_flags, error);
if (ret < 0)
return ret;
break;
(dev,
decap ? decap : &empty_decap, encap,
attr, &action_flags, &actions_n,
- error);
+ actions, item_flags, error);
if (ret < 0)
return ret;
break;
RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
MLX5_FLOW_ACTION_SET_MAC_SRC :
MLX5_FLOW_ACTION_SET_MAC_DST;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/*
* Even if the source and destination MAC addresses have
* overlap in the header with 4B alignment, the convert
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
MLX5_FLOW_ACTION_SET_IPV4_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
MLX5_FLOW_ACTION_SET_IPV6_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
MLX5_FLOW_ACTION_SET_TP_SRC :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TTL ?
MLX5_FLOW_ACTION_SET_TTL :
rw_act_num += MLX5_ACT_NUM_MDF_TTL;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- ret = flow_dv_validate_action_jump(actions,
+ ret = flow_dv_validate_action_jump(dev, tunnel, actions,
action_flags,
attr, external,
error);
if (ret)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
+ fdb_mirror_limit)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "sample and jump action combination is not supported");
++actions_n;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
MLX5_FLOW_ACTION_INC_TCP_SEQ :
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
MLX5_FLOW_ACTION_INC_TCP_ACK :
/* Meter action will add one more TAG action. */
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
+ if (!attr->transfer && !attr->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Shared ASO age action is not supported for group 0");
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_validate_action_age(action_flags,
+ actions, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ /*
+ * Validate mutual exclusion of the regular AGE action (using
+ * a counter) with shared counter actions.
+ */
+ if (!priv->sh->flow_hit_aso_en) {
+ if (count && count->shared)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "old age and shared count combination is not supported");
+ if (sample_count)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "old age action and count must be in the same sub flow");
+ }
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
ret = flow_dv_validate_action_modify_ipv4_dscp
(action_flags,
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
++actions_n;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ ret = flow_dv_validate_action_sample(&action_flags,
+ actions, dev,
+ attr, item_flags,
+ rss, &sample_rss,
+ &sample_count,
+ &fdb_mirror_limit,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ ++actions_n;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ if (actions[0].type != (typeof(actions[0].type))
+ MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "MLX5 private action "
+ "must be the first");
+
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ ret = flow_dv_validate_action_modify_field(dev,
+ action_flags,
+ actions,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ rw_act_num += ret;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
"action not supported");
}
}
+ /*
+ * Validate actions in flow rules:
+ * - Explicit decap action is prohibited by the tunnel offload API.
+ * - Drop action in tunnel steer rule is prohibited by the API.
+ * - Application cannot use MARK action because its value can mask
+ * tunnel default miss notification.
+ * - JUMP in tunnel match rule has no support in current PMD
+ * implementation.
+ * - TAG & META are reserved for future uses.
+ */
+ if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
+ uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
+ MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_SET_TAG |
+ MLX5_FLOW_ACTION_SET_META |
+ MLX5_FLOW_ACTION_DROP;
+
+ if (action_flags & bad_actions_mask)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid RTE action in tunnel "
+ "set decap rule");
+ if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "tunnel set decap rule must terminate "
+ "with JUMP");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "tunnel flows for ingress traffic only");
+ }
+ if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
+ uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
+ MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_SET_TAG |
+ MLX5_FLOW_ACTION_SET_META;
+
+ if (action_flags & bad_actions_mask)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid RTE action in tunnel "
+ "set match rule");
+ }
/*
* Validate the drop action mutual exclusion with other actions.
* Drop action is mutually-exclusive with any other action, except for
* Count action.
+ * Drop action compatibility with tunnel offload was already validated.
*/
- if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
+ MLX5_FLOW_ACTION_TUNNEL_SET));
+ else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
(action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
actions,
"no fate action is found");
}
- /* Continue validation for Xcap actions.*/
- if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ /*
+ * Continue validation for Xcap and VLAN actions.
+ * If hairpin works in explicit TX rule mode, there is no action
+ * splitting and the validation of a hairpin ingress flow should be
+ * the same as for other standard flows.
+ */
+ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
+ MLX5_FLOW_VLAN_ACTIONS)) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+ ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
+ conf->tx_explicit != 0))) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap and decap "
"combination aren't supported");
- if (!attr->transfer && attr->ingress && (action_flags &
- MLX5_FLOW_ACTION_ENCAP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "encap is not supported"
- " for ingress traffic");
+ if (!attr->transfer && attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
}
- /* Hairpin flow will add one more TAG action. */
+ /*
+ * Hairpin flow will add one more TAG action in TX implicit mode.
+ * In TX explicit mode, there will be no hairpin flow ID.
+ */
if (hairpin > 0)
rw_act_num += MLX5_ACT_NUM_SET_TAG;
/* Extra metadata enabled: one more TAG action will be added. */
dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev))
rw_act_num += MLX5_ACT_NUM_SET_TAG;
- if ((uint32_t)rw_act_num >
+ if (rw_act_num >
flow_dv_modify_hdr_action_max(dev, is_root)) {
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "too many header modify"
" actions to support");
}
+ /* E-Switch egress mirror and modify flow has a limitation on CX5. */
+ if (fdb_mirror_limit && modify_after_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "sample before modify action is not supported");
return 0;
}
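
The validation pass above hinges on a single 64-bit action_flags bitmask: each parsed action sets its bit, all modify-header actions fold into one entry of the device action budget, and exclusivity rules such as DROP-with-anything-but-COUNT reduce to simple mask tests. Below is a minimal standalone sketch of that bookkeeping; the ACT_* values are illustrative stand-ins, not the real MLX5_FLOW_ACTION_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MLX5_FLOW_ACTION_* bits. */
#define ACT_DROP           (1ull << 0)
#define ACT_COUNT          (1ull << 1)
#define ACT_SET_TAG        (1ull << 2) /* modify-header action */
#define ACT_SET_TTL        (1ull << 3) /* modify-header action */
#define ACT_SAMPLE         (1ull << 4)
#define MODIFY_HDR_ACTIONS (ACT_SET_TAG | ACT_SET_TTL)

static int
validate(const uint64_t *acts, int n, int *modify_after_mirror)
{
	uint64_t flags = 0;
	int actions_n = 0;
	int i;

	*modify_after_mirror = 0;
	for (i = 0; i < n; i++) {
		uint64_t a = acts[i];

		if (a & MODIFY_HDR_ACTIONS) {
			/* Count all modify-header actions as one action. */
			if (!(flags & MODIFY_HDR_ACTIONS))
				++actions_n;
			/* Header rewrite after mirroring is restricted. */
			if (flags & ACT_SAMPLE)
				*modify_after_mirror = 1;
		} else {
			++actions_n;
		}
		flags |= a;
	}
	/* DROP is mutually exclusive with everything except COUNT. */
	if ((flags & ACT_DROP) && (flags & ~(ACT_DROP | ACT_COUNT)))
		return -1;
	return actions_n;
}

int main(void)
{
	uint64_t ok[] = { ACT_SET_TAG, ACT_SET_TTL, ACT_COUNT };
	uint64_t bad[] = { ACT_DROP, ACT_SET_TAG };
	int mam;

	printf("ok: %d\n", validate(ok, 3, &mam));   /* 2: rewrites fold into one */
	printf("bad: %d\n", validate(bad, 2, &mam)); /* -1: DROP excludes the rest */
	return 0;
}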
struct mlx5_flow *dev_flow;
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
/* Guard against overflow of the per-thread flow array. */
- if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
rte_flow_error_set(error, ENOSPC,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not free temporary device flow");
"not enough memory to create flow handle");
return NULL;
}
- /* No multi-thread supporting. */
- dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
+ dev_flow = &wks->flows[wks->flow_idx++];
+ memset(dev_flow, 0, sizeof(*dev_flow));
dev_flow->handle = dev_handle;
dev_flow->handle_idx = handle_idx;
- dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
/*
- * The matching value needs to be cleared to 0 before using. In the
- * past, it will be automatically cleared when using rte_*alloc
- * API. The time consumption will be almost the same as before.
+ * In some old rdma-core releases, the length of the matching
+ * parameter is checked before continuing, and that check must use
+ * the length without the misc4 param. If the flow has misc4 support,
+ * the length is adjusted accordingly later. Each param member is
+ * naturally aligned on a 64B boundary.
*/
- memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
dev_flow->ingress = attr->ingress;
dev_flow->dv.transfer = attr->transfer;
return dev_flow;
}
#endif
+/**
+ * Add match of ip_version.
+ *
+ * @param[in] group
+ * Flow group.
+ * @param[in] headers_v
+ * Values header pointer.
+ * @param[in] headers_m
+ * Masks header pointer.
+ * @param[in] ip_version
+ * The IP version to set.
+ */
+static inline void
+flow_dv_set_match_ip_version(uint32_t group,
+ void *headers_v,
+ void *headers_m,
+ uint8_t ip_version)
+{
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
+ ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
+}
+
/**
* Add Ethernet item to matcher and to the value.
*
*/
static void
flow_dv_translate_item_eth(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
{
const struct rte_flow_item_eth *eth_m = item->mask;
const struct rte_flow_item_eth *eth_v = item->spec;
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.type = RTE_BE16(0xffff),
+ .has_vlan = 0,
};
- void *headers_m;
- void *headers_v;
+ void *hdrs_m;
+ void *hdrs_v;
char *l24_v;
unsigned int i;
if (!eth_m)
eth_m = &nic_mask;
if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
} else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
ð_m->dst, sizeof(eth_m->dst));
/* The value must be in the range of the mask. */
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
ð_m->src, sizeof(eth_m->src));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
/* The value must be in the range of the mask. */
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
- if (eth_v->type) {
- /* When ethertype is present set mask for tagged VLAN. */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
- /* Set value for tagged VLAN if ethertype is 802.1Q. */
- if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
- eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
- 1);
- /* Return here to avoid setting match on ethertype. */
- return;
- }
- }
/*
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
* Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ * eCPRI over Ether layer will use type value 0xAEFE.
*/
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ if (eth_m->type == 0xFFFF) {
+ /* Set cvlan_tag mask for any single/multi/un-tagged case. */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ switch (eth_v->type) {
+ case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_QINQ):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+ return;
+ default:
+ break;
+ }
+ }
+ if (eth_m->has_vlan) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ if (eth_v->has_vlan) {
+ /*
+ * Here, when the has_more_vlan field in the VLAN item is
+ * also not set, only single-tagged packets will be matched.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+ return;
+ }
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
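
With a fully masked ethertype, the translator above rewrites the match into the most HW-friendly form instead of a raw ethertype comparison: VLAN/QinQ become cvlan/svlan tag-bit matches and IPv4/IPv6 become ip_version matches. A compact standalone sketch of that dispatch (the enum and helper are illustrative, not PRM definitions):

#include <stdint.h>
#include <stdio.h>

#define ETHTYPE_VLAN 0x8100
#define ETHTYPE_QINQ 0x88A8
#define ETHTYPE_IPV4 0x0800
#define ETHTYPE_IPV6 0x86DD

enum match_kind {
	MATCH_CVLAN,         /* set cvlan_tag bit */
	MATCH_SVLAN,         /* set svlan_tag bit */
	MATCH_IPV4,          /* set ip_version = 4 */
	MATCH_IPV6,          /* set ip_version = 6 */
	MATCH_RAW_ETHERTYPE, /* fall back to the ethertype field */
};

/* Only a fully masked ethertype may be rewritten into a more
 * HW-friendly match, mirroring flow_dv_translate_item_eth(). */
static enum match_kind
classify(uint16_t type_mask, uint16_t type_val)
{
	if (type_mask == 0xFFFF) {
		switch (type_val) {
		case ETHTYPE_VLAN: return MATCH_CVLAN;
		case ETHTYPE_QINQ: return MATCH_SVLAN;
		case ETHTYPE_IPV4: return MATCH_IPV4;
		case ETHTYPE_IPV6: return MATCH_IPV6;
		default: break;
		}
	}
	return MATCH_RAW_ETHERTYPE;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0xFFFF, ETHTYPE_IPV6),  /* MATCH_IPV6 */
	       classify(0xFFFF, 0xAEFE),        /* raw: eCPRI ethertype */
	       classify(0x0FFF, ETHTYPE_IPV4)); /* partial mask: raw */
	return 0;
}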
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *hdrs_m;
+ void *hdrs_v;
uint16_t tci_m;
uint16_t tci_v;
- if (!vlan_v)
- return;
- if (!vlan_m)
- vlan_m = &rte_flow_item_vlan_mask;
if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
} else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/*
* This is a workaround; masks are not supported
* and are pre-validated.
*/
- dev_flow->handle->vf_vlan.tag =
- rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ if (vlan_v)
+ dev_flow->handle->vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ }
+ /*
+ * When a VLAN item exists in the flow, mark the packet as tagged,
+ * even if TCI is not specified.
+ */
+ if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
}
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &rte_flow_item_vlan_mask;
tci_m = rte_be_to_cpu_16(vlan_m->tci);
tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_m->inner_type == 0xFFFF) {
+ switch (vlan_v->inner_type) {
+ case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+ return;
+ default:
+ break;
+ }
+ }
+ if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ /* Only one vlan_tag bit can be set. */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+ return;
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
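
The TCI handling above splits one 16-bit tag control field into the three match fields first_vid, first_cfi and first_prio. A small self-contained sketch of the same bit arithmetic:

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI: PCP (3 bits) | DEI/CFI (1 bit) | VID (12 bits). */
struct vlan_tci {
	uint16_t vid; /* -> first_vid  */
	uint8_t dei;  /* -> first_cfi  */
	uint8_t pcp;  /* -> first_prio */
};

static struct vlan_tci
tci_split(uint16_t tci)
{
	struct vlan_tci t = {
		.vid = tci & 0x0fff,
		.dei = (tci >> 12) & 0x1,
		.pcp = tci >> 13,
	};
	return t;
}

int main(void)
{
	/* PCP 7, DEI 0, VID 100 -> 0xE064 in host order. */
	struct vlan_tci t = tci_split(0xE064);

	printf("vid=%u dei=%u pcp=%u\n", t.vid, t.dei, t.pcp);
	return 0;
}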
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] item_flags
- * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
- const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
- /*
- * On outer header (which must contains L2), or inner header with L2,
- * set cvlan_tag mask bit to mark this packet as untagged.
- * This should be done even if item->spec is empty.
- */
- if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
if (!ipv4_v)
return;
if (!ipv4_m)
ipv4_m->hdr.time_to_live);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+ !!(ipv4_m->hdr.fragment_offset));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] item_flags
- * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
- const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- if (group == 0)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
- /*
- * On outer header (which must contains L2), or inner header with L2,
- * set cvlan_tag mask bit to mark this packet as untagged.
- * This should be done even if item->spec is empty.
- */
- if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
if (!ipv6_v)
return;
if (!ipv6_m)
ipv6_m->hdr.hop_limits);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+ !!(ipv6_m->has_frag_ext));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
+}
+
+/**
+ * Add IPV6 fragment extension item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
+ const struct rte_flow_item_ipv6_frag_ext nic_mask = {
+ .hdr = {
+ .next_header = 0xff,
+ .frag_data = RTE_BE16(0xffff),
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ /* IPv6 fragment extension item exists, so packet is IP fragment. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
+ if (!ipv6_frag_ext_v)
+ return;
+ if (!ipv6_frag_ext_m)
+ ipv6_frag_ext_m = &nic_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv6_frag_ext_m->hdr.next_header);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv6_frag_ext_v->hdr.next_header &
+ ipv6_frag_ext_m->hdr.next_header);
}
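
A convention that repeats through all of these translators: the value written into the match buffer is always pre-masked (spec & mask), because hardware expects the value to lie within the mask. A tiny standalone illustration with a MAC-sized byte array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write 'spec & mask' into the value half of a matcher entry, the
 * same convention applied above to MACs, TCI, next_header, etc. */
static void
apply_mask(uint8_t *dst_val, uint8_t *dst_mask,
	   const uint8_t *spec, const uint8_t *mask, size_t len)
{
	size_t i;

	memcpy(dst_mask, mask, len);
	for (i = 0; i < len; i++)
		dst_val[i] = spec[i] & mask[i];
}

int main(void)
{
	const uint8_t spec[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	uint8_t v[6], m[6];
	int i;

	apply_mask(v, m, spec, mask, sizeof(v));
	for (i = 0; i < 6; i++)
		printf("%02x%c", v[i], i == 5 ? '\n' : ':');
	return 0;
}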
/**
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- const char *tni_flow_id_m = (const char *)nvgre_m->tni;
- const char *tni_flow_id_v = (const char *)nvgre_v->tni;
+ const char *tni_flow_id_m;
+ const char *tni_flow_id_v;
char *gre_key_m;
char *gre_key_v;
int size;
return;
if (!nvgre_m)
nvgre_m = &rte_flow_item_nvgre_mask;
+ tni_flow_id_m = (const char *)nvgre_m->tni;
+ tni_flow_id_v = (const char *)nvgre_v->tni;
size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
+/**
+ * Create GENEVE TLV option resource.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+ sh->geneve_tlv_option_resource;
+ struct mlx5_devx_obj *obj;
+ const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
+ int ret = 0;
+
+ if (!geneve_opt_v)
+ return -1;
+ rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+ if (geneve_opt_resource != NULL) {
+ if (geneve_opt_resource->option_class ==
+ geneve_opt_v->option_class &&
+ geneve_opt_resource->option_type ==
+ geneve_opt_v->option_type &&
+ geneve_opt_resource->length ==
+ geneve_opt_v->option_len) {
+ /* We already have a GENEVE TLV option obj allocated. */
+ __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ } else {
+ ret = rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Only one GENEVE TLV option supported");
+ goto exit;
+ }
+ } else {
+ /* Create a GENEVE TLV object and resource. */
+ obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+ geneve_opt_v->option_class,
+ geneve_opt_v->option_type,
+ geneve_opt_v->option_len);
+ if (!obj) {
+ ret = rte_flow_error_set(error, ENODATA,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create GENEVE TLV Devx object");
+ goto exit;
+ }
+ sh->geneve_tlv_option_resource =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*geneve_opt_resource),
+ 0, SOCKET_ID_ANY);
+ if (!sh->geneve_tlv_option_resource) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ ret = rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "GENEVE TLV object memory allocation failed");
+ goto exit;
+ }
+ geneve_opt_resource = sh->geneve_tlv_option_resource;
+ geneve_opt_resource->obj = obj;
+ geneve_opt_resource->option_class = geneve_opt_v->option_class;
+ geneve_opt_resource->option_type = geneve_opt_v->option_type;
+ geneve_opt_resource->length = geneve_opt_v->option_len;
+ __atomic_store_n(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ }
+exit:
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+ return ret;
+}
+
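
The registration above implements a single-instance, reference-counted resource guarded by a lock: the existing object is reused when class, type and length match, and a second configuration is refused otherwise. A minimal sketch of the same pattern, with a pthread mutex standing in for rte_spinlock and a hypothetical resource struct:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tlv_opt_resource {
	uint16_t option_class;
	uint8_t option_type;
	uint8_t length;
	uint32_t refcnt;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct tlv_opt_resource *singleton;

static int
tlv_opt_register(uint16_t cls, uint8_t type, uint8_t len)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (singleton) {
		if (singleton->option_class == cls &&
		    singleton->option_type == type &&
		    singleton->length == len)
			/* Same parameters: just take another reference. */
			__atomic_fetch_add(&singleton->refcnt, 1,
					   __ATOMIC_RELAXED);
		else
			ret = -1; /* only one option configuration allowed */
	} else {
		singleton = calloc(1, sizeof(*singleton));
		if (!singleton) {
			ret = -1;
		} else {
			singleton->option_class = cls;
			singleton->option_type = type;
			singleton->length = len;
			__atomic_store_n(&singleton->refcnt, 1,
					 __ATOMIC_RELAXED);
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n",
	       tlv_opt_register(0x10, 1, 4),  /* create:   0 */
	       tlv_opt_register(0x10, 1, 4),  /* reuse:    0 */
	       tlv_opt_register(0x20, 2, 4)); /* conflict: -1 */
	return 0;
}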
+/**
+ * Add Geneve TLV option item to matcher.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
+ const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ rte_be32_t opt_data_key = 0, opt_data_mask = 0;
+ int ret = 0;
+
+ if (!geneve_opt_v)
+ return -1;
+ if (!geneve_opt_m)
+ geneve_opt_m = &rte_flow_item_geneve_opt_mask;
+ ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
+ error);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
+ return ret;
+ }
+ /*
+ * The GENEVE TLV option length is expressed by the option length
+ * field in the GENEVE header.
+ * If a match on the option length was not requested but the GENEVE
+ * TLV option item is present, set the option length field implicitly.
+ */
+ if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_MASK);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+ geneve_opt_v->option_len + 1);
+ }
+ /* Set the data. */
+ if (geneve_opt_v->data) {
+ memcpy(&opt_data_key, geneve_opt_v->data,
+ RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+ sizeof(opt_data_key)));
+ MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
+ sizeof(opt_data_key));
+ memcpy(&opt_data_mask, geneve_opt_m->data,
+ RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
+ sizeof(opt_data_mask)));
+ MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
+ sizeof(opt_data_mask));
+ MLX5_SET(fte_match_set_misc3, misc3_m,
+ geneve_tlv_option_0_data,
+ rte_be_to_cpu_32(opt_data_mask));
+ MLX5_SET(fte_match_set_misc3, misc3_v,
+ geneve_tlv_option_0_data,
+ rte_be_to_cpu_32(opt_data_key & opt_data_mask));
+ }
+ return ret;
+}
+
/**
* Add MPLS item to matcher and to the value.
*
reg = flow_dv_get_metadata_reg(dev, attr, NULL);
if (reg < 0)
return;
+ MLX5_ASSERT(reg != REG_NON);
/*
* In datapath code there is no endianness
* conversions for performance reasons, all
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] attr
+ * Flow attributes.
*
* @return
* 0 on success, a negative errno value otherwise.
*/
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
- void *key, const struct rte_flow_item *item)
+ void *key, const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr)
{
const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
priv = mlx5_port_to_eswitch_info(id, item == NULL);
if (!priv)
return -rte_errno;
- /* Translate to vport field or to metadata, depending on mode. */
- if (priv->vport_meta_mask)
+ /*
+ * Translate to vport field or to metadata, depending on mode.
+ * Kernel can use either misc.source_port or half of C0 metadata
+ * register.
+ */
+ if (priv->vport_meta_mask) {
+ /*
+ * Provide the hint for SW steering library
+ * to insert the flow into ingress domain and
+ * save the extra vport match.
+ */
+ if (mask == 0xffff && priv->vport_id == 0xffff &&
+ priv->pf_bond < 0 && attr->transfer)
+ flow_dv_translate_item_source_vport
+ (matcher, key, priv->vport_id, mask);
+ /*
+ * We should always set the vport metadata register,
+ * otherwise the SW steering library can drop
+ * the rule if the wire vport metadata value is not zero;
+ * this depends on the kernel configuration.
+ */
flow_dv_translate_item_meta_vport(matcher, key,
priv->vport_meta_tag,
priv->vport_meta_mask);
- else
+ } else {
flow_dv_translate_item_source_vport(matcher, key,
priv->vport_id, mask);
+ }
return 0;
}
return;
if (!icmp6_m)
icmp6_m = &rte_flow_item_icmp6_mask;
- /*
- * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
- * If only the protocol is specified, no need to match the frag.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
icmp6_v->type & icmp6_m->type);
{
const struct rte_flow_item_icmp *icmp_m = item->mask;
const struct rte_flow_item_icmp *icmp_v = item->spec;
+ uint32_t icmp_header_data_m = 0;
+ uint32_t icmp_header_data_v = 0;
void *headers_m;
void *headers_v;
void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
return;
if (!icmp_m)
icmp_m = &rte_flow_item_icmp_mask;
- /*
- * Force flow only to match the non-fragmented IPv4 ICMP packets.
- * If only the protocol is specified, no need to match the frag.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
icmp_m->hdr.icmp_type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
icmp_m->hdr.icmp_code);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+ icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+ icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+ if (icmp_header_data_m) {
+ icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+ icmp_header_data_v |=
+ rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+ icmp_header_data_m);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+ icmp_header_data_v & icmp_header_data_m);
+ }
}
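
The echo identifier and sequence number are packed into the single 32-bit icmp_header_data match field, identifier in the upper half and sequence number in the lower. A host-order sketch of that packing (the driver additionally converts each 16-bit field from network order first):

#include <stdint.h>
#include <stdio.h>

static uint32_t
icmp_header_data(uint16_t ident, uint16_t seq)
{
	return (uint32_t)seq | ((uint32_t)ident << 16);
}

int main(void)
{
	uint32_t m = icmp_header_data(0xffff, 0x0000); /* match ident only */
	uint32_t v = icmp_header_data(0x1234, 0x0001);

	/* As elsewhere in the file, the value is applied pre-masked. */
	printf("mask=0x%08x value=0x%08x\n", m, v & m);
	return 0;
}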
/**
return;
if (!gtp_m)
gtp_m = &rte_flow_item_gtp_mask;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
+ gtp_m->v_pt_rsv_flags);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
+ gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
gtp_v->msg_type & gtp_m->msg_type);
rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
+/**
+ * Add GTP PSC item to matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+flow_dv_translate_item_gtp_psc(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
+ const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ union {
+ uint32_t w32;
+ struct {
+ uint16_t seq_num;
+ uint8_t npdu_num;
+ uint8_t next_ext_header_type;
+ };
+ } dw_2;
+ uint8_t gtp_flags;
+
+ /* Always set E-flag match on one, regardless of GTP item settings. */
+ gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
+ gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
+ gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
+ gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
+ /* Set next extension header type. */
+ dw_2.seq_num = 0;
+ dw_2.npdu_num = 0;
+ dw_2.next_ext_header_type = 0xff;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
+ rte_cpu_to_be_32(dw_2.w32));
+ dw_2.seq_num = 0;
+ dw_2.npdu_num = 0;
+ dw_2.next_ext_header_type = 0x85;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
+ rte_cpu_to_be_32(dw_2.w32));
+ if (gtp_psc_v) {
+ union {
+ uint32_t w32;
+ struct {
+ uint8_t len;
+ uint8_t type_flags;
+ uint8_t qfi;
+ uint8_t reserved;
+ };
+ } dw_0;
+
+ /* Set extension header PDU type and QoS. */
+ if (!gtp_psc_m)
+ gtp_psc_m = &rte_flow_item_gtp_psc_mask;
+ dw_0.w32 = 0;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
+ dw_0.qfi = gtp_psc_m->qfi;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
+ rte_cpu_to_be_32(dw_0.w32));
+ dw_0.w32 = 0;
+ dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
+ gtp_psc_m->pdu_type);
+ dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
+ rte_cpu_to_be_32(dw_0.w32));
+ }
+ return 0;
+}
+
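
The dw_2 union above composes the GTP-U DW2 word byte by byte and then converts it with rte_cpu_to_be_32() so each field lands in its wire position. A standalone sketch; the byte-swap helper assumes a little-endian host, where rte_cpu_to_be_32() actually swaps (on big-endian hosts it is an identity):

#include <stdint.h>
#include <stdio.h>

/* Overlay mirroring the dw_2 union in flow_dv_translate_item_gtp_psc(). */
union gtpu_dw_2 {
	uint32_t w32;
	struct {
		uint16_t seq_num;
		uint8_t npdu_num;
		uint8_t next_ext_header_type;
	};
};

/* Little-endian stand-in for rte_cpu_to_be_32(). */
static uint32_t
cpu_to_be_32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	union gtpu_dw_2 dw = { .w32 = 0 };

	dw.next_ext_header_type = 0x85; /* PDU session container */
	/* The swap moves the type into the low byte of the BE word. */
	printf("cpu=0x%08x be=0x%08x\n", dw.w32, cpu_to_be_32(dw.w32));
	return 0;
}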
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+ const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+ struct rte_ecpri_common_hdr common;
+ void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_4);
+ void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+ uint32_t *samples;
+ void *dw_m;
+ void *dw_v;
+
+ if (!ecpri_v)
+ return;
+ if (!ecpri_m)
+ ecpri_m = &rte_flow_item_ecpri_mask;
+ /*
+ * At most four DW samples are supported in a single match now.
+ * Two are used for eCPRI matching:
+ * 1. Type: one byte, mask should be 0x00ff0000 in network order.
+ * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
+ * if any.
+ */
+ if (!ecpri_m->hdr.common.u32)
+ return;
+ samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ /* Need to take the whole DW as the mask to fill the entry. */
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_0);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_0);
+ /* Already big endian (network order) in the header. */
+ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
+ *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
+ /* Sample#0, used for matching type, offset 0. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_0, samples[0]);
+ /* It makes no sense to set the sample ID in the mask field. */
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_0, samples[0]);
+ /*
+ * Checking if message body part needs to be matched.
+ * Some wildcard rules only matching type field should be supported.
+ */
+ if (ecpri_m->hdr.dummy[0]) {
+ common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
+ switch (common.type) {
+ case RTE_ECPRI_MSG_TYPE_IQ_DATA:
+ case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
+ case RTE_ECPRI_MSG_TYPE_DLY_MSR:
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_1);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_1);
+ *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
+ *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
+ ecpri_m->hdr.dummy[0];
+ /* Sample#1, to match message body, offset 4. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_1, samples[1]);
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_1, samples[1]);
+ break;
+ default:
+ /* Others, do not match any sample ID. */
+ break;
+ }
+ }
+}
+
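
The first eCPRI DW is kept in network order, which is why a type-only match uses the mask 0x00ff0000: read as a big-endian number, the message type is the second byte of the DW. A small sketch of that layout:

#include <stdint.h>
#include <stdio.h>

/* eCPRI common header, first DW on the wire:
 * byte 0: revision/reserved/C-bit, byte 1: message type,
 * bytes 2-3: payload size. */
static uint32_t
be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* Revision 1, type 0x00 (IQ data), payload size 0x0040. */
	const uint8_t hdr[4] = { 0x10, 0x00, 0x00, 0x40 };
	uint32_t dw = be32_load(hdr);
	uint32_t type_mask = 0x00ff0000;

	printf("dw=0x%08x type=%u\n", dw, (dw & type_mask) >> 16);
	return 0;
}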
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
return match_criteria_enable;
}
+struct mlx5_hlist_entry *
+flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
+ struct rte_flow_error *error = ctx->error;
+ union mlx5_flow_tbl_key key = { .v64 = key64 };
+ struct mlx5_flow_tbl_resource *tbl;
+ void *domain;
+ uint32_t idx = 0;
+ int ret;
+
+ tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+ if (!tbl_data) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow table data entry");
+ return NULL;
+ }
+ tbl_data->idx = idx;
+ tbl_data->tunnel = tt_prm->tunnel;
+ tbl_data->group_id = tt_prm->group_id;
+ tbl_data->external = !!tt_prm->external;
+ tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
+ tbl_data->is_egress = !!key.direction;
+ tbl_data->is_transfer = !!key.domain;
+ tbl_data->dummy = !!key.dummy;
+ tbl_data->table_id = key.table_id;
+ tbl = &tbl_data->tbl;
+ if (key.dummy)
+ return &tbl_data->entry;
+ if (key.domain)
+ domain = sh->fdb_domain;
+ else if (key.direction)
+ domain = sh->tx_domain;
+ else
+ domain = sh->rx_domain;
+ ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create flow table object");
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ return NULL;
+ }
+ if (key.table_id) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (tbl->obj, &tbl_data->jump.action);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create flow jump action");
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ return NULL;
+ }
+ }
+ MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
+ key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
+ key.table_id);
+ mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
+ flow_dv_matcher_create_cb,
+ flow_dv_matcher_match_cb,
+ flow_dv_matcher_remove_cb);
+ return &tbl_data->entry;
+}
+
+int
+flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry, uint64_t key64,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ union mlx5_flow_tbl_key key = { .v64 = key64 };
+
+ return tbl_data->table_id != key.table_id ||
+ tbl_data->dummy != key.dummy ||
+ tbl_data->is_transfer != key.domain ||
+ tbl_data->is_egress != key.direction;
+}
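
The hash key here packs table_id, dummy, domain and direction into one 64-bit value through a union, and the match callback follows the cache convention of returning 0 on a match. A sketch in the same spirit; the field layout below is hypothetical and differs from the real union mlx5_flow_tbl_key:

#include <stdint.h>
#include <stdio.h>

union tbl_key {
	struct {
		uint32_t table_id;
		uint32_t dummy:1;
		uint32_t domain:1;    /* transfer (FDB) */
		uint32_t direction:1; /* egress */
		uint32_t reserved:29;
	};
	uint64_t v64;
};

/* 0 means "match", like flow_dv_tbl_match_cb(). */
static int
tbl_match(const union tbl_key *entry, uint64_t key64)
{
	union tbl_key key = { .v64 = key64 };

	return entry->table_id != key.table_id ||
	       entry->dummy != key.dummy ||
	       entry->domain != key.domain ||
	       entry->direction != key.direction;
}

int main(void)
{
	union tbl_key a = { .v64 = 0 };
	union tbl_key b;

	a.table_id = 5;
	a.domain = 1;    /* FDB table 5 */
	b = a;
	b.direction = 1; /* same table id, egress side */
	printf("same=%d differ=%d\n",
	       tbl_match(&a, a.v64), tbl_match(&a, b.v64));
	return 0;
}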
/**
* Get a flow table.
* Direction of the table.
* @param[in] transfer
* E-Switch or NIC flow.
+ * @param[in] dummy
+ * Dummy entry for dv API.
* @param[out] error
* pointer to error structure.
*
* @return
* Returns the table resource based on the index, NULL in case of failure.
*/
-static struct mlx5_flow_tbl_resource *
+struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
uint32_t table_id, uint8_t egress,
uint8_t transfer,
+ bool external,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id, uint8_t dummy,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_tbl_resource *tbl;
union mlx5_flow_tbl_key table_key = {
{
.table_id = table_id,
- .reserved = 0,
+ .dummy = dummy,
.domain = !!transfer,
.direction = !!egress,
}
};
- struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
- table_key.v64);
+ struct mlx5_flow_tbl_tunnel_prm tt_prm = {
+ .tunnel = tunnel,
+ .group_id = group_id,
+ .external = external,
+ };
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = &tt_prm,
+ };
+ struct mlx5_hlist_entry *entry;
struct mlx5_flow_tbl_data_entry *tbl_data;
- uint32_t idx = 0;
- int ret;
- void *domain;
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- tbl = &tbl_data->tbl;
- rte_atomic32_inc(&tbl->refcnt);
- return tbl;
- }
- tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
- if (!tbl_data) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot allocate flow table data entry");
- return NULL;
- }
- tbl_data->idx = idx;
- tbl = &tbl_data->tbl;
- pos = &tbl_data->entry;
- if (transfer)
- domain = sh->fdb_domain;
- else if (egress)
- domain = sh->tx_domain;
- else
- domain = sh->rx_domain;
- tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
- if (!tbl->obj) {
+ entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
+ if (!entry) {
rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create flow table object");
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get table");
return NULL;
}
- /*
- * No multi-threads now, but still better to initialize the reference
- * count before insert it into the hash list.
- */
- rte_atomic32_init(&tbl->refcnt);
- /* Jump action reference count is initialized here. */
- rte_atomic32_init(&tbl_data->jump.refcnt);
- pos->key = table_key.v64;
- ret = mlx5_hlist_insert(sh->flow_tbls, pos);
- if (ret < 0) {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot insert flow table data entry");
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+ table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
+ tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ return &tbl_data->tbl;
+}
+
+void
+flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+
+ MLX5_ASSERT(entry && sh);
+ if (tbl_data->jump.action)
+ mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
+ if (tbl_data->tbl.obj)
+ mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
+ if (tbl_data->tunnel_offload && tbl_data->external) {
+ struct mlx5_hlist_entry *he;
+ struct mlx5_hlist *tunnel_grp_hash;
+ struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
+ union tunnel_tbl_key tunnel_key = {
+ .tunnel_id = tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ .group = tbl_data->group_id
+ };
+ uint32_t table_id = tbl_data->table_id;
+
+ tunnel_grp_hash = tbl_data->tunnel ?
+ tbl_data->tunnel->groups :
+ thub->groups;
+ he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
+ if (he)
+ mlx5_hlist_unregister(tunnel_grp_hash, he);
+ DRV_LOG(DEBUG,
+ "Table_id %u tunnel %u group %u released.",
+ table_id,
+ tbl_data->tunnel ?
+ tbl_data->tunnel->tunnel_id : 0,
+ tbl_data->group_id);
}
- rte_atomic32_inc(&tbl->refcnt);
- return tbl;
+ mlx5_cache_list_destroy(&tbl_data->matchers);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
/**
* Release a flow table.
*
- * @param[in] dev
- * Pointer to rte_eth_dev structure.
+ * @param[in] sh
+ * Pointer to device shared structure.
* @param[in] tbl
* Table resource to be released.
*
* Returns 0 if the table was released, else 1.
*/
static int
-flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
struct mlx5_flow_tbl_resource *tbl)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
if (!tbl)
return 0;
- if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
- struct mlx5_hlist_entry *pos = &tbl_data->entry;
-
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
- tbl->obj = NULL;
- /* remove the entry from the hash list and free memory. */
- mlx5_hlist_remove(sh->flow_tbls, pos);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
- tbl_data->idx);
- return 0;
+ return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
+}
+
+int
+flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_matcher *ref = ctx->data;
+ struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
+ entry);
+
+ return cur->crc != ref->crc ||
+ cur->priority != ref->priority ||
+ memcmp((const void *)cur->mask.buf,
+ (const void *)ref->mask.buf, ref->mask.size);
+}
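
The matcher comparison above is a cheap CRC and priority check followed by a full memcmp of the mask buffer only when those agree, again returning 0 for a match. A self-contained sketch of the same comparison:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct matcher {
	uint32_t crc;      /* checksum of the mask, computed once */
	uint16_t priority;
	uint8_t mask[16];
	size_t mask_size;
};

/* 0 means the cached matcher can be reused. */
static int
matcher_match(const struct matcher *cur, const struct matcher *ref)
{
	return cur->crc != ref->crc ||
	       cur->priority != ref->priority ||
	       memcmp(cur->mask, ref->mask, ref->mask_size);
}

int main(void)
{
	struct matcher a = {
		.crc = 0xabcd, .priority = 3,
		.mask = { 0xff, 0xff }, .mask_size = 16,
	};
	struct matcher b = a;

	printf("reuse=%d\n", matcher_match(&a, &b)); /* 0: cache hit */
	b.priority = 2;
	printf("reuse=%d\n", matcher_match(&a, &b)); /* 1: new matcher */
	return 0;
}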
+
+struct mlx5_cache_entry *
+flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_matcher *ref = ctx->data;
+ struct mlx5_flow_dv_matcher *cache;
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .match_mask = (void *)&ref->mask,
+ };
+ struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
+ typeof(*tbl), tbl);
+ int ret;
+
+ cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
+ if (!cache) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create matcher");
+ return NULL;
}
- return 1;
+ *cache = *ref;
+ dv_attr.match_criteria_enable =
+ flow_dv_matcher_enable(cache->mask.buf);
+ dv_attr.priority = ref->priority;
+ if (tbl->is_egress)
+ dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
+ &cache->matcher_object);
+ if (ret) {
+ mlx5_free(cache);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create matcher");
+ return NULL;
+ }
+ return &cache->entry;
}
/**
*/
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_matcher *matcher,
+ struct mlx5_flow_dv_matcher *ref,
union mlx5_flow_tbl_key *key,
struct mlx5_flow *dev_flow,
+ const struct mlx5_flow_tunnel *tunnel,
+ uint32_t group_id,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_dv_matcher *cache_matcher;
- struct mlx5dv_flow_matcher_attr dv_attr = {
- .type = IBV_FLOW_ATTR_NORMAL,
- .match_mask = (void *)&matcher->mask,
- };
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_dv_matcher *cache;
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = ref,
+ };
- tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
- key->domain, error);
+ /*
+ * The tunnel offload API requires this registration for cases when
+ * a tunnel match rule was inserted before the tunnel set rule.
+ */
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+ key->direction, key->domain,
+ dev_flow->external, tunnel,
+ group_id, 0, error);
if (!tbl)
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
- /* Lookup from cache. */
- LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
- if (matcher->crc == cache_matcher->crc &&
- matcher->priority == cache_matcher->priority &&
- !memcmp((const void *)matcher->mask.buf,
- (const void *)cache_matcher->mask.buf,
- cache_matcher->mask.size)) {
- DRV_LOG(DEBUG,
- "%s group %u priority %hd use %s "
- "matcher %p: refcnt %d++",
- key->domain ? "FDB" : "NIC", key->table_id,
- cache_matcher->priority,
- key->direction ? "tx" : "rx",
- (void *)cache_matcher,
- rte_atomic32_read(&cache_matcher->refcnt));
- rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->handle->dvh.matcher = cache_matcher;
- /* old matcher should not make the table ref++. */
- flow_dv_tbl_resource_release(dev, tbl);
- return 0;
- }
- }
- /* Register new matcher. */
- cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
- if (!cache_matcher) {
- flow_dv_tbl_resource_release(dev, tbl);
+ ref->tbl = tbl;
+ entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ if (!entry) {
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate matcher memory");
+ "cannot allocate ref memory");
}
- *cache_matcher = *matcher;
- dv_attr.match_criteria_enable =
- flow_dv_matcher_enable(cache_matcher->mask.buf);
- dv_attr.priority = matcher->priority;
- if (key->direction)
- dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
- if (!cache_matcher->matcher_object) {
- rte_free(cache_matcher);
-#ifdef HAVE_MLX5DV_DR
- flow_dv_tbl_resource_release(dev, tbl);
-#endif
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create matcher");
- }
- /* Save the table information */
- cache_matcher->tbl = tbl;
- rte_atomic32_init(&cache_matcher->refcnt);
- /* only matcher ref++, table ref++ already done above in get API. */
- rte_atomic32_inc(&cache_matcher->refcnt);
- LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->handle->dvh.matcher = cache_matcher;
- DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
- key->domain ? "FDB" : "NIC", key->table_id,
- cache_matcher->priority,
- key->direction ? "tx" : "rx", (void *)cache_matcher,
- rte_atomic32_read(&cache_matcher->refcnt));
+ cache = container_of(entry, typeof(*cache), entry);
+ dev_flow->handle->dvh.matcher = cache;
return 0;
}
+struct mlx5_hlist_entry *
+flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct rte_flow_error *error = ctx;
+ struct mlx5_flow_dv_tag_resource *entry;
+ uint32_t idx = 0;
+ int ret;
+
+ entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
+ if (!entry) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ entry->idx = idx;
+ entry->tag_id = key;
+ ret = mlx5_flow_os_create_flow_action_tag(key,
+ &entry->action);
+ if (ret) {
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ return NULL;
+ }
+ return &entry->entry;
+}
+
+int
+flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry, uint64_t key,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_flow_dv_tag_resource *tag =
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+ return key != tag->tag_id;
+}
+
/**
* Find existing tag resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
- /* Lookup a matching resource from cache. */
- entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+ entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
if (entry) {
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
- rte_atomic32_inc(&cache_resource->refcnt);
dev_flow->handle->dvh.rix_tag = cache_resource->idx;
dev_flow->dv.tag_resource = cache_resource;
- DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
return 0;
}
- /* Register new resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
- &dev_flow->handle->dvh.rix_tag);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- cache_resource->entry.key = (uint64_t)tag_be24;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
- if (!cache_resource->action) {
- rte_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
- mlx5_glue->destroy_flow_action(cache_resource->action);
- rte_free(cache_resource);
- return rte_flow_error_set(error, EEXIST,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot insert tag");
- }
- dev_flow->dv.tag_resource = cache_resource;
- DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- return 0;
+ return -rte_errno;
+}
+
+void
+flow_dv_tag_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_dv_tag_resource *tag =
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+ MLX5_ASSERT(tag && sh && tag->action);
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
+ DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
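/*
 * A minimal sketch, not part of the patch: the symmetric life cycle the tag
 * callbacks above plug into. Registration either bumps a reference on a
 * matching entry or invokes the create callback; unregistration drops the
 * reference and runs the remove callback at zero. Prototypes are
 * illustrative, not the mlx5_hlist API.
 */
#include <stdint.h>

struct hlist_sketch;       /* opaque hashed list, keyed by uint64_t */
struct hlist_entry_sketch; /* embedded node carrying a reference count */

struct hlist_entry_sketch *hlist_register_sketch(struct hlist_sketch *h,
						 uint64_t key);
int hlist_unregister_sketch2(struct hlist_sketch *h,
			     struct hlist_entry_sketch *e);

static int
tag_get_put_sketch(struct hlist_sketch *tag_table, uint64_t tag_be24)
{
	struct hlist_entry_sketch *e =
		hlist_register_sketch(tag_table, tag_be24);

	if (e == NULL)
		return -1; /* create callback failed */
	/* ... use the tag action ... */
	return hlist_unregister_sketch2(tag_table, e); /* remove_cb at 0 */
}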
/**
uint32_t tag_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *tag;
tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
if (!tag)
return 0;
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
- dev->data->port_id, (void *)tag,
- rte_atomic32_read(&tag->refcnt));
- if (rte_atomic32_dec_and_test(&tag->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action(tag->action));
- mlx5_hlist_remove(sh->tag_table, &tag->entry);
- DRV_LOG(DEBUG, "port %u tag %p: removed",
- dev->data->port_id, (void *)tag);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
- return 0;
- }
- return 1;
+ dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
+ return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
}
/**
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
#else
/*
 * Legacy mode, no LAG configuration is supported.
return 0;
}
+/**
+ * Create a counter with aging configuration.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] count
+ * Pointer to the counter action configuration.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ *
+ * @return
+ * Index to flow counter on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_counter(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_action_count *count,
+ const struct rte_flow_action_age *age)
+{
+ uint32_t counter;
+ struct mlx5_age_param *age_param;
+
+ if (count && count->shared)
+ counter = flow_dv_counter_get_shared(dev, count->id);
+ else
+ counter = flow_dv_counter_alloc(dev, !!age);
+ if (!counter || age == NULL)
+ return counter;
+ age_param = flow_dv_counter_idx_get_age(dev, counter);
+ age_param->context = age->context ? age->context :
+ (void *)(uintptr_t)(dev_flow->flow_idx);
+ age_param->timeout = age->timeout;
+ age_param->port_id = dev->data->port_id;
+ __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ return counter;
+}
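/*
 * A minimal sketch, not part of the patch: an AGE action as consumed above.
 * When the user leaves .context NULL, the driver substitutes the flow index
 * so the aged flow can still be identified. Values are illustrative.
 */
#include <rte_flow.h>

static const struct rte_flow_action_age age_conf_sketch = {
	.timeout = 10,   /* seconds without traffic before aging out */
	.context = NULL, /* NULL: driver stores the flow index instead */
};

static const struct rte_flow_action age_actions_sketch[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf_sketch },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};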
+
/**
* Add Tx queue matcher
*
}
/**
- * Fill the flow with DV spec, lock free
- * (mutex should be acquired by caller).
+ * Prepare an Rx Hash queue.
*
- * @param[in] dev
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hrxq_idx
+ * Hash Rx queue index.
+ *
+ * @return
+ * Pointer to the Rx hash queue object on success, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct mlx5_hrxq *
+flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint32_t *hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_handle *dh = dev_flow->handle;
+ struct mlx5_hrxq *hrxq;
+
+ MLX5_ASSERT(rss_desc->queue_num);
+ rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc->hash_fields = dev_flow->hash_fields;
+ rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
+ rss_desc->shared_rss = 0;
+ *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
+ if (!*hrxq_idx)
+ return NULL;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ *hrxq_idx);
+ return hrxq;
+}
+
+/**
+ * Release sample sub action resource.
+ *
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in, out] dev_flow
- * Pointer to the sub flow.
- * @param[in] attr
- * Pointer to the flow attributes.
- * @param[in] items
- * Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
+ * @param[in] act_res
+ * Pointer to sample sub action resource.
+ */
+static void
+flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_sub_actions_idx *act_res)
+{
+ if (act_res->rix_hrxq) {
+ mlx5_hrxq_release(dev, act_res->rix_hrxq);
+ act_res->rix_hrxq = 0;
+ }
+ if (act_res->rix_encap_decap) {
+ flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap);
+ act_res->rix_encap_decap = 0;
+ }
+ if (act_res->rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action);
+ act_res->rix_port_id_action = 0;
+ }
+ if (act_res->rix_tag) {
+ flow_dv_tag_release(dev, act_res->rix_tag);
+ act_res->rix_tag = 0;
+ }
+ if (act_res->rix_jump) {
+ flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
+ act_res->rix_jump = 0;
+ }
+}
+
+int
+flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_sample_resource *resource = ctx->data;
+ struct mlx5_flow_dv_sample_resource *cache_resource =
+ container_of(entry, typeof(*cache_resource), entry);
+
+ if (resource->ratio == cache_resource->ratio &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->ft_id == cache_resource->ft_id &&
+ resource->set_action == cache_resource->set_action &&
+ !memcmp((void *)&resource->sample_act,
+ (void *)&cache_resource->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list))) {
+ /*
+ * An existing sample action is reused, so release the
+ * references taken for the prepared sub-actions.
+ */
+ flow_dv_sample_sub_actions_release(dev,
+ &resource->sample_idx);
+ return 0;
+ }
+ return 1;
+}
+
+struct mlx5_cache_entry *
+flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_sample_resource *resource = ctx->data;
+ void **sample_dv_actions = resource->sub_actions;
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5dv_dr_flow_sampler_attr sampler_attr;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_resource *tbl;
+ uint32_t idx = 0;
+ const uint32_t next_ft_step = 1;
+ uint32_t next_ft_id = resource->ft_id + next_ft_step;
+ uint8_t is_egress = 0;
+ uint8_t is_transfer = 0;
+ struct rte_flow_error *error = ctx->error;
+
+ /* Register new sample resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
+ if (!cache_resource) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ *cache_resource = *resource;
+ /* Create normal path table level */
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ is_transfer = 1;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ is_egress = 1;
+ tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
+ is_egress, is_transfer,
+ true, NULL, 0, 0, error);
+ if (!tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "fail to create normal path table "
+ "for sample");
+ goto error;
+ }
+ cache_resource->normal_path_tbl = tbl;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (!sh->default_miss_action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "default miss action was not "
+ "created");
+ goto error;
+ }
+ sample_dv_actions[resource->sample_act.actions_num++] =
+ sh->default_miss_action;
+ }
+ /* Create a DR sample action */
+ sampler_attr.sample_ratio = cache_resource->ratio;
+ sampler_attr.default_next_table = tbl->obj;
+ sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+ sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
+ &sample_dv_actions[0];
+ sampler_attr.action = cache_resource->set_action;
+ if (mlx5_os_flow_dr_create_flow_action_sampler
+ (&sampler_attr, &cache_resource->verbs_action)) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create sample action");
+ goto error;
+ }
+ cache_resource->idx = idx;
+ cache_resource->dev = dev;
+ return &cache_resource->entry;
+error:
+ if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx);
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ cache_resource->normal_path_tbl);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
+ return NULL;
+}
+
+/**
+ * Find existing sample resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] resource
+ * Pointer to sample resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
 * @param[out] error
 * Pointer to the error structure.
 *
 * @return
 * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-__flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_sample_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
{
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5_cache_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *dev_conf = &priv->config;
- struct rte_flow *flow = dev_flow->flow;
- struct mlx5_flow_handle *handle = dev_flow->handle;
- struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
- priv->rss_desc)
- [!!priv->flow_nested_idx];
- uint64_t item_flags = 0;
- uint64_t last_item = 0;
- uint64_t action_flags = 0;
- uint64_t priority = attr->priority;
- struct mlx5_flow_dv_matcher matcher = {
- .mask = {
- .size = sizeof(matcher.mask.buf),
- },
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = resource,
};
- int actions_n = 0;
- bool actions_end = false;
- union {
- struct mlx5_flow_dv_modify_hdr_resource res;
- uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
- sizeof(struct mlx5_modification_cmd) *
- (MLX5_MAX_MODIFY_NUM + 1)];
- } mhdr_dummy;
- struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
- union flow_dv_attr flow_attr = { .attr = 0 };
- uint32_t tag_be;
- union mlx5_flow_tbl_key tbl_key;
- uint32_t modify_action_position = UINT32_MAX;
- void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
- uint8_t next_protocol = 0xff;
- struct rte_vlan_hdr vlan = { 0 };
- uint32_t table;
- int ret = 0;
-
- mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
- MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
- !!priv->fdb_def_rule, &table, error);
- if (ret)
- return ret;
- dev_flow->dv.group = table;
- if (attr->transfer)
- mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
- if (priority == MLX5_FLOW_PRIO_RSVD)
- priority = dev_conf->flow_prio - 1;
- /* number of actions must be set to 0 in case of dirty stack. */
- mhdr_res->actions_num = 0;
- for (; !actions_end ; actions++) {
- const struct rte_flow_action_queue *queue;
- const struct rte_flow_action_rss *rss;
- const struct rte_flow_action *action = actions;
- const struct rte_flow_action_count *count = action->conf;
- const uint8_t *rss_key;
- const struct rte_flow_action_jump *jump_data;
- const struct rte_flow_action_meter *mtr;
- struct mlx5_flow_tbl_resource *tbl;
- uint32_t port_id = 0;
- struct mlx5_flow_dv_port_id_action_resource port_id_resource;
- int action_type = actions->type;
- const struct rte_flow_action *found_action = NULL;
- struct mlx5_flow_meter *fm = NULL;
- switch (action_type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_PORT_ID:
- if (flow_dv_translate_action_port_id(dev, action,
- &port_id, error))
- return -rte_errno;
- memset(&port_id_resource, 0, sizeof(port_id_resource));
- port_id_resource.port_id = port_id;
- if (flow_dv_port_id_action_resource_register
- (dev, &port_id_resource, dev_flow, error))
- return -rte_errno;
- MLX5_ASSERT(!handle->rix_port_id_action);
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
- action_flags |= MLX5_FLOW_ACTION_PORT_ID;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- action_flags |= MLX5_FLOW_ACTION_FLAG;
- dev_flow->handle->mark = 1;
- if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
- struct rte_flow_action_mark mark = {
- .id = MLX5_FLOW_MARK_DEFAULT,
- };
+ entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache_resource = container_of(entry, typeof(*cache_resource), entry);
+ dev_flow->handle->dvh.rix_sample = cache_resource->idx;
+ dev_flow->dv.sample_res = cache_resource;
+ return 0;
+}
- if (flow_dv_convert_action_mark(dev, &mark,
- mhdr_res,
- error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
- break;
- }
- tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- /*
- * Only one FLAG or MARK is supported per device flow
- * right now. So the pointer to the tag resource must be
- * zero before the register process.
- */
- MLX5_ASSERT(!handle->dvh.rix_tag);
- if (flow_dv_tag_resource_register(dev, tag_be,
- dev_flow, error))
- return -rte_errno;
- MLX5_ASSERT(dev_flow->dv.tag_resource);
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- action_flags |= MLX5_FLOW_ACTION_MARK;
- dev_flow->handle->mark = 1;
- if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
- const struct rte_flow_action_mark *mark =
- (const struct rte_flow_action_mark *)
- actions->conf;
+int
+flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource =
+ container_of(entry, typeof(*cache_resource), entry);
+ uint32_t idx = 0;
- if (flow_dv_convert_action_mark(dev, mark,
- mhdr_res,
- error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
- break;
- }
- /* Fall-through */
- case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
- /* Legacy (non-extensive) MARK action. */
- tag_be = mlx5_flow_mark_set
- (((const struct rte_flow_action_mark *)
- (actions->conf))->id);
- MLX5_ASSERT(!handle->dvh.rix_tag);
- if (flow_dv_tag_resource_register(dev, tag_be,
- dev_flow, error))
- return -rte_errno;
- MLX5_ASSERT(dev_flow->dv.tag_resource);
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_META:
- if (flow_dv_convert_action_set_meta
- (dev, mhdr_res, attr,
- (const struct rte_flow_action_set_meta *)
- actions->conf, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_META;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_TAG:
- if (flow_dv_convert_action_set_tag
- (dev, mhdr_res,
- (const struct rte_flow_action_set_tag *)
- actions->conf, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_TAG;
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- action_flags |= MLX5_FLOW_ACTION_DROP;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
+ if (resource->num_of_dest == cache_resource->num_of_dest &&
+ resource->ft_type == cache_resource->ft_type &&
+ !memcmp((void *)cache_resource->sample_act,
+ (void *)resource->sample_act,
+ (resource->num_of_dest *
+ sizeof(struct mlx5_flow_sub_actions_list)))) {
+ /*
+ * An existing destination array action is reused, so release
+ * the references taken for the prepared sub-actions.
+ */
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ flow_dv_sample_sub_actions_release(dev,
+ &resource->sample_idx[idx]);
+ return 0;
+ }
+ return 1;
+}
+
+struct mlx5_cache_entry *
+flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct rte_eth_dev *dev = ctx->dev;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+ struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0, res_idx = 0;
+ struct rte_flow_error *error = ctx->error;
+ uint64_t action_flags;
+ int ret;
+
+ /* Register new destination array resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &res_idx);
+ if (!cache_resource) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ *cache_resource = *resource;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5dv_dr_action_dest_attr),
+ 0, SOCKET_ID_ANY);
+ if (!dest_attr[idx]) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ goto error;
+ }
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+ sample_act = &resource->sample_act[idx];
+ action_flags = sample_act->action_flags;
+ switch (action_flags) {
+ case MLX5_FLOW_ACTION_QUEUE:
+ dest_attr[idx]->dest = sample_act->dr_queue_action;
+ break;
+ case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+ dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+ dest_attr[idx]->dest_reformat->reformat =
+ sample_act->dr_encap_action;
+ dest_attr[idx]->dest_reformat->dest =
+ sample_act->dr_port_id_action;
+ break;
+ case MLX5_FLOW_ACTION_PORT_ID:
+ dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ break;
+ case MLX5_FLOW_ACTION_JUMP:
+ dest_attr[idx]->dest = sample_act->dr_jump_action;
break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported actions type");
+ goto error;
+ }
+ }
+ /* Create a destination array action. */
+ ret = mlx5_os_flow_dr_create_flow_action_dest_array
+ (domain,
+ cache_resource->num_of_dest,
+ dest_attr,
+ &cache_resource->action);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create destination array action");
+ goto error;
+ }
+ cache_resource->idx = res_idx;
+ cache_resource->dev = dev;
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ mlx5_free(dest_attr[idx]);
+ return &cache_resource->entry;
+error:
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ struct mlx5_flow_sub_actions_idx *act_res =
+ &cache_resource->sample_idx[idx];
+ if (act_res->rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ act_res->rix_hrxq))
+ act_res->rix_hrxq = 0;
+ if (act_res->rix_encap_decap &&
+ !flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap))
+ act_res->rix_encap_decap = 0;
+ if (act_res->rix_port_id_action &&
+ !flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action))
+ act_res->rix_port_id_action = 0;
+ if (act_res->rix_jump &&
+ !flow_dv_jump_tbl_resource_release(dev,
+ act_res->rix_jump))
+ act_res->rix_jump = 0;
+ if (dest_attr[idx])
+ mlx5_free(dest_attr[idx]);
+ }
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
+ return NULL;
+}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] resource
+ * Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ .data = resource,
+ };
+
+ entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+ if (!entry)
+ return -rte_errno;
+ cache_resource = container_of(entry, typeof(*cache_resource), entry);
+ dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
+ dev_flow->dv.dest_array_res = cache_resource;
+ return 0;
+}
+
+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to sample action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ * Pointer to the num of destination.
+ * @param[in, out] sample_actions
+ * Pointer to sample actions list.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_action_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_action_sample *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t *num_of_dest,
+ void **sample_actions,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action *sub_actions;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5_flow_sub_actions_idx *sample_idx;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t action_flags = 0;
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc;
+ sample_act = &res->sample_act;
+ sample_idx = &res->sample_idx;
+ res->ratio = action->ratio;
+ sub_actions = action->actions;
+ for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
+ int type = sub_actions->type;
+ uint32_t pre_rix = 0;
+ void *pre_r;
+ switch (type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
- queue = actions->conf;
+ {
+ const struct rte_flow_action_queue *queue;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ queue = sub_actions->conf;
rss_desc->queue_num = 1;
rss_desc->queue[0] = queue->index;
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
break;
+ }
case RTE_FLOW_ACTION_TYPE_RSS:
- rss = actions->conf;
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+ const struct rte_flow_action_rss *rss;
+ const uint8_t *rss_key;
+
+ rss = sub_actions->conf;
memcpy(rss_desc->queue, rss->queue,
rss->queue_num * sizeof(uint16_t));
rss_desc->queue_num = rss->queue_num;
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
*/
+ flow_dv_hashfields_set(dev_flow, rss_desc);
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
action_flags |= MLX5_FLOW_ACTION_RSS;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ uint32_t tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (sub_actions->conf))->id);
+
+ dev_flow->handle->mark = 1;
+ pre_rix = dev_flow->handle->dvh.rix_tag;
+ /* Save the mark resource before sample */
+ pre_r = dev_flow->dv.tag_resource;
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ sample_act->dr_tag_action =
+ dev_flow->dv.tag_resource->action;
+ sample_idx->rix_tag =
+ dev_flow->handle->dvh.rix_tag;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_tag_action;
+ /* Recover the mark resource after sample */
+ dev_flow->dv.tag_resource = pre_r;
+ dev_flow->handle->dvh.rix_tag = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
break;
+ }
case RTE_FLOW_ACTION_TYPE_COUNT:
- if (!dev_conf->devx) {
- rte_errno = ENOTSUP;
- goto cnt_err;
+ {
+ if (!flow->counter) {
+ flow->counter =
+ flow_dv_translate_create_counter(dev,
+ dev_flow, sub_actions->conf,
+ 0);
+ if (!flow->counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
}
- flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->dv.group);
- if (!flow->counter)
- goto cnt_err;
- dev_flow->dv.actions[actions_n++] =
+ sample_act->dr_cnt_action =
(flow_dv_counter_get_by_idx(dev,
flow->counter, NULL))->action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_cnt_action;
action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
-cnt_err:
- if (rte_errno == ENOTSUP)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "count action not supported");
- else
- return rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "cannot create counter"
- " object.");
- break;
- case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- dev_flow->dv.actions[actions_n++] =
- priv->sh->pop_vlan_action;
- action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
- break;
- case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- if (!(action_flags &
- MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
- flow_dev_get_vlan_info_from_items(items, &vlan);
- vlan.eth_proto = rte_be_to_cpu_16
- ((((const struct rte_flow_action_of_push_vlan *)
- actions->conf)->ethertype));
- found_action = mlx5_flow_find_action
- (actions + 1,
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
- if (found_action)
- mlx5_update_vlan_vid_pcp(found_action, &vlan);
- found_action = mlx5_flow_find_action
- (actions + 1,
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
- if (found_action)
- mlx5_update_vlan_vid_pcp(found_action, &vlan);
- if (flow_dv_create_action_push_vlan
- (dev, attr, &vlan, dev_flow, error))
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
- action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
- /* of_vlan_push action handled this action */
- MLX5_ASSERT(action_flags &
- MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
- if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Not support for sampler action");
+ }
+ }
+ sample_act->action_flags = action_flags;
+ res->ft_id = dev_flow->dv.group;
+ if (attr->transfer) {
+ union {
+ uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
+ uint64_t set_action;
+ } action_ctx = { .set_action = 0 };
+
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ MLX5_SET(set_action_in, action_ctx.action_in, action_type,
+ MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, action_ctx.action_in, field,
+ MLX5_MODI_META_REG_C_0);
+ MLX5_SET(set_action_in, action_ctx.action_in, data,
+ priv->vport_meta_tag);
+ res->set_action = action_ctx.set_action;
+ } else if (attr->ingress) {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ } else {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
+ }
+ return 0;
+}
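/*
 * A minimal sketch, not part of the patch: the 64-bit set_action built
 * above packs one MLX5_MODIFICATION_TYPE_SET command that writes the vport
 * metadata tag into metadata register C0. The bit offsets below are
 * illustrative only; the real layout comes from the PRM via MLX5_SET().
 */
#include <stdint.h>

static uint64_t
pack_set_action_sketch(uint8_t action_type, uint8_t field, uint32_t data)
{
	uint64_t w = 0;

	w |= (uint64_t)action_type << 60; /* e.g. MODIFICATION_TYPE_SET */
	w |= (uint64_t)field << 48;       /* e.g. META_REG_C_0 */
	w |= data;                        /* e.g. priv->vport_meta_tag */
	return w;
}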
+
+/**
+ * Create the sample action and resources (a destination array when
+ * mirroring).
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] num_of_dest
+ * The num of destination.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_sample(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
+{
+ /* Update the normal path action resources in the last index of the array. */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ MLX5_ASSERT(wks);
+ rss_desc = &wks->rss_desc;
+ if (num_of_dest > 1) {
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring */
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ }
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_jump =
+ dev_flow->handle->rix_jump;
+ sample_act->dr_jump_action =
+ dev_flow->dv.jump->action;
+ dev_flow->handle->rix_jump = 0;
+ }
+ sample_act->actions_num = normal_idx;
+ /* Update the sample action resources in the first index of the array. */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
+ res->sub_actions = sample_actions;
+ if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
+ }
+ return 0;
+}
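/*
 * A minimal sketch, not part of the patch: the fate selection implemented
 * above. A single destination only samples, so a plain sampler object is
 * enough; more than one destination means mirroring and needs a
 * destination-array action. Both helpers named below are hypothetical
 * stand-ins for the two register functions above.
 */
int register_sampler_sketch(void);
int register_dest_array_sketch(void);

static int
sample_fate_sketch(unsigned int num_of_dest)
{
	if (num_of_dest > 1)
		return register_dest_array_sketch(); /* mirroring */
	return register_sampler_sketch(); /* sampling only */
}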
+
+/**
+ * Remove an ASO age action from age actions list.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age
+ * Pointer to the aso age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action *age)
+{
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param = &age->age_params;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
+
+ age_info = GET_PORT_AGE_INFO(priv);
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ /*
+ * We need the lock even when the age timeout expired,
+ * since the age action may still be in use.
+ */
+ rte_spinlock_lock(&age_info->aged_sl);
+ LIST_REMOVE(age, next);
+ rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ }
+}
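/*
 * A minimal sketch, not part of the patch: the state handoff above.
 * Releasing a candidate tries to flip CANDIDATE -> FREE with a single CAS;
 * if the aging thread already queued the action as aged-out, the caller
 * must unlink it under the list lock before marking it FREE. The unlink
 * itself is elided; names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

enum { AGE_FREE_S, AGE_CANDIDATE_S, AGE_TMOUT_S };

static void
release_age_state_sketch(uint16_t *state)
{
	uint16_t expected = AGE_CANDIDATE_S;

	if (!__atomic_compare_exchange_n(state, &expected, AGE_FREE_S,
					 false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/* Lost the race: unlink from the aged list (under its
		 * lock, elided here), then publish the FREE state.
		 */
		__atomic_store_n(state, AGE_FREE_S, __ATOMIC_RELAXED);
	}
}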
+
+/**
+ * Release an ASO age action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ * Index of ASO age action to release.
+ *
+ * @return
+ * 0 when age action was removed, otherwise the number of references.
+ */
+static int
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
+ uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
+
+ if (!ret) {
+ flow_dv_aso_age_remove_from_age(dev, age);
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ }
+ return ret;
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ void *old_pools = mng->pools;
+ uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+ uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+ if (!pools) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+ if (old_pools) {
+ memcpy(pools, old_pools,
+ mng->n * sizeof(struct mlx5_aso_age_pool *));
+ mlx5_free(old_pools);
+ } else {
+ /* First ASO flow hit allocation - starting ASO data-path. */
+ int ret = mlx5_aso_queue_start(priv->sh);
+
+ if (ret) {
+ mlx5_free(pools);
+ return ret;
+ }
+ }
+ mng->n = resize;
+ mng->pools = pools;
+ return 0;
+}
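/*
 * A minimal sketch, not part of the patch: the grow-by-chunk pattern above.
 * The pool pointer array is reallocated with a fixed stride and existing
 * slots are copied over. The stride and plain calloc/free are illustrative;
 * the driver uses mlx5_malloc() and MLX5_CNT_CONTAINER_RESIZE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define RESIZE_STRIDE_SKETCH 64

static int
grow_pools_sketch(void ***pools, uint32_t *n)
{
	uint32_t resize = *n + RESIZE_STRIDE_SKETCH;
	void **p = calloc(resize, sizeof(void *));

	if (p == NULL)
		return -1;
	if (*pools != NULL) {
		memcpy(p, *pools, *n * sizeof(void *)); /* keep old slots */
		free(*pools);
	}
	*pools = p;
	*n = resize;
	return 0;
}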
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ * Where to store the pointer to a new age action.
+ *
+ * @return
+ * The age actions pool pointer and @p age_free is set on success,
+ * NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+ struct mlx5_aso_age_action **age_free)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+ struct mlx5_aso_age_pool *pool = NULL;
+ struct mlx5_devx_obj *obj = NULL;
+ uint32_t i;
+
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+ priv->sh->pdn);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+ return NULL;
+ }
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ pool->flow_hit_aso_obj = obj;
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+ rte_spinlock_lock(&mng->resize_sl);
+ pool->index = mng->next;
+ /* Resize pools array if there is no room for the new pool in it. */
+ if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+ claim_zero(mlx5_devx_cmd_destroy(obj));
+ mlx5_free(pool);
+ rte_spinlock_unlock(&mng->resize_sl);
+ return NULL;
+ }
+ mng->pools[pool->index] = pool;
+ mng->next++;
+ rte_spinlock_unlock(&mng->resize_sl);
+ /* Assign the first action in the new pool, the rest go to free list. */
+ *age_free = &pool->actions[0];
+ for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+ pool->actions[i].offset = i;
+ LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+ }
+ return pool;
+}
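/*
 * A minimal sketch, not part of the patch: carving a freshly created pool.
 * Slot 0 is handed straight to the caller while slots 1..N-1 are threaded
 * onto the free list for later allocations, mirroring the loop above.
 * Structures and the pool size are illustrative.
 */
#include <stddef.h>

#define ACTIONS_PER_POOL_SKETCH 64

struct age_action_sketch {
	struct age_action_sketch *next;
	int offset;
};

struct age_pool_sketch {
	struct age_action_sketch actions[ACTIONS_PER_POOL_SKETCH];
};

static struct age_action_sketch *
carve_pool_sketch(struct age_pool_sketch *p,
		  struct age_action_sketch **free_list)
{
	int i;

	for (i = ACTIONS_PER_POOL_SKETCH - 1; i >= 1; i--) {
		p->actions[i].offset = i;
		p->actions[i].next = *free_list;
		*free_list = &p->actions[i];
	}
	p->actions[0].offset = 0;
	return &p->actions[0]; /* the first action goes to the caller */
}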
+
+/**
+ * Allocate an ASO aging bit.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_aso_age_pool *pool;
+ struct mlx5_aso_age_action *age_free = NULL;
+ struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+ MLX5_ASSERT(mng);
+ /* Try to get the next free age action bit. */
+ rte_spinlock_lock(&mng->free_sl);
+ age_free = LIST_FIRST(&mng->free);
+ if (age_free) {
+ LIST_REMOVE(age_free, next);
+ } else if (!flow_dv_age_pool_create(dev, &age_free)) {
+ rte_spinlock_unlock(&mng->free_sl);
+ rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to create ASO age pool");
+ return 0; /* 0 is an error. */
+ }
+ rte_spinlock_unlock(&mng->free_sl);
+ pool = container_of
+ ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
+ (age_free - age_free->offset), const struct mlx5_aso_age_pool,
+ actions);
+ if (!age_free->dr_action) {
+ int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
+ error);
+
+ if (reg_c < 0) {
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to get reg_c "
+ "for ASO flow hit");
+ return 0; /* 0 is an error. */
+ }
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+ age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
+ (priv->sh->rx_domain,
+ pool->flow_hit_aso_obj->obj, age_free->offset,
+ MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
+ (reg_c - REG_C_0));
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
+ if (!age_free->dr_action) {
+ rte_errno = errno;
+ rte_spinlock_lock(&mng->free_sl);
+ LIST_INSERT_HEAD(&mng->free, age_free, next);
+ rte_spinlock_unlock(&mng->free_sl);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "failed to create ASO "
+ "flow hit action");
+ return 0; /* 0 is an error. */
+ }
+ }
+ __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ return pool->index | ((age_free->offset + 1) << 16);
+}
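/*
 * A minimal sketch, not part of the patch: the index returned above packs
 * the pool index in the low 16 bits and (offset + 1) in the upper bits,
 * keeping 0 free as the error value. The decode below is the mirror of
 * that encode; names are illustrative.
 */
#include <stdint.h>

static uint32_t
age_idx_encode_sketch(uint16_t pool_idx, uint16_t offset)
{
	return (uint32_t)pool_idx | (uint32_t)(offset + 1) << 16;
}

static void
age_idx_decode_sketch(uint32_t idx, uint16_t *pool_idx, uint16_t *offset)
{
	*pool_idx = idx & 0xffff;
	*offset = (uint16_t)((idx >> 16) - 1);
}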
+
+/**
+ * Create an age action using the ASO mechanism.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Index to the ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+ const struct rte_flow_action_age *age,
+ struct rte_flow_error *error)
+{
+ uint32_t age_idx = 0;
+ struct mlx5_aso_age_action *aso_age;
+
+ age_idx = flow_dv_aso_age_alloc(dev, error);
+ if (!age_idx)
+ return 0;
+ aso_age = flow_aso_age_get_by_idx(dev, age_idx);
+ aso_age->age_params.context = age->context;
+ aso_age->age_params.timeout = age->timeout;
+ aso_age->age_params.port_id = dev->data->port_id;
+ __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ return age_idx;
+}
+
+/**
+ * Fill the flow with DV spec.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ uint64_t action_flags = 0;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
+ struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *age = NULL;
+ union flow_dv_attr flow_attr = { .attr = 0 };
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
+ uint32_t modify_action_position = UINT32_MAX;
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
+ struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ const struct rte_flow_action_sample *sample = NULL;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
+ uint32_t table;
+ int ret = 0;
+ const struct mlx5_flow_tunnel *tunnel;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow->external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .skip_scale = dev_flow->skip_scale &
+ (1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ };
+
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ rss_desc = &wks->rss_desc;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* Update the normal path action resources in the last index of the array. */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
+ tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
+ flow_items_to_tunnel(items) :
+ is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
+ flow_actions_to_tunnel(actions) :
+ dev_flow->tunnel ? dev_flow->tunnel : NULL;
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, tunnel, attr, items, actions);
+ ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ &grp_info, error);
+ if (ret)
+ return ret;
+ dev_flow->dv.group = table;
+ if (attr->transfer)
+ mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ /* number of actions must be set to 0 in case of dirty stack. */
+ mhdr_res->actions_num = 0;
+ if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
+ /*
+ * Do not add a decap action if the match rule drops packets:
+ * HW rejects rules combining decap & drop.
+ *
+ * If a tunnel match rule was inserted before the matching tunnel
+ * set rule, the flow table used in the match rule must be
+ * registered. The current implementation handles that at the end
+ * of this function in flow_dv_matcher_register().
+ */
+ bool add_decap = true;
+ const struct rte_flow_action *ptr = actions;
+
+ for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+ if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ add_decap = false;
break;
- flow_dev_get_vlan_info_from_items(items, &vlan);
- mlx5_update_vlan_vid_pcp(actions, &vlan);
- /* If no VLAN push - this is a modify header action */
- if (flow_dv_convert_action_modify_vlan_vid
- (mhdr_res, actions, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
- break;
- case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
- case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (flow_dv_create_action_l2_encap(dev, actions,
- dev_flow,
- attr->transfer,
- error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
- action_flags |= MLX5_FLOW_ACTION_ENCAP;
- break;
- case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
- case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ }
+ }
+ if (add_decap) {
if (flow_dv_create_action_l2_decap(dev, dev_flow,
attr->transfer,
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ }
+ }
+ for (; !actions_end ; actions++) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action = actions;
+ const uint8_t *rss_key;
+ const struct rte_flow_action_meter *mtr;
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_aso_age_action *age_act;
+ uint32_t port_id = 0;
+ struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
+ struct mlx5_flow_meter *fm = NULL;
+ uint32_t jump_group = 0;
+
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ switch (action_type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ if (flow_dv_translate_action_port_id(dev, action,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.port_id_action->action;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ struct rte_flow_action_mark mark = {
+ .id = MLX5_FLOW_MARK_DEFAULT,
+ };
+
+ if (flow_dv_convert_action_mark(dev, &mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now. So the pointer to the tag resource must be
+ * zero before the register process.
+ */
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (flow_dv_convert_action_mark(dev, mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ /* Legacy (non-extensive) MARK action. */
+ tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (actions->conf))->id);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ if (flow_dv_convert_action_set_meta
+ (dev, mhdr_res, attr,
+ (const struct rte_flow_action_set_meta *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ if (flow_dv_convert_action_set_tag
+ (dev, mhdr_res,
+ (const struct rte_flow_action_set_tag *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ memcpy(rss_desc->queue, rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+			 * rss->level and rss->types should be set in advance
+ * when expanding items for RSS.
+ */
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ dev_flow->handle->fate_action = rss_desc->shared_rss ?
+ MLX5_FLOW_FATE_SHARED_RSS :
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
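+			/* The action conf carries the index of a pre-created
+			 * ASO age action.
+			 */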
+ flow->age = (uint32_t)(uintptr_t)(action->conf);
+ age_act = flow_aso_age_get_by_idx(dev, flow->age);
+ __atomic_fetch_add(&age_act->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->dv.actions[actions_n++] = age_act->dr_action;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ if (priv->sh->flow_hit_aso_en && attr->group) {
+ /*
+ * Create one shared age action, to be used
+ * by all sub-flows.
+ */
+ if (!flow->age) {
+ flow->age =
+ flow_dv_translate_create_aso_age
+ (dev, action->conf,
+ error);
+ if (!flow->age)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create ASO age action");
+ }
+ dev_flow->dv.actions[actions_n++] =
+ (flow_aso_age_get_by_idx
+ (dev, flow->age))->dr_action;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ break;
+ }
+ /* Fall-through */
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (!dev_conf->devx) {
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "count action not supported");
+ }
+ /* Save information first, will apply later. */
+ if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
+ count = action->conf;
+ else
+ age = action->conf;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ dev_flow->dv.actions[actions_n++] =
+ priv->sh->pop_vlan_action;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ if (!(action_flags &
+ MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan.eth_proto = rte_be_to_cpu_16
+ ((((const struct rte_flow_action_of_push_vlan *)
+ actions->conf)->ethertype));
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ if (flow_dv_create_action_push_vlan
+ (dev, attr, &vlan, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.push_vlan_res->action;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+			/* The OF_PUSH_VLAN action already handled this one. */
+ MLX5_ASSERT(action_flags &
+ MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ break;
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ mlx5_update_vlan_vid_pcp(actions, &vlan);
+ /* If no VLAN push - this is a modify header action */
+ if (flow_dv_convert_action_modify_vlan_vid
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (flow_dv_create_action_l2_encap(dev, actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- jump_data = action->conf;
- ret = mlx5_flow_group_to_table(attr, dev_flow->external,
- jump_data->group,
- !!priv->fdb_def_rule,
- &table, error);
+ jump_group = ((const struct rte_flow_action_jump *)
+ action->conf)->group;
+ grp_info.std_tbl_fix = 0;
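+			/* Propagate the per-flow request to skip group
+			 * scaling for the jump target.
+			 */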
+ if (dev_flow->skip_scale &
+ (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
+ grp_info.skip_scale = 1;
+ else
+ grp_info.skip_scale = 0;
+ ret = mlx5_flow_group_to_table(dev, tunnel,
+ jump_group,
+ &table,
+ &grp_info, error);
if (ret)
return ret;
- tbl = flow_dv_tbl_resource_get(dev, table,
- attr->egress,
- attr->transfer, error);
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external,
+ tunnel, jump_group, 0,
+ error);
if (!tbl)
return rte_flow_error_set
(error, errno,
"cannot create jump action.");
if (flow_dv_jump_tbl_resource_register
(dev, tbl, dev_flow, error)) {
- flow_dv_tbl_resource_release(dev, tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
return rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_ACTION,
dev_flow->dv.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
+ break;
case RTE_FLOW_ACTION_TYPE_METER:
mtr = actions->conf;
if (!flow->meter) {
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ sample_act_pos = actions_n;
+ sample = (const struct rte_flow_action_sample *)
+ action->conf;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+			/* Put encap action into group if used with port ID. */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ if (flow_dv_convert_action_modify_field
+ (dev, mhdr_res, actions, attr, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- handle->dvh.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->action;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+ /*
+ * Create one count action, to be used
+ * by all sub-flows.
+ */
+ if (!flow->counter) {
+ flow->counter =
+ flow_dv_translate_create_counter
+ (dev, dev_flow, count,
+ age);
+ if (!flow->counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "cannot create counter"
+ " object.");
+ }
+ dev_flow->dv.actions[actions_n] =
+ (flow_dv_counter_get_by_idx(dev,
+ flow->counter, NULL))->action;
+ actions_n++;
}
- break;
default:
break;
}
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
- dev_flow->dv.actions_n = actions_n;
- dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
+ if (!mlx5_flow_os_item_supported(item_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
- flow_dv_translate_item_port_id(dev, match_mask,
- match_value, items);
+ flow_dv_translate_item_port_id
+ (dev, match_mask, match_value, items, attr);
last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
+ items, tunnel,
+ dev_flow->dv.group);
+ matcher.priority = action_flags &
+ MLX5_FLOW_ACTION_DEFAULT_MISS &&
+ !dev_flow->external ?
+ MLX5_PRIORITY_MAP_L3 :
+ MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(dev_flow,
match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
next_protocol = 0xff;
}
break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ flow_dv_translate_item_ipv6_frag_ext(match_mask,
+ match_value,
+ items, tunnel);
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
items, tunnel);
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
+ match_value,
+ items, error);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GENEVE TLV option");
+ flow->geneve_tlv_option = 1;
+ last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ ret = flow_dv_translate_item_gtp_psc(match_mask,
+ match_value,
+ items);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GTP PSC item");
+ last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ if (!mlx5_flex_parser_ecpri_exist(dev)) {
+ /* Create it only the first time to be used. */
+ ret = mlx5_flex_parser_ecpri_alloc(dev);
+ if (ret)
+ return rte_flow_error_set
+ (error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "cannot create eCPRI parser");
+ }
+ /* Adjust the length matcher and device flow value. */
+ matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ dev_flow->dv.value.size =
+ MLX5_ST_SZ_BYTES(fte_match_param);
+ flow_dv_translate_item_ecpri(dev, match_mask,
+ match_value, items);
+ /* No other protocol should follow eCPRI layer. */
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
break;
}
if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
(priv->representor || priv->master)) {
if (flow_dv_translate_item_port_id(dev, match_mask,
- match_value, NULL))
+ match_value, NULL, attr))
return -rte_errno;
}
#ifdef RTE_LIBRTE_MLX5_DEBUG
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow, rss_desc);
- /* Register matcher. */
+	/* If the sample action contains an RSS action, the Sample/Mirror
+	 * resource should be registered after the hash fields are updated.
+	 */
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ ret = flow_dv_translate_action_sample(dev,
+ sample,
+ dev_flow, attr,
+ &num_of_dest,
+ sample_actions,
+ &sample_res,
+ error);
+ if (ret < 0)
+ return ret;
+ ret = flow_dv_create_action_sample(dev,
+ dev_flow,
+ num_of_dest,
+ &sample_res,
+ &mdest_res,
+ sample_actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create sample action");
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.sample_res->verbs_action;
+ }
+ }
+	/*
+	 * For multiple destinations (sample action with ratio=1), the encap
+	 * action and port ID action will be combined into a group action,
+	 * so the original actions must be removed from the flow and only
+	 * the sample action used instead.
+	 */
+ if (num_of_dest > 1 &&
+ (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_jump_action &&
+ sample_act->dr_jump_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
+ dev_flow->dv.actions_n = actions_n;
+ dev_flow->act_flags = action_flags;
+ /* Register matcher. */
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
- matcher.priority = mlx5_flow_adjust_priority(dev, priority,
- matcher.priority);
+ matcher.priority = mlx5_get_matcher_priority(dev, attr,
+ matcher.priority);
/* reserved field no needs to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
tbl_key.table_id = dev_flow->dv.group;
- if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+ if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+ tunnel, attr->group, error))
return -rte_errno;
return 0;
}
/**
- * Apply the flow to the NIC, lock free,
- * (mutex should be acquired by caller).
+ * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in, out] action
+ *   Shared RSS action holding hash RX queue objects.
+ * @param[in] hash_fields
+ *   Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Tunnel type.
+ * @param[in] hrxq_idx
+ * Hash RX queue index to set.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
+ const uint64_t hash_fields,
+ const int tunnel,
+ uint32_t hrxq_idx)
+{
+ uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+
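+	/*
+	 * The slot layout below must stay in sync with
+	 * __flow_dv_action_rss_hrxq_lookup(): each hash-fields
+	 * combination owns a fixed index in the per-tunnel array.
+	 */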
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ hrxqs[0] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_TCP:
+ hrxqs[1] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV4_UDP:
+ hrxqs[2] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6:
+ hrxqs[3] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_TCP:
+ hrxqs[4] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_UDP:
+ hrxqs[5] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_NONE:
+ hrxqs[6] = hrxq_idx;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
+ * and tunnel.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * Shared RSS action ID holding hash RX queue objects.
+ * @param[in] hash_fields
+ * Defines combination of packet fields to participate in RX hash.
+ * @param[in] tunnel
+ *   Tunnel type.
+ *
+ * @return
+ * Valid hash RX queue index, otherwise 0.
+ */
+static uint32_t
+__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+ const uint64_t hash_fields,
+ const int tunnel)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
+ shared_rss->hrxq_tunnel;
+
+ switch (hash_fields & ~IBV_RX_HASH_INNER) {
+ case MLX5_RSS_HASH_IPV4:
+ return hrxqs[0];
+ case MLX5_RSS_HASH_IPV4_TCP:
+ return hrxqs[1];
+ case MLX5_RSS_HASH_IPV4_UDP:
+ return hrxqs[2];
+ case MLX5_RSS_HASH_IPV6:
+ return hrxqs[3];
+ case MLX5_RSS_HASH_IPV6_TCP:
+ return hrxqs[4];
+ case MLX5_RSS_HASH_IPV6_UDP:
+ return hrxqs[5];
+ case MLX5_RSS_HASH_NONE:
+ return hrxqs[6];
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Apply the flow to the NIC, lock free
+ * (mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_workspace *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
+ struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ int n;
+ int err;
+ int idx;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
+
+ MLX5_ASSERT(wks);
+ for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
+ dev_flow = &wks->flows[idx];
+ dv = &dev_flow->dv;
+ dh = dev_flow->handle;
+ dv_h = &dh->dvh;
+ n = dv->actions_n;
+ if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
+ if (dv->transfer) {
+ dv->actions[n++] = priv->sh->esw_drop_action;
+ } else {
+ MLX5_ASSERT(priv->drop_queue.hrxq);
+ dv->actions[n++] =
+ priv->drop_queue.hrxq->action;
+ }
+ } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+ !dv_h->rix_sample && !dv_h->rix_dest_array)) {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+ &hrxq_idx);
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ dh->rix_hrxq = hrxq_idx;
+ dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ struct mlx5_hrxq *hrxq = NULL;
+ uint32_t hrxq_idx;
+
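+			/* Reuse the hrxq pre-created by the shared RSS
+			 * action for this hash-fields/tunnel combination.
+			 */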
+ hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+ rss_desc->shared_rss,
+ dev_flow->hash_fields,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (hrxq_idx)
+ hrxq = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ hrxq_idx);
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ dh->rix_srss = rss_desc->shared_rss;
+ dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
+ if (!priv->sh->default_miss_action) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "default miss action not be created.");
+ goto error;
+ }
+ dv->actions[n++] = priv->sh->default_miss_action;
+ }
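+		/* The fate action, if any, was appended last, so it closes
+		 * the actions array handed to the hardware.
+		 */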
+ err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
+ (void *)&dv->value, n,
+ dv->actions, &dh->drv_flow);
+ if (err) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
+ }
+ if (priv->vmwa_context &&
+ dh->vf_vlan.tag && !dh->vf_vlan.created) {
+ /*
+ * The rule contains the VLAN pattern.
+			 * For VF we are going to create a VLAN
+			 * interface to make the hypervisor set the
+			 * correct e-Switch vport context.
+ */
+ mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
+ }
+ }
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dh, next) {
+		/* hrxq is a union, don't clear it if the flag is not set. */
+ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
+ mlx5_hrxq_release(dev, dh->rix_hrxq);
+ dh->rix_hrxq = 0;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
+ dh->rix_srss = 0;
+ }
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ }
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+void
+flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry)
+{
+ struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
+ entry);
+
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
+ mlx5_free(cache);
+}
+
+/**
+ * Release the flow matcher.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_matcher_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
+ struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
+ typeof(*tbl), tbl);
+ int ret;
+
+ MLX5_ASSERT(matcher->matcher_object);
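+	/* Drop the matcher reference, then the table reference taken
+	 * when the matcher was registered.
+	 */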
+ ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
+ return ret;
+}
+
+/**
+ * Release encap_decap resource.
+ *
+ * @param list
+ * Pointer to the hash list.
+ * @param entry
+ * Pointer to exist resource entry object.
+ */
+void
+flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_dv_encap_decap_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
+}
+
+/**
+ * Release an encap/decap resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param encap_decap_idx
+ * Index of encap decap resource.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ uint32_t encap_decap_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ encap_decap_idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->action);
+ return mlx5_hlist_unregister(priv->sh->encaps_decaps,
+ &cache_resource->entry);
+}
+
+/**
+ * Release a jump to table action resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rix_jump
+ * Index to the jump action resource.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ uint32_t rix_jump)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+
+ tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
+ rix_jump);
+ if (!tbl_data)
+ return 0;
+ return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
+}
+
+void
+flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
+ struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_flow_dv_modify_hdr_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
+ mlx5_free(entry);
+}
+
+/**
+ * Release a modify-header resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
+
+ MLX5_ASSERT(entry->action);
+ return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
+}
+
+void
+flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_dv_port_id_action_resource *cache =
+ container_of(entry, typeof(*cache), entry);
+
+ claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
+}
+
+/**
+ * Release port ID action resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param port_id
+ *   Index to the port ID action resource.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+ uint32_t port_id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_port_id_action_resource *cache;
+
+ cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
+ if (!cache)
+ return 0;
+ MLX5_ASSERT(cache->action);
+ return mlx5_cache_unregister(&priv->sh->port_id_action_list,
+ &cache->entry);
+}
+
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param srss
+ * Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss;
+
+ shared_rss = mlx5_ipool_get
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+ __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
+void
+flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache =
+ container_of(entry, typeof(*cache), entry);
+
+ claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
+}
+
+/**
+ * Release push vlan action resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache;
+ uint32_t idx = handle->dvh.rix_push_vlan;
+
+ cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+ if (!cache)
+ return 0;
+ MLX5_ASSERT(cache->action);
+ return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
+ &cache->entry);
+}
+
+/**
+ * Release the fate resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ */
+static void
+flow_dv_fate_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ if (!handle->rix_fate)
+ return;
+ switch (handle->fate_action) {
+ case MLX5_FLOW_FATE_QUEUE:
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
+ break;
+ case MLX5_FLOW_FATE_JUMP:
+ flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
+ break;
+ case MLX5_FLOW_FATE_PORT_ID:
+ flow_dv_port_id_action_resource_release(dev,
+ handle->rix_port_id_action);
+ break;
+ default:
+ DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+ break;
+ }
+ handle->rix_fate = 0;
+}
+
+void
+flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry)
+{
+ struct mlx5_flow_dv_sample_resource *cache_resource =
+ container_of(entry, typeof(*cache_resource), entry);
+ struct rte_eth_dev *dev = cache_resource->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (cache_resource->verbs_action)
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->verbs_action));
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ cache_resource->normal_path_tbl);
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ cache_resource->idx);
+ DRV_LOG(DEBUG, "sample resource %p: removed",
+ (void *)cache_resource);
+}
+
+/**
+ * Release a sample resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_sample_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ handle->dvh.rix_sample);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->verbs_action);
+ return mlx5_cache_unregister(&priv->sh->sample_action_list,
+ &cache_resource->entry);
+}
+
+void
+flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource =
+ container_of(entry, typeof(*cache_resource), entry);
+ struct rte_eth_dev *dev = cache_resource->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t i = 0;
+
+ MLX5_ASSERT(cache_resource->action);
+ if (cache_resource->action)
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ for (; i < cache_resource->num_of_dest; i++)
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx[i]);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ cache_resource->idx);
+ DRV_LOG(DEBUG, "destination array resource %p: removed",
+ (void *)cache_resource);
+}
+
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_dest_array_resource *cache;
+
+ cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ handle->dvh.rix_dest_array);
+ if (!cache)
+ return 0;
+ MLX5_ASSERT(cache->action);
+ return mlx5_cache_unregister(&priv->sh->dest_array_list,
+ &cache->entry);
+}
+
+static void
+flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+ sh->geneve_tlv_option_resource;
+ rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+ if (geneve_opt_resource) {
+ if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
+ __ATOMIC_RELAXED))) {
+ claim_zero(mlx5_devx_cmd_destroy
+ (geneve_opt_resource->obj));
+ mlx5_free(sh->geneve_tlv_option_resource);
+ sh->geneve_tlv_option_resource = NULL;
+ }
+ }
+ rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ * Lock free (mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow_handle *dh;
+ uint32_t handle_idx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!flow)
+ return;
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ handle_idx);
+ if (!dh)
+ return;
+ if (dh->drv_flow) {
+ claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
+ dh->drv_flow = NULL;
+ }
+ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+ flow_dv_fate_resource_release(dev, dh);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ handle_idx = dh->next.next;
+ }
+}
+
+/**
+ * Remove the flow from the NIC and the memory.
+ * Lock free (mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t srss = 0;
+
+ if (!flow)
+ return;
+ flow_dv_remove(dev, flow);
+ if (flow->counter) {
+ flow_dv_counter_free(dev, flow->counter);
+ flow->counter = 0;
+ }
+ if (flow->meter) {
+ struct mlx5_flow_meter *fm;
+
+ fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
+ flow->meter);
+ if (fm)
+ mlx5_flow_meter_detach(fm);
+ flow->meter = 0;
+ }
+ if (flow->age)
+ flow_dv_aso_age_release(dev, flow->age);
+ if (flow->geneve_tlv_option) {
+ flow_dv_geneve_tlv_option_resource_release(dev);
+ flow->geneve_tlv_option = 0;
+ }
+ while (flow->dev_handles) {
+ uint32_t tmp_idx = flow->dev_handles;
+
+ dev_handle = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
+ if (!dev_handle)
+ return;
+ flow->dev_handles = dev_handle->next.next;
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_sample)
+ flow_dv_sample_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_dest_array)
+ flow_dv_dest_array_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_encap_decap)
+ flow_dv_encap_decap_resource_release(dev,
+ dev_handle->dvh.rix_encap_decap);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_push_vlan)
+ flow_dv_push_vlan_action_resource_release(dev,
+ dev_handle);
+ if (dev_handle->dvh.rix_tag)
+ flow_dv_tag_release(dev,
+ dev_handle->dvh.rix_tag);
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
+ flow_dv_fate_resource_release(dev, dev_handle);
+ else if (!srss)
+ srss = dev_handle->rix_srss;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ tmp_idx);
+ }
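+	/*
+	 * All handles of one flow share the same shared RSS action,
+	 * so its reference is dropped only once at the end.
+	 */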
+ if (srss)
+ flow_dv_shared_rss_action_release(dev, srss);
+}
+
+/**
+ * Release array of hash RX queue objects.
+ * Helper function.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
+ * @param[in, out] hrxqs
+ * Array of hash RX queue objects.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Total number of references to hash RX queue objects in *hrxqs* array
+ * after this operation.
*/
static int
-__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+__flow_dv_hrxqs_release(struct rte_eth_dev *dev,
+ uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
{
- struct mlx5_flow_dv_workspace *dv;
- struct mlx5_flow_handle *dh;
- struct mlx5_flow_handle_dv *dv_h;
- struct mlx5_flow *dev_flow;
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t handle_idx;
- int n;
- int err;
- int idx;
+ size_t i;
+ int remaining = 0;
- for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
- dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
- dv = &dev_flow->dv;
- dh = dev_flow->handle;
- dv_h = &dh->dvh;
- n = dv->actions_n;
- if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
- if (dv->transfer) {
- dv->actions[n++] = priv->sh->esw_drop_action;
- } else {
- struct mlx5_hrxq *drop_hrxq;
- drop_hrxq = mlx5_hrxq_drop_new(dev);
- if (!drop_hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get drop hash queue");
- goto error;
- }
- /*
- * Drop queues will be released by the specify
- * mlx5_hrxq_drop_release() function. Assign
- * the special index to hrxq to mark the queue
- * has been allocated.
- */
- dh->rix_hrxq = UINT32_MAX;
- dv->actions[n++] = drop_hrxq->action;
- }
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
- struct mlx5_hrxq *hrxq;
- uint32_t hrxq_idx;
- struct mlx5_flow_rss_desc *rss_desc =
- &((struct mlx5_flow_rss_desc *)priv->rss_desc)
- [!!priv->flow_nested_idx];
-
- MLX5_ASSERT(rss_desc->queue_num);
- hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num);
- if (!hrxq_idx) {
- hrxq_idx = mlx5_hrxq_new
- (dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
- }
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- hrxq_idx);
- if (!hrxq) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot get hash queue");
- goto error;
- }
- dh->rix_hrxq = hrxq_idx;
- dv->actions[n++] = hrxq->action;
- }
- dh->ib_flow =
- mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
- (void *)&dv->value, n,
- dv->actions);
- if (!dh->ib_flow) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "hardware refuses to create flow");
- goto error;
- }
- if (priv->vmwa_context &&
- dh->vf_vlan.tag && !dh->vf_vlan.created) {
- /*
- * The rule contains the VLAN pattern.
- * For VF we are going to create VLAN
- * interface to make hypervisor set correct
- * e-Switch vport context.
- */
- mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
- }
- }
- return 0;
-error:
- err = rte_errno; /* Save rte_errno before cleanup. */
- SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
- handle_idx, dh, next) {
- /* hrxq is union, don't clear it if the flag is not set. */
- if (dh->rix_hrxq) {
- if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
- mlx5_hrxq_drop_release(dev);
- dh->rix_hrxq = 0;
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
- mlx5_hrxq_release(dev, dh->rix_hrxq);
- dh->rix_hrxq = 0;
- }
- }
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ for (i = 0; i < RTE_DIM(*hrxqs); i++) {
+ int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
+
+ if (!ret)
+ (*hrxqs)[i] = 0;
+ remaining += ret;
}
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
+ return remaining;
}
/**
- * Release the flow matcher.
+ * Release all hash RX queue objects representing shared RSS action.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] action
+ * Shared RSS action to remove hash RX queue objects from.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * Total number of references to hash RX queue objects stored in *action*
+ * after this operation.
+ *   Expected to be 0 if no external references are held.
*/
static int
-flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
+ struct mlx5_shared_action_rss *shared_rss)
{
- struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
-
- MLX5_ASSERT(matcher->matcher_object);
- DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
- dev->data->port_id, (void *)matcher,
- rte_atomic32_read(&matcher->refcnt));
- if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (matcher->matcher_object));
- LIST_REMOVE(matcher, next);
- /* table ref-- in release interface. */
- flow_dv_tbl_resource_release(dev, matcher->tbl);
- rte_free(matcher);
- DRV_LOG(DEBUG, "port %u matcher %p: removed",
- dev->data->port_id, (void *)matcher);
- return 0;
- }
- return 1;
+ return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
+ __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
}
/**
- * Release an encap/decap resource.
+ * Setup shared RSS action.
+ * Prepare set of hash RX queue objects sufficient to handle all valid
+ * hash_fields combinations (see enum ibv_rx_hash_fields).
*
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ * Shared RSS action ipool index.
+ * @param[in, out] action
+ * Partially initialized shared RSS action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
*/
static int
-flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+ uint32_t action_idx,
+ struct mlx5_shared_action_rss *shared_rss,
+ struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_encap_decap;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ struct mlx5_flow_rss_desc rss_desc = { 0 };
+ size_t i;
+ int err;
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->verbs_action);
- DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &priv->sh->encaps_decaps, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
- DRV_LOG(DEBUG, "encap/decap resource %p: removed",
- (void *)cache_resource);
- return 0;
+ if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot setup indirection table");
+ }
+ memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
+ rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc.const_q = shared_rss->origin.queue;
+ rss_desc.queue_num = shared_rss->origin.queue_num;
+ /* Set non-zero value to indicate a shared RSS. */
+ rss_desc.shared_rss = action_idx;
+ rss_desc.ind_tbl = shared_rss->ind_tbl;
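+	/* Pre-create one hash RX queue per hash-fields/tunnel combination. */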
+ for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
+ uint32_t hrxq_idx;
+ uint64_t hash_fields = mlx5_rss_hash_fields[i];
+ int tunnel;
+
+ for (tunnel = 0; tunnel < 2; tunnel++) {
+ rss_desc.tunnel = tunnel;
+ rss_desc.hash_fields = hash_fields;
+ hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
+ }
+ err = __flow_dv_action_rss_hrxq_set
+ (shared_rss, hash_fields, tunnel, hrxq_idx);
+ MLX5_ASSERT(!err);
+ }
}
- return 1;
+ return 0;
+error_hrxq_new:
+ err = rte_errno;
+ __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+ shared_rss->ind_tbl = NULL;
+ rte_errno = err;
+ return -rte_errno;
}
/**
- * Release an jump to table action resource.
+ * Create shared RSS action.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] rss
+ * RSS action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * A valid shared action ID in case of success, 0 otherwise and
+ * rte_errno is set.
*/
-static int
-flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+static uint32_t
+__flow_dv_action_rss_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
- struct mlx5_flow_tbl_data_entry *tbl_data;
-
- tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
- handle->rix_jump);
- if (!tbl_data)
- return 0;
- cache_resource = &tbl_data->jump;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- /* jump action memory free is inside the table release. */
- flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
- DRV_LOG(DEBUG, "jump table resource %p: removed",
- (void *)cache_resource);
- return 0;
+ struct mlx5_shared_action_rss *shared_rss = NULL;
+ void *queue = NULL;
+ struct rte_flow_action_rss *origin;
+ const uint8_t *rss_key;
+ uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
+ uint32_t idx;
+
+ RTE_SET_USED(conf);
+ queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ shared_rss = mlx5_ipool_zmalloc
+ (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
+ if (!shared_rss || !queue) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
}
- return 1;
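+	/*
+	 * The ipool index must fit below the type-offset bit: the shared
+	 * action handle packs the action type in the bits above it.
+	 */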
+ if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+ rte_flow_error_set(error, E2BIG,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "rss action number out of range");
+ goto error_rss_init;
+ }
+ shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*shared_rss->ind_tbl),
+ 0, SOCKET_ID_ANY);
+ if (!shared_rss->ind_tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ goto error_rss_init;
+ }
+ memcpy(queue, rss->queue, queue_size);
+ shared_rss->ind_tbl->queues = queue;
+ shared_rss->ind_tbl->queues_n = rss->queue_num;
+ origin = &shared_rss->origin;
+ origin->func = rss->func;
+ origin->level = rss->level;
+ /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+ origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ origin->key = &shared_rss->key[0];
+ origin->key_len = MLX5_RSS_HASH_KEY_LEN;
+ origin->queue = queue;
+ origin->queue_num = rss->queue_num;
+ if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
+ goto error_rss_init;
+ rte_spinlock_init(&shared_rss->action_rss_sl);
+ __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_rss, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ return idx;
+error_rss_init:
+ if (shared_rss) {
+ if (shared_rss->ind_tbl)
+ mlx5_free(shared_rss->ind_tbl);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
+ }
+ if (queue)
+ mlx5_free(queue);
+ return 0;
}
/**
- * Release a modify-header resource.
+ * Destroy the shared RSS action.
+ * Release related hash RX queue objects.
*
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The shared RSS action object ID to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
+__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- handle->dvh.modify_hdr;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ uint32_t old_refcnt = 1;
+ int remaining;
+ uint16_t *queue = NULL;
- MLX5_ASSERT(cache_resource->verbs_action);
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
- LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
- DRV_LOG(DEBUG, "modify-header resource %p: removed",
- (void *)cache_resource);
- return 0;
- }
- return 1;
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action");
+ remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
+ if (remaining)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss hrxq has references");
+ if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
+ 0, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss has references");
+ queue = shared_rss->ind_tbl->queues;
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+ if (remaining)
+ return rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared rss indirection table has"
+ " references");
+ mlx5_free(queue);
+ rte_spinlock_lock(&priv->shared_act_sl);
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ &priv->rss_shared_actions, idx, shared_rss, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+ idx);
+ return 0;
}
/**
- * Release port ID action resource.
+ * Create shared action, lock free
+ * (mutex should be acquired by the caller).
+ * Dispatcher for action type specific call.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * Action specification used to create shared action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * A valid shared action handle in case of success, NULL otherwise and
+ * rte_errno is set.
*/
-static int
-flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+static struct rte_flow_shared_action *
+flow_dv_action_create(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = 0;
+ uint32_t ret = 0;
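+
+	/*
+	 * The returned handle packs the action type above
+	 * MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool index below it,
+	 * so destroy/update callbacks can decode both.
+	 */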
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
- &priv->sh->port_id_action_list, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
- DRV_LOG(DEBUG, "port id action resource %p: removed",
- (void *)cache_resource);
- return 0;
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
+ idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
+ idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
+ MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ if (ret) {
+ struct mlx5_aso_age_action *aso_age =
+ flow_aso_age_get_by_idx(dev, ret);
+
+ if (!aso_age->age_params.context)
+ aso_age->age_params.context =
+ (void *)(uintptr_t)idx;
+ }
+ break;
+ default:
+ rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "action type not supported");
+ break;
}
- return 1;
+ return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
}
/**
- * Release push vlan action resource.
+ * Destroy the shared action.
+ * Release action related resources on the NIC and the memory.
+ * Lock free (mutex should be acquired by the caller).
+ * Dispatcher for action type specific call.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * The shared action object to be removed.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, otherwise negative errno value.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+flow_dv_action_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_push_vlan;
- struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ int ret;
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- idx);
- if (!cache_resource)
- return 0;
- MLX5_ASSERT(cache_resource->action);
- DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
- &priv->sh->push_vlan_action_list, idx,
- cache_resource, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
- DRV_LOG(DEBUG, "push vlan action resource %p: removed",
- (void *)cache_resource);
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_release(dev, idx, error);
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ ret = flow_dv_aso_age_release(dev, idx);
+ if (ret)
+			/*
+			 * In this case, the last flow holding the reference
+			 * will actually release the age action.
+			 */
+ DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+ " released with references %d.", idx, ret);
return 0;
- }
- return 1;
-}
-
-/**
- * Release the fate resource.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
- */
-static void
-flow_dv_fate_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
-{
- if (!handle->rix_fate)
- return;
- if (handle->fate_action == MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
- mlx5_hrxq_release(dev, handle->rix_hrxq);
- else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
- flow_dv_jump_tbl_resource_release(dev, handle);
- else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
- flow_dv_port_id_action_resource_release(dev, handle);
- else
- DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
- handle->rix_fate = 0;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
}
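+
+/*
+ * Editor's sketch, not part of this patch: the handle passed to the
+ * .action_destroy callback above is the packed type/index value
+ * produced by flow_dv_action_create(); an application releases it
+ * through the generic API.
+ */
+static __rte_unused int
+example_shared_action_destroy(uint16_t port_id,
+			      struct rte_flow_shared_action *handle)
+{
+	struct rte_flow_error error;
+
+	/* Dispatches to flow_dv_action_destroy() on mlx5 ports. */
+	return rte_flow_shared_action_destroy(port_id, handle, &error);
+}
+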
/**
- * Remove the flow from the NIC but keeps it in memory.
- * Lock free, (mutex should be acquired by caller).
+ * Update a shared RSS action's configuration in place.
*
* @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * The shared RSS action object ID to be updated.
+ * @param[in] action_conf
+ * RSS action specification used to modify *shared_rss*.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ * @note: currently only updating of the RSS queues is supported.
*/
-static void
-__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
+ const struct rte_flow_action_rss *action_conf,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_handle *dh;
- uint32_t handle_idx;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_action_rss *shared_rss =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
+ int ret = 0;
+ void *queue = NULL;
+ uint16_t *queue_old = NULL;
+ uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
- if (!flow)
- return;
- handle_idx = flow->dev_handles;
- while (handle_idx) {
- dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
- handle_idx);
- if (!dh)
- return;
- if (dh->ib_flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
- dh->ib_flow = NULL;
- }
- if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE)
- flow_dv_fate_resource_release(dev, dh);
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
- handle_idx = dh->next.next;
+ if (!shared_rss)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid shared action to update");
+ if (priv->obj_ops.ind_table_modify == NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot modify indirection table");
+ queue = mlx5_malloc(MLX5_MEM_ZERO,
+ RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
+ 0, SOCKET_ID_ANY);
+ if (!queue)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ memcpy(queue, action_conf->queue, queue_size);
+ MLX5_ASSERT(shared_rss->ind_tbl);
+ rte_spinlock_lock(&shared_rss->action_rss_sl);
+ queue_old = shared_rss->ind_tbl->queues;
+ ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
+ queue, action_conf->queue_num, true);
+ if (ret) {
+ mlx5_free(queue);
+ ret = rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot update indirection table");
+ } else {
+ mlx5_free(queue_old);
+ shared_rss->origin.queue = queue;
+ shared_rss->origin.queue_num = action_conf->queue_num;
}
+ rte_spinlock_unlock(&shared_rss->action_rss_sl);
+ return ret;
}
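+
+/*
+ * Editor's sketch, not part of this patch: switching the queue set of
+ * a shared RSS action from the application side; only the queues may
+ * change, per the note above. The queue numbers are hypothetical.
+ */
+static __rte_unused int
+example_shared_rss_switch_queues(uint16_t port_id,
+				 struct rte_flow_shared_action *handle)
+{
+	static const uint16_t new_queues[] = { 4, 5, 6, 7 };
+	const struct rte_flow_action_rss rss_conf = {
+		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		.types = ETH_RSS_IP,
+		.queue = new_queues,
+		.queue_num = RTE_DIM(new_queues),
+	};
+	const struct rte_flow_action update = {
+		.type = RTE_FLOW_ACTION_TYPE_RSS,
+		.conf = &rss_conf,
+	};
+	struct rte_flow_error error;
+
+	/* Ends up in __flow_dv_action_rss_update() on mlx5 ports. */
+	return rte_flow_shared_action_update(port_id, handle, &update,
+					     &error);
+}
+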
/**
- * Remove the flow from the NIC and the memory.
- * Lock free, (mutex should be acquired by caller).
+ * Update a shared action's configuration in place, lock free
+ * (the mutex should be acquired by the caller).
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] action
+ * The shared action object to be updated.
+ * @param[in] action_conf
+ * Action specification used to modify *action*.
+ *   *action_conf* must match the type of *action*;
+ *   otherwise it is considered invalid.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
*/
-static void
-__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct mlx5_flow_handle *dev_handle;
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (!flow)
- return;
- __flow_dv_remove(dev, flow);
- if (flow->counter) {
- flow_dv_counter_release(dev, flow->counter);
- flow->counter = 0;
- }
- if (flow->meter) {
- struct mlx5_flow_meter *fm;
-
- fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
- flow->meter);
- if (fm)
- mlx5_flow_meter_detach(fm);
- flow->meter = 0;
+static int
+flow_dv_action_update(struct rte_eth_dev *dev,
+ struct rte_flow_shared_action *action,
+ const void *action_conf,
+ struct rte_flow_error *err)
+{
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_RSS:
+ return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type update not supported");
}
- while (flow->dev_handles) {
- uint32_t tmp_idx = flow->dev_handles;
+}
- dev_handle = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
- if (!dev_handle)
- return;
- flow->dev_handles = dev_handle->next.next;
- if (dev_handle->dvh.matcher)
- flow_dv_matcher_release(dev, dev_handle);
- if (dev_handle->dvh.rix_encap_decap)
- flow_dv_encap_decap_resource_release(dev, dev_handle);
- if (dev_handle->dvh.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_handle);
- if (dev_handle->dvh.rix_push_vlan)
- flow_dv_push_vlan_action_resource_release(dev,
- dev_handle);
- if (dev_handle->dvh.rix_tag)
- flow_dv_tag_release(dev,
- dev_handle->dvh.rix_tag);
- flow_dv_fate_resource_release(dev, dev_handle);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
- tmp_idx);
+static int
+flow_dv_action_query(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action *action, void *data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_age_param *age_param;
+ struct rte_flow_query_age *resp;
+ uint32_t act_idx = (uint32_t)(uintptr_t)action;
+ uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+
+ switch (type) {
+ case MLX5_SHARED_ACTION_TYPE_AGE:
+ age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
+ resp = data;
+ resp->aged = __atomic_load_n(&age_param->state,
+ __ATOMIC_RELAXED) == AGE_TMOUT ?
+ 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type query not supported");
}
}
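+
+/*
+ * Editor's sketch, not part of this patch: querying a shared AGE
+ * action; *data* must point to a struct rte_flow_query_age, as
+ * consumed by the .action_query callback above.
+ */
+static __rte_unused int
+example_shared_age_query(uint16_t port_id,
+			 const struct rte_flow_shared_action *handle)
+{
+	struct rte_flow_query_age age = { 0 };
+	struct rte_flow_error error;
+	int ret;
+
+	ret = rte_flow_shared_action_query(port_id, handle, &age, &error);
+	if (!ret && !age.aged && age.sec_since_last_hit_valid)
+		DRV_LOG(DEBUG, "Last hit %u seconds ago.",
+			(unsigned int)age.sec_since_last_hit);
+	return ret;
+}
+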
"counters are not available");
}
+/**
+ * Query a flow rule AGE action for aging information.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the sub flow.
+ * @param[out] data
+ *   Data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
+ void *data, struct rte_flow_error *error)
+{
+ struct rte_flow_query_age *resp = data;
+ struct mlx5_age_param *age_param;
+
+ if (flow->age) {
+ struct mlx5_aso_age_action *act =
+ flow_aso_age_get_by_idx(dev, flow->age);
+
+ age_param = &act->age_params;
+ } else if (flow->counter) {
+ age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
+
+ if (!age_param || !age_param->timeout)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot read age data");
+ } else {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "age data not available");
+ }
+ resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ AGE_TMOUT ? 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
+}
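+
+/*
+ * Editor's sketch, not part of this patch: per-flow aging query
+ * through the generic rte_flow_query() path, which lands in
+ * flow_dv_query_age() above.
+ */
+static __rte_unused int
+example_flow_age_query(uint16_t port_id, struct rte_flow *flow,
+		       struct rte_flow_query_age *age)
+{
+	const struct rte_flow_action action = {
+		.type = RTE_FLOW_ACTION_TYPE_AGE,
+	};
+	struct rte_flow_error error;
+
+	return rte_flow_query(port_id, flow, &action, age, &error);
+}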
+
/**
* Query a flow.
*
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_query_count(dev, flow, data, error);
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_query_age(dev, flow, data, error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
if (!mtd || !priv->config.dv_flow_en)
return 0;
if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.color_matcher));
if (mtd->egress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.any_matcher));
if (mtd->egress.tbl)
- flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
if (mtd->egress.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
if (mtd->ingress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.color_matcher));
if (mtd->ingress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
- flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
if (mtd->ingress.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ mtd->ingress.sfx_tbl);
if (mtd->transfer.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.color_matcher));
if (mtd->transfer.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
- flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
if (mtd->transfer.sfx_tbl)
- flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
+ flow_dv_tbl_resource_release(MLX5_SH(dev),
+ mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
- claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
- rte_free(mtd);
+ claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
+ mlx5_free(mtd);
return 0;
}
uint32_t color_reg_c_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_match_params mask = {
.size = sizeof(mask.buf),
};
struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
int i = 0;
+ int ret;
if (transfer)
dtb = &mtb->transfer;
dtb = &mtb->ingress;
/* Create the meter table with METER level. */
dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ 0, &error);
if (!dtb->tbl) {
DRV_LOG(ERR, "Failed to create meter policer table.");
return -1;
/* Create the meter suffix table with SUFFIX level. */
dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, &error);
+ egress, transfer, false, NULL, 0,
+ 0, &error);
if (!dtb->sfx_tbl) {
DRV_LOG(ERR, "Failed to create meter suffix table.");
return -1;
/* Create matchers, Any and Color. */
dv_attr.priority = 3;
dv_attr.match_criteria_enable = 0;
- dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->any_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->any_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter"
" policer default matcher.");
goto error_exit;
1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->color_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->color_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer color matcher.");
goto error_exit;
}
actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
actions[i++] = mtb->drop_actn;
/* Default rule: lowest priority, match any, actions: drop. */
- dtb->policer_rules[RTE_MTR_DROPPED] =
- mlx5_glue->dv_create_flow(dtb->any_matcher,
- (void *)&value, i, actions);
- if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+ ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
+ actions,
+ &dtb->policer_rules[RTE_MTR_DROPPED]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer drop rule.");
goto error_exit;
}
rte_errno = ENOTSUP;
return NULL;
}
- mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+ mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
if (!mtb) {
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;
mtb->count_actns[i] = cnt->action;
}
/* Create drop action. */
- mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
- if (!mtb->drop_actn) {
+ ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create drop action.");
goto error_exit;
}
for (i = 0; i < RTE_MTR_DROPPED; i++) {
if (dt->policer_rules[i]) {
- claim_zero(mlx5_glue->dv_destroy_flow
- (dt->policer_rules[i]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (dt->policer_rules[i]));
dt->policer_rules[i] = NULL;
}
}
if (dt->jump_actn) {
- claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
dt->jump_actn = NULL;
}
}
struct mlx5_meter_domains_infos *mtb = fm->mfts;
void *actions[METER_ACTIONS];
int i;
+ int ret = 0;
/* Create jump action. */
if (!dtb->jump_actn)
- dtb->jump_actn =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (dtb->sfx_tbl->obj);
- if (!dtb->jump_actn) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dtb->sfx_tbl->obj, &dtb->jump_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
}
actions[j++] = mtb->drop_actn;
else
actions[j++] = dtb->jump_actn;
- dtb->policer_rules[i] =
- mlx5_glue->dv_create_flow(dtb->color_matcher,
- (void *)&value,
- j, actions);
- if (!dtb->policer_rules[i]) {
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher,
+ (void *)&value, j, actions,
+ &dtb->policer_rules[i]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer rule.");
goto error;
}
return -1;
}
+/**
+ * Validate the batch counter support in root table.
+ *
+ * Create a simple flow with an invalid counter and a drop action on the root
+ * table to check whether batch counters with offset are supported there.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ void *actions[2] = { 0 };
+ struct mlx5_flow_tbl_resource *tbl = NULL;
+ struct mlx5_devx_obj *dcs = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
+ if (!tbl)
+ goto err;
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ if (!dcs)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
+ &actions[0]);
+ if (ret)
+ goto err;
+ actions[1] = priv->drop_queue.hrxq->action;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ actions, &flow);
+err:
+	/*
+	 * If batch counters with offset are not supported, the driver does
+	 * not validate the invalid offset value and flow creation succeeds.
+	 * In this case, batch counters are not supported in the root table.
+	 *
+	 * Otherwise, if flow creation fails, counter offset is supported.
+	 */
+ if (flow) {
+ DRV_LOG(INFO, "Batch counter is not supported in root "
+ "table. Switch to fallback mode.");
+ rte_errno = ENOTSUP;
+ ret = -rte_errno;
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ } else {
+		/* Check matcher to make sure validation failed at create. */
+		if (!matcher || errno != EINVAL)
+ DRV_LOG(ERR, "Unexpected error in counter offset "
+ "support detection");
+ ret = 0;
+ }
+ if (actions[0])
+ claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
+ if (matcher)
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
+ if (tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
+ if (dcs)
+ claim_zero(mlx5_devx_cmd_destroy(dcs));
+ return ret;
+}
+
/**
* Query a devx counter.
*
return 0;
}
-/*
- * Mutex-protected thunk to lock-free __flow_dv_translate().
+/**
+ * Get aged-out flows.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] context
+ *   The address of an array of pointers to the contexts of aged-out flows.
+ * @param[in] nb_contexts
+ *   The length of the context array.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ *   The number of aged-out contexts reported on success, otherwise a
+ *   negative errno value. If nb_contexts is 0, return the total number
+ *   of aged-out contexts; if nb_contexts is not 0, return the number of
+ *   aged-out flows reported in the context array.
*/
static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_get_aged_flows(struct rte_eth_dev *dev,
+ void **context,
+ uint32_t nb_contexts,
+ struct rte_flow_error *error)
{
- int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info;
+ struct mlx5_age_param *age_param;
+ struct mlx5_flow_counter *counter;
+ struct mlx5_aso_age_action *act;
+ int nb_flows = 0;
- flow_dv_shared_lock(dev);
- ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
- flow_dv_shared_unlock(dev);
- return ret;
+ if (nb_contexts && !context)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "empty context");
+ age_info = GET_PORT_AGE_INFO(priv);
+ rte_spinlock_lock(&age_info->aged_sl);
+ LIST_FOREACH(act, &age_info->aged_aso, next) {
+ nb_flows++;
+ if (nb_contexts) {
+ context[nb_flows - 1] =
+ act->age_params.context;
+ if (!(--nb_contexts))
+ break;
+ }
+ }
+ TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
+ nb_flows++;
+ if (nb_contexts) {
+ age_param = MLX5_CNT_TO_AGE(counter);
+ context[nb_flows - 1] = age_param->context;
+ if (!(--nb_contexts))
+ break;
+ }
+ }
+ rte_spinlock_unlock(&age_info->aged_sl);
+ MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
+ return nb_flows;
}
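+
+/*
+ * Editor's sketch, not part of this patch: the usual two-step pattern
+ * for draining aged-out flow contexts, first sizing the array by
+ * passing nb_contexts == 0 as documented above.
+ */
+static __rte_unused int
+example_drain_aged_flows(uint16_t port_id)
+{
+	struct rte_flow_error error;
+	void **contexts;
+	int total, ret;
+
+	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
+	if (total <= 0)
+		return total;
+	contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * total,
+			       0, SOCKET_ID_ANY);
+	if (!contexts)
+		return -ENOMEM;
+	ret = rte_flow_get_aged_flows(port_id, contexts, total, &error);
+	/* Each context is the application pointer set in the AGE action. */
+	mlx5_free(contexts);
+	return ret;
+}
+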
/*
- * Mutex-protected thunk to lock-free __flow_dv_apply().
+ * Wrapper around the lock-free flow_dv_counter_alloc().
*/
-static int
-flow_dv_apply(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+static uint32_t
+flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
- int ret;
-
- flow_dv_shared_lock(dev);
- ret = __flow_dv_apply(dev, flow, error);
- flow_dv_shared_unlock(dev);
- return ret;
+ return flow_dv_counter_alloc(dev, 0);
}
-/*
- * Mutex-protected thunk to lock-free __flow_dv_remove().
+/**
+ * Validate a shared action.
+ * Dispatches to the action type specific validation handler.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * The shared action object to validate.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
*/
-static void
-flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static int
+flow_dv_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err)
{
- flow_dv_shared_lock(dev);
- __flow_dv_remove(dev, flow);
- flow_dv_shared_unlock(dev);
-}
+ struct mlx5_priv *priv = dev->data->dev_private;
-/*
- * Mutex-protected thunk to lock-free __flow_dv_destroy().
- */
-static void
-flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- flow_dv_shared_lock(dev);
- __flow_dv_destroy(dev, flow);
- flow_dv_shared_unlock(dev);
+ RTE_SET_USED(conf);
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+		/*
+		 * priv->obj_ops is set according to the driver capabilities.
+		 * When DevX capabilities are sufficient, it is set to
+		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
+		 * ibv_obj_ops doesn't support the ind_table_modify operation,
+		 * so in that case the shared RSS action can't be used.
+		 */
+ if (priv->obj_ops.ind_table_modify == NULL)
+ return rte_flow_error_set
+ (err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "shared RSS action not supported");
+ return mlx5_validate_action_rss(dev, action, err);
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ if (!priv->sh->aso_age_mng)
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "shared age action not supported");
+ return flow_dv_validate_action_age(0, action, dev, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "action type not supported");
+ }
}
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
- */
-static uint32_t
-flow_dv_counter_allocate(struct rte_eth_dev *dev)
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
- uint32_t cnt;
-
- flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
- flow_dv_shared_unlock(dev);
- return cnt;
-}
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
-/*
- * Mutex-protected thunk to lock-free flow_dv_counter_release().
- */
-static void
-flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
-{
- flow_dv_shared_lock(dev);
- flow_dv_counter_release(dev, cnt);
- flow_dv_shared_unlock(dev);
+ if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+ ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
+ flags);
+ if (ret != 0)
+ return ret;
+ }
+ if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+ ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
+ if (ret != 0)
+ return ret;
+ }
+ if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+ ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
}
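+
+/*
+ * Editor's sketch, not part of this patch: .sync_domain is reached via
+ * the mlx5-specific rte_pmd_mlx5_sync_flow() wrapper (assuming the
+ * rte_pmd_mlx5.h API introduced together with this op).
+ */
+static __rte_unused int
+example_sync_all_domains(uint16_t port_id)
+{
+	/* Block until pending steering writes are committed to HW. */
+	return rte_pmd_mlx5_sync_flow(port_id,
+				      MLX5_DOMAIN_BIT_NIC_RX |
+				      MLX5_DOMAIN_BIT_NIC_TX |
+				      MLX5_DOMAIN_BIT_FDB);
+}
+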
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
+ .get_aged_flows = flow_get_aged_flows,
+ .action_validate = flow_dv_action_validate,
+ .action_create = flow_dv_action_create,
+ .action_destroy = flow_dv_action_destroy,
+ .action_update = flow_dv_action_update,
+ .action_query = flow_dv_action_query,
+ .sync_domain = flow_dv_sync_domain,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+