#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
MLX5_MODIFICATION_TYPE_SET, error);
}
+/*
+ * Return the bit width of a MODIFY_FIELD protocol field.
+ *
+ * The width is used by the modify-field translation to build the
+ * per-word masks and to split an immediate value across the 32-bit
+ * modification words.
+ *
+ * @param field
+ *   Protocol field identifier (RTE_FLOW_FIELD_*).
+ *
+ * @return
+ *   Field width in bits; 0 for an unknown field (after MLX5_ASSERT).
+ */
+static int
+mlx5_flow_item_field_width(enum rte_flow_field_id field)
+{
+ switch (field) {
+ case RTE_FLOW_FIELD_START:
+ return 32;
+ case RTE_FLOW_FIELD_MAC_DST:
+ case RTE_FLOW_FIELD_MAC_SRC:
+ return 48;
+ case RTE_FLOW_FIELD_VLAN_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_VLAN_ID:
+ return 12;
+ case RTE_FLOW_FIELD_MAC_TYPE:
+ return 16;
+ case RTE_FLOW_FIELD_IPV4_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV4_TTL:
+ return 8;
+ case RTE_FLOW_FIELD_IPV4_SRC:
+ case RTE_FLOW_FIELD_IPV4_DST:
+ return 32;
+ case RTE_FLOW_FIELD_IPV6_DSCP:
+ return 6;
+ case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ return 8;
+ case RTE_FLOW_FIELD_IPV6_SRC:
+ case RTE_FLOW_FIELD_IPV6_DST:
+ return 128;
+ case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ case RTE_FLOW_FIELD_TCP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ return 32;
+ case RTE_FLOW_FIELD_TCP_FLAGS:
+ return 6;
+ case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ case RTE_FLOW_FIELD_UDP_PORT_DST:
+ return 16;
+ case RTE_FLOW_FIELD_VXLAN_VNI:
+ case RTE_FLOW_FIELD_GENEVE_VNI:
+ return 24;
+ case RTE_FLOW_FIELD_GTP_TEID:
+ case RTE_FLOW_FIELD_TAG:
+ return 32;
+ case RTE_FLOW_FIELD_MARK:
+ return 24;
+ case RTE_FLOW_FIELD_META:
+ return 32;
+ /* Immediate values/pointers may carry up to 64 bits of data. */
+ case RTE_FLOW_FIELD_POINTER:
+ case RTE_FLOW_FIELD_VALUE:
+ return 64;
+ default:
+ MLX5_ASSERT(false);
+ }
+ return 0;
+}
+
static void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
struct field_modify_info *info,
- uint32_t *mask, uint32_t *value, uint32_t width,
+ uint32_t *mask, uint32_t *value,
+ uint32_t width, uint32_t dst_width,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
uint32_t idx = 0;
+ uint64_t val = 0;
switch (data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
}
info[idx] = (struct field_modify_info){2, 4 * idx,
MLX5_MODI_OUT_DMAC_15_0};
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
}
info[idx] = (struct field_modify_info){2, 4 * idx,
MLX5_MODI_OUT_SMAC_15_0};
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
} else {
if (data->offset < 32)
info[idx++] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_FIRST_VID};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x00000fff >>
- (12 - width));
+ mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
break;
case RTE_FLOW_FIELD_MAC_TYPE:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_ETHERTYPE};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_IPV4_DSCP:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_IPV4_TTL:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV4_TTL};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x000000ff >>
- (8 - width));
+ mask[idx] = 0xff >> (8 - width);
break;
case RTE_FLOW_FIELD_IPV4_SRC:
info[idx] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV6_HOPLIMIT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x000000ff >>
- (8 - width));
+ mask[idx] = 0xff >> (8 - width);
break;
case RTE_FLOW_FIELD_IPV6_SRC:
if (mask) {
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_TCP_PORT_DST:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_TCP_SEQ_NUM:
info[idx] = (struct field_modify_info){4, 0,
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_TCP_FLAGS};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000003f >>
- (6 - width));
+ mask[idx] = 0x3f >> (6 - width);
break;
case RTE_FLOW_FIELD_UDP_PORT_SRC:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_UDP_PORT_DST:
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0x0000ffff >>
- (16 - width));
+ mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
break;
case RTE_FLOW_FIELD_VXLAN_VNI:
/* not supported yet */
}
break;
case RTE_FLOW_FIELD_POINTER:
- for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
- if (mask[idx]) {
- memcpy(&value[idx],
- (void *)(uintptr_t)data->value, 32);
- value[idx] = rte_cpu_to_be_32(value[idx]);
- break;
- }
- }
- break;
case RTE_FLOW_FIELD_VALUE:
+ if (data->field == RTE_FLOW_FIELD_POINTER)
+ memcpy(&val, (void *)(uintptr_t)data->value,
+ sizeof(uint64_t));
+ else
+ val = data->value;
for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
if (mask[idx]) {
- value[idx] =
- rte_cpu_to_be_32((uint32_t)data->value);
- break;
+ if (dst_width > 16) {
+ value[idx] = rte_cpu_to_be_32(val);
+ val >>= 32;
+ } else if (dst_width > 8) {
+ value[idx] = rte_cpu_to_be_16(val);
+ val >>= 16;
+ } else {
+ value[idx] = (uint8_t)val;
+ val >>= 8;
+ }
+ if (!val)
+ break;
}
}
break;
uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
uint32_t type;
+ uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
conf->src.field == RTE_FLOW_FIELD_VALUE) {
type = MLX5_MODIFICATION_TYPE_SET;
/** For SET fill the destination field (field) first. */
mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
/** Then copy immediate value from source as per mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
item.spec = &value;
} else {
type = MLX5_MODIFICATION_TYPE_COPY;
/** For COPY fill the destination field (dcopy) without mask. */
mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
/** Then construct the source field (field) with mask. */
mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
- value, conf->width, dev, attr, error);
+ value, conf->width, dst_width, dev, attr, error);
}
item.mask = &mask;
return flow_dv_convert_modify_action(&item,
return ret;
}
-static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
-{
- switch (field) {
- case RTE_FLOW_FIELD_START:
- return 32;
- case RTE_FLOW_FIELD_MAC_DST:
- case RTE_FLOW_FIELD_MAC_SRC:
- return 48;
- case RTE_FLOW_FIELD_VLAN_TYPE:
- return 16;
- case RTE_FLOW_FIELD_VLAN_ID:
- return 12;
- case RTE_FLOW_FIELD_MAC_TYPE:
- return 16;
- case RTE_FLOW_FIELD_IPV4_DSCP:
- return 6;
- case RTE_FLOW_FIELD_IPV4_TTL:
- return 8;
- case RTE_FLOW_FIELD_IPV4_SRC:
- case RTE_FLOW_FIELD_IPV4_DST:
- return 32;
- case RTE_FLOW_FIELD_IPV6_DSCP:
- return 6;
- case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
- return 8;
- case RTE_FLOW_FIELD_IPV6_SRC:
- case RTE_FLOW_FIELD_IPV6_DST:
- return 128;
- case RTE_FLOW_FIELD_TCP_PORT_SRC:
- case RTE_FLOW_FIELD_TCP_PORT_DST:
- return 16;
- case RTE_FLOW_FIELD_TCP_SEQ_NUM:
- case RTE_FLOW_FIELD_TCP_ACK_NUM:
- return 32;
- case RTE_FLOW_FIELD_TCP_FLAGS:
- return 6;
- case RTE_FLOW_FIELD_UDP_PORT_SRC:
- case RTE_FLOW_FIELD_UDP_PORT_DST:
- return 16;
- case RTE_FLOW_FIELD_VXLAN_VNI:
- case RTE_FLOW_FIELD_GENEVE_VNI:
- return 24;
- case RTE_FLOW_FIELD_GTP_TEID:
- case RTE_FLOW_FIELD_TAG:
- return 32;
- case RTE_FLOW_FIELD_MARK:
- return 24;
- case RTE_FLOW_FIELD_META:
- case RTE_FLOW_FIELD_POINTER:
- case RTE_FLOW_FIELD_VALUE:
- return 32;
- default:
- MLX5_ASSERT(false);
- }
- return 0;
-}
-
/**
* Validate the generic modify field actions.
* @param[in] dev
item_flags, attr,
error))
return -rte_errno;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
++actions_n;
break;
error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
++actions_n;
break;
(action_flags, actions, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count PCP with push_vlan command. */
action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
break;
actions, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count VID with push_vlan command. */
action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
rw_act_num += MLX5_ACT_NUM_MDF_VID;
attr, error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
action_flags |= MLX5_FLOW_ACTION_DECAP;
++actions_n;
break;
actions, item_flags, error);
if (ret < 0)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
+ (action_flags & MLX5_FLOW_ACTION_DECAP))
+ modify_after_mirror = 1;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
error);
if (ret < 0)
return ret;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ modify_after_mirror = 1;
/* Count all modify-header actions as one action. */
if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
++actions_n;
return &cache_resource->entry;
error:
for (idx = 0; idx < resource->num_of_dest; idx++) {
- struct mlx5_flow_sub_actions_idx *act_res =
- &cache_resource->sample_idx[idx];
- if (act_res->rix_hrxq &&
- !mlx5_hrxq_release(dev,
- act_res->rix_hrxq))
- act_res->rix_hrxq = 0;
- if (act_res->rix_encap_decap &&
- !flow_dv_encap_decap_resource_release(dev,
- act_res->rix_encap_decap))
- act_res->rix_encap_decap = 0;
- if (act_res->rix_port_id_action &&
- !flow_dv_port_id_action_resource_release(dev,
- act_res->rix_port_id_action))
- act_res->rix_port_id_action = 0;
- if (act_res->rix_jump &&
- !flow_dv_jump_tbl_resource_release(dev,
- act_res->rix_jump))
- act_res->rix_jump = 0;
+ flow_dv_sample_sub_actions_release(dev,
+ &cache_resource->sample_idx[idx]);
if (dest_attr[idx])
mlx5_free(dest_attr[idx]);
}
dev_flow->handle->dvh.rix_encap_decap;
sample_act->dr_encap_action =
dev_flow->dv.encap_decap->action;
+ dev_flow->handle->dvh.rix_encap_decap = 0;
}
if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
normal_idx++;
dev_flow->handle->rix_port_id_action;
sample_act->dr_port_id_action =
dev_flow->dv.port_id_action->action;
+ dev_flow->handle->rix_port_id_action = 0;
}
if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
normal_idx++;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
const uint8_t *rss_key;
- const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_aso_age_action *age_act;
uint32_t port_id = 0;
struct mlx5_flow_dv_port_id_action_resource port_id_resource;
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
- struct mlx5_flow_meter *fm = NULL;
uint32_t jump_group = 0;
if (!mlx5_flow_os_action_supported(action_type))
MLX5_FLOW_FATE_DEFAULT_MISS;
break;
case RTE_FLOW_ACTION_TYPE_METER:
- mtr = actions->conf;
- if (!flow->meter) {
- fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
- attr, error);
- if (!fm)
- return rte_flow_error_set(error,
- rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "meter not found "
- "or invalid parameters");
- flow->meter = fm->idx;
- }
+ if (!wks->fm)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to get meter in flow.");
/* Set the meter action. */
- if (!fm) {
- fm = mlx5_ipool_get(priv->sh->ipool
- [MLX5_IPOOL_MTR], flow->meter);
- if (!fm)
- return rte_flow_error_set(error,
- rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "meter not found "
- "or invalid parameters");
- }
dev_flow->dv.actions[actions_n++] =
- fm->mfts->meter_action;
+ wks->fm->mfts->meter_action;
action_flags |= MLX5_FLOW_ACTION_METER;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
return;
switch (handle->fate_action) {
case MLX5_FLOW_FATE_QUEUE:
- mlx5_hrxq_release(dev, handle->rix_hrxq);
+ if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
+ mlx5_hrxq_release(dev, handle->rix_hrxq);
break;
case MLX5_FLOW_FATE_JUMP:
flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
{
struct mlx5_flow_handle *dev_handle;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter *fm = NULL;
uint32_t srss = 0;
if (!flow)
flow->counter = 0;
}
if (flow->meter) {
- struct mlx5_flow_meter *fm;
-
fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
flow->meter);
if (fm)
flow_dv_fate_resource_release(dev, dev_handle);
else if (!srss)
srss = dev_handle->rix_srss;
+ if (fm && dev_handle->is_meter_flow_id &&
+ dev_handle->split_flow_id)
+ mlx5_ipool_free(fm->flow_ipool,
+ dev_handle->split_flow_id);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
tmp_idx);
}
*/
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action_rss *rss,
struct rte_flow_error *error)
{
"cannot allocate resource memory");
goto error_rss_init;
}
- if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
+ if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
rte_flow_error_set(error, E2BIG,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"rss action number out of range");
}
/**
- * Create shared action, lock free,
+ * Create indirect action, lock free,
* (mutex should be acquired by caller).
* Dispatcher for action type specific call.
*
* @param[in] conf
* Shared action configuration.
* @param[in] action
- * Action specification used to create shared action.
+ * Action specification used to create indirect action.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
* A valid shared action handle in case of success, NULL otherwise and
* rte_errno is set.
*/
-static struct rte_flow_shared_action *
+static struct rte_flow_action_handle *
flow_dv_action_create(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *err)
{
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_RSS:
ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
- idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
- MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
- idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
- MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
+ idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
if (ret) {
struct mlx5_aso_age_action *aso_age =
flow_aso_age_get_by_idx(dev, ret);
NULL, "action type not supported");
break;
}
- return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
+ return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
}
/**
- * Destroy the shared action.
+ * Destroy the indirect action.
* Release action related resources on the NIC and the memory.
* Lock free, (mutex should be acquired by caller).
* Dispatcher for action type specific call.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] action
- * The shared action object to be removed.
+ * @param[in] handle
+ * The indirect action object handle to be removed.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
+ struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
{
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
int ret;
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
return __flow_dv_action_rss_release(dev, idx, error);
- case MLX5_SHARED_ACTION_TYPE_AGE:
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
ret = flow_dv_aso_age_release(dev, idx);
if (ret)
/*
* In this case, the last flow has a reference will
* actually release the age action.
*/
- DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
+ DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
" released with references %d.", idx, ret);
return 0;
default:
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] action
- * The shared action object to be updated.
- * @param[in] action_conf
- * Action specification used to modify *action*.
- * *action_conf* should be of type correlating with type of the *action*,
- * otherwise considered as invalid.
+ * @param[in] handle
+ * The indirect action object handle to be updated.
+ * @param[in] update
+ * Action specification used to modify the action pointed by *handle*.
+ * *update* could be of same type with the action pointed by the *handle*
+ * handle argument, or some other structures like a wrapper, depending on
+ * the indirect action type.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_update(struct rte_eth_dev *dev,
- struct rte_flow_shared_action *action,
- const void *action_conf,
+ struct rte_flow_action_handle *handle,
+ const void *update,
struct rte_flow_error *err)
{
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ const void *action_conf;
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_RSS:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
+ action_conf = ((const struct rte_flow_action *)update)->conf;
return __flow_dv_action_rss_update(dev, idx, action_conf, err);
default:
return rte_flow_error_set(err, ENOTSUP,
static int
flow_dv_action_query(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action *action, void *data,
+ const struct rte_flow_action_handle *handle, void *data,
struct rte_flow_error *error)
{
struct mlx5_age_param *age_param;
struct rte_flow_query_age *resp;
- uint32_t act_idx = (uint32_t)(uintptr_t)action;
- uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
- uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
switch (type) {
- case MLX5_SHARED_ACTION_TYPE_AGE:
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
resp->aged = __atomic_load_n(&age_param->state,
if (!mtd || !priv->config.dv_flow_en)
return 0;
- if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_flow_os_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
- if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_flow_os_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
- if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_flow_os_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
- if (mtd->egress.color_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->egress.color_matcher));
- if (mtd->egress.any_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->egress.any_matcher));
if (mtd->egress.tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
if (mtd->egress.sfx_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
- if (mtd->ingress.color_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->ingress.color_matcher));
- if (mtd->ingress.any_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
if (mtd->ingress.sfx_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
mtd->ingress.sfx_tbl);
- if (mtd->transfer.color_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->transfer.color_matcher));
- if (mtd->transfer.any_matcher)
- claim_zero(mlx5_flow_os_destroy_flow_matcher
- (mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
if (mtd->transfer.sfx_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
mtd->transfer.sfx_tbl);
- if (mtd->drop_actn)
- claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
mlx5_free(mtd);
return 0;
}
* Table attribute.
* @param[in] transfer
* Table attribute.
- * @param[in] color_reg_c_idx
- * Reg C index for color match.
*
* @return
- * 0 on success, -1 otherwise and rte_errno is set.
+ * 0 on success, -1 otherwise.
*/
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
struct mlx5_meter_domains_infos *mtb,
- uint8_t egress, uint8_t transfer,
- uint32_t color_reg_c_idx)
+ uint8_t egress, uint8_t transfer)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_flow_dv_match_params mask = {
- .size = sizeof(mask.buf),
- };
- struct mlx5_flow_dv_match_params value = {
- .size = sizeof(value.buf),
- };
- struct mlx5dv_flow_matcher_attr dv_attr = {
- .type = IBV_FLOW_ATTR_NORMAL,
- .priority = 0,
- .match_criteria_enable = 0,
- .match_mask = (void *)&mask,
- };
- void *actions[METER_ACTIONS];
- struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
- int i = 0;
- int ret;
+ struct mlx5_meter_domain_info *dtb;
if (transfer)
dtb = &mtb->transfer;
DRV_LOG(ERR, "Failed to create meter suffix table.");
return -1;
}
- /* Create matchers, Any and Color. */
- dv_attr.priority = 3;
- dv_attr.match_criteria_enable = 0;
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
- &dtb->any_matcher);
- if (ret) {
- DRV_LOG(ERR, "Failed to create meter"
- " policer default matcher.");
- goto error_exit;
- }
- dv_attr.priority = 0;
- dv_attr.match_criteria_enable =
- 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
- flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
- rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
- &dtb->color_matcher);
- if (ret) {
- DRV_LOG(ERR, "Failed to create meter policer color matcher.");
- goto error_exit;
- }
- if (mtb->count_actns[RTE_MTR_DROPPED])
- actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
- actions[i++] = mtb->drop_actn;
- /* Default rule: lowest priority, match any, actions: drop. */
- ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
- actions,
- &dtb->policer_rules[RTE_MTR_DROPPED]);
- if (ret) {
- DRV_LOG(ERR, "Failed to create meter policer drop rule.");
- goto error_exit;
- }
return 0;
-error_exit:
- return -1;
}
/**
*
* @param[in] dev
* Pointer to Ethernet device.
- * @param[in] fm
- * Pointer to the flow meter.
*
* @return
* Pointer to table set on success, NULL otherwise and rte_errno is set.
*/
static struct mlx5_meter_domains_infos *
-flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
- const struct mlx5_flow_meter *fm)
+flow_dv_create_mtr_tbl(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_meter_domains_infos *mtb;
int ret;
- int i;
if (!priv->mtr_en) {
rte_errno = ENOTSUP;
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;
}
- /* Create meter count actions */
- for (i = 0; i <= RTE_MTR_DROPPED; i++) {
- struct mlx5_flow_counter *cnt;
- if (!fm->policer_stats.cnt[i])
- continue;
- cnt = flow_dv_counter_get_by_idx(dev,
- fm->policer_stats.cnt[i], NULL);
- mtb->count_actns[i] = cnt->action;
- }
- /* Create drop action. */
- ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
- if (ret) {
- DRV_LOG(ERR, "Failed to create drop action.");
- goto error_exit;
- }
/* Egress meter table. */
- ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
+ ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0);
if (ret) {
DRV_LOG(ERR, "Failed to prepare egress meter table.");
goto error_exit;
}
/* Ingress meter table. */
- ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
+ ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0);
if (ret) {
DRV_LOG(ERR, "Failed to prepare ingress meter table.");
goto error_exit;
}
/* FDB meter table. */
if (priv->config.dv_esw_en) {
- ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
- priv->mtr_color_reg);
+ ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1);
if (ret) {
DRV_LOG(ERR, "Failed to prepare fdb meter table.");
goto error_exit;
return NULL;
}
+/**
+ * Destroy the meter table matchers.
+ * Lock free, (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in,out] dtb
+ * Pointer to DV meter table.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+flow_dv_destroy_mtr_matchers(struct rte_eth_dev *dev,
+ struct mlx5_meter_domain_info *dtb)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tbl_data_entry *tbl;
+
+ /* Matchers only exist when DV flow offload is enabled. */
+ if (!priv->config.dv_flow_en)
+ return 0;
+ /*
+ * Matchers are reference-counted in the owning table's cache;
+ * unregister drops the reference instead of destroying directly.
+ */
+ if (dtb->drop_matcher) {
+ tbl = container_of(dtb->drop_matcher->tbl, typeof(*tbl), tbl);
+ mlx5_cache_unregister(&tbl->matchers,
+ &dtb->drop_matcher->entry);
+ dtb->drop_matcher = NULL;
+ }
+ if (dtb->color_matcher) {
+ tbl = container_of(dtb->color_matcher->tbl, typeof(*tbl), tbl);
+ mlx5_cache_unregister(&tbl->matchers,
+ &dtb->color_matcher->entry);
+ dtb->color_matcher = NULL;
+ }
+ return 0;
+}
+
+/**
+ * Create the matchers for meter table.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] color_reg_c_idx
+ * Reg C index for color match.
+ * @param[in] mtr_id_reg_c_idx
+ * Reg C index for meter_id match.
+ * @param[in] mtr_id_mask
+ * Mask for meter_id match criteria.
+ * @param[in,out] dtb
+ * Pointer to DV meter table.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_prepare_mtr_matchers(struct rte_eth_dev *dev,
+ uint32_t color_reg_c_idx,
+ uint32_t mtr_id_reg_c_idx,
+ uint32_t mtr_id_mask,
+ struct mlx5_meter_domain_info *dtb,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct mlx5_cache_entry *entry;
+ /* Misc4 fields are excluded from the match criteria buffer size. */
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ .tbl = dtb->tbl,
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ };
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = &matcher,
+ };
+ uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
+
+ tbl_data = container_of(dtb->tbl, struct mlx5_flow_tbl_data_entry, tbl);
+ if (!dtb->drop_matcher) {
+ /* Create matchers for Drop. */
+ flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
+ mtr_id_reg_c_idx, 0, mtr_id_mask);
+ /*
+ * NOTE(review): priority is derived from the meter-id bit
+ * count; presumably the drop matcher gets a lower precedence
+ * than the color matcher below -- confirm against matcher
+ * priority semantics.
+ */
+ matcher.priority = MLX5_REG_BITS * 2 - priv->max_mtr_bits;
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ /* Register in the table cache; shares an existing matcher. */
+ entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ if (!entry) {
+ DRV_LOG(ERR, "Failed to register meter drop matcher.");
+ return -1;
+ }
+ dtb->drop_matcher =
+ container_of(entry, struct mlx5_flow_dv_matcher, entry);
+ }
+ if (!dtb->color_matcher) {
+ /* Create matchers for Color + meter_id. */
+ if (priv->mtr_reg_share) {
+ /* Color and meter_id share one register: one match. */
+ flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
+ color_reg_c_idx, 0,
+ (mtr_id_mask | color_mask));
+ } else {
+ /* Separate registers: match color and meter_id. */
+ flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
+ color_reg_c_idx, 0, color_mask);
+ flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
+ mtr_id_reg_c_idx, 0, mtr_id_mask);
+ }
+ matcher.priority = MLX5_REG_BITS - priv->max_mtr_bits;
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ if (!entry) {
+ DRV_LOG(ERR, "Failed to register meter color matcher.");
+ return -1;
+ }
+ dtb->color_matcher =
+ container_of(entry, struct mlx5_flow_dv_matcher, entry);
+ }
+ return 0;
+}
+
/**
* Destroy domain policer rule.
*
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[in] dt
* Pointer to domain table.
*/
static void
-flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
+flow_dv_destroy_domain_policer_rule(struct rte_eth_dev *dev,
+ struct mlx5_meter_domain_info *dt)
{
- int i;
-
- for (i = 0; i < RTE_MTR_DROPPED; i++) {
- if (dt->policer_rules[i]) {
- claim_zero(mlx5_flow_os_destroy_flow
- (dt->policer_rules[i]));
- dt->policer_rules[i] = NULL;
- }
+ if (dt->drop_rule) {
+ claim_zero(mlx5_flow_os_destroy_flow(dt->drop_rule));
+ dt->drop_rule = NULL;
+ }
+ if (dt->green_rule) {
+ claim_zero(mlx5_flow_os_destroy_flow(dt->green_rule));
+ dt->green_rule = NULL;
}
+ flow_dv_destroy_mtr_matchers(dev, dt);
if (dt->jump_actn) {
claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
dt->jump_actn = NULL;
* Always 0.
*/
static int
-flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
+flow_dv_destroy_policer_rules(struct rte_eth_dev *dev,
const struct mlx5_flow_meter *fm,
const struct rte_flow_attr *attr)
{
if (!mtb)
return 0;
if (attr->egress)
- flow_dv_destroy_domain_policer_rule(&mtb->egress);
+ flow_dv_destroy_domain_policer_rule(dev, &mtb->egress);
if (attr->ingress)
- flow_dv_destroy_domain_policer_rule(&mtb->ingress);
+ flow_dv_destroy_domain_policer_rule(dev, &mtb->ingress);
if (attr->transfer)
- flow_dv_destroy_domain_policer_rule(&mtb->transfer);
+ flow_dv_destroy_domain_policer_rule(dev, &mtb->transfer);
return 0;
}
/**
* Create specify domain meter policer rule.
*
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[in] fm
* Pointer to flow meter structure.
* @param[in] mtb
* Pointer to DV meter table set.
- * @param[in] mtr_reg_c
- * Color match REG_C.
+ * @param[out] drop_rule
+ * The address of pointer saving drop rule.
+ * @param[out] green_rule
+ * The address of pointer saving green rule.
*
* @return
* 0 on success, -1 otherwise.
*/
static int
-flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
+flow_dv_create_policer_forward_rule(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter *fm,
struct mlx5_meter_domain_info *dtb,
- uint8_t mtr_reg_c)
+ void **drop_rule,
+ void **green_rule)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_match_params matcher = {
- .size = sizeof(matcher.buf),
+ .size = sizeof(matcher.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
};
struct mlx5_flow_dv_match_params value = {
- .size = sizeof(value.buf),
+ .size = sizeof(value.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
};
struct mlx5_meter_domains_infos *mtb = fm->mfts;
+ struct rte_flow_error error;
+ uint32_t color_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR,
+ 0, &error);
+ uint32_t mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
+ 0, &error);
+ uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
+ uint32_t mtr_id_mask =
+ ((UINT32_C(1) << priv->max_mtr_bits) - 1) << mtr_id_offset;
void *actions[METER_ACTIONS];
int i;
int ret = 0;
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
}
- for (i = 0; i < RTE_MTR_DROPPED; i++) {
- int j = 0;
-
- flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
- rte_col_2_mlx5_col(i), UINT8_MAX);
- if (mtb->count_actns[i])
- actions[j++] = mtb->count_actns[i];
- if (fm->action[i] == MTR_POLICER_ACTION_DROP)
- actions[j++] = mtb->drop_actn;
- else
- actions[j++] = dtb->jump_actn;
- ret = mlx5_flow_os_create_flow(dtb->color_matcher,
- (void *)&value, j, actions,
- &dtb->policer_rules[i]);
+ /* Prepare matchers. */
+ if (!dtb->drop_matcher || !dtb->color_matcher) {
+ ret = flow_dv_prepare_mtr_matchers(dev, color_reg_c,
+ mtr_id_reg_c, mtr_id_mask,
+ dtb, &error);
if (ret) {
- DRV_LOG(ERR, "Failed to create policer rule.");
+ DRV_LOG(ERR, "Failed to setup matchers for mtr table.");
goto error;
}
}
+ /* Create Drop flow, matching meter_id only. */
+ i = 0;
+ flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_id_reg_c,
+ (fm->idx << mtr_id_offset), UINT32_MAX);
+ if (mtb->drop_count)
+ actions[i++] = mtb->drop_count;
+ actions[i++] = priv->sh->dr_drop_action;
+ ret = mlx5_flow_os_create_flow(dtb->drop_matcher->matcher_object,
+ (void *)&value, i, actions, drop_rule);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create meter policer drop rule.");
+ goto error;
+ }
+ /* Create flow matching Green color + meter_id. */
+ i = 0;
+ if (priv->mtr_reg_share) {
+ flow_dv_match_meta_reg(matcher.buf, value.buf, color_reg_c,
+ ((fm->idx << mtr_id_offset) |
+ rte_col_2_mlx5_col(RTE_COLOR_GREEN)),
+ UINT32_MAX);
+ } else {
+ flow_dv_match_meta_reg(matcher.buf, value.buf, color_reg_c,
+ rte_col_2_mlx5_col(RTE_COLOR_GREEN),
+ UINT32_MAX);
+ flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_id_reg_c,
+ fm->idx, UINT32_MAX);
+ }
+ if (mtb->green_count)
+ actions[i++] = mtb->green_count;
+ actions[i++] = dtb->jump_actn;
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher->matcher_object,
+ (void *)&value, i, actions, green_rule);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create meter policer color rule.");
+ goto error;
+ }
return 0;
error:
rte_errno = errno;
}
/**
- * Create policer rules.
+ * Prepare policer rules for all domains.
+ * If the meter is already initialized, this replaces all old rules with new ones.
*
* @param[in] dev
* Pointer to Ethernet device.
* 0 on success, -1 otherwise.
*/
static int
-flow_dv_create_policer_rules(struct rte_eth_dev *dev,
- struct mlx5_flow_meter *fm,
- const struct rte_flow_attr *attr)
+flow_dv_prepare_policer_rules(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter *fm,
+ const struct rte_flow_attr *attr)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_meter_domains_infos *mtb = fm->mfts;
+ bool initialized = false;
+ struct mlx5_flow_counter *cnt;
+ void *egress_drop_rule = NULL;
+ void *egress_green_rule = NULL;
+ void *ingress_drop_rule = NULL;
+ void *ingress_green_rule = NULL;
+ void *transfer_drop_rule = NULL;
+ void *transfer_green_rule = NULL;
int ret;
+ /* Get the statistics counters for green/drop. */
+ if (fm->policer_stats.cnt[RTE_COLOR_GREEN]) {
+ cnt = flow_dv_counter_get_by_idx(dev,
+ fm->policer_stats.cnt[RTE_COLOR_GREEN],
+ NULL);
+ mtb->green_count = cnt->action;
+ } else {
+ mtb->green_count = NULL;
+ }
+ if (fm->policer_stats.cnt[RTE_MTR_DROPPED]) {
+ cnt = flow_dv_counter_get_by_idx(dev,
+ fm->policer_stats.cnt[RTE_MTR_DROPPED],
+ NULL);
+ mtb->drop_count = cnt->action;
+ } else {
+ mtb->drop_count = NULL;
+ }
+ /*
+  * If the flow meter has been initialized, all of its policer rules
+  * were created, so whether the meter is initialized can be determined
+  * by checking any one policer rule.
+  */
+ if (mtb->egress.drop_rule)
+ initialized = true;
if (attr->egress) {
- ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
- priv->mtr_color_reg);
+ ret = flow_dv_create_policer_forward_rule(dev,
+ fm, &mtb->egress,
+ &egress_drop_rule, &egress_green_rule);
if (ret) {
DRV_LOG(ERR, "Failed to create egress policer.");
goto error;
}
}
if (attr->ingress) {
- ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
- priv->mtr_color_reg);
+ ret = flow_dv_create_policer_forward_rule(dev,
+ fm, &mtb->ingress,
+ &ingress_drop_rule, &ingress_green_rule);
if (ret) {
DRV_LOG(ERR, "Failed to create ingress policer.");
goto error;
}
}
if (attr->transfer) {
- ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
- priv->mtr_color_reg);
+ ret = flow_dv_create_policer_forward_rule(dev,
+ fm, &mtb->transfer,
+ &transfer_drop_rule, &transfer_green_rule);
if (ret) {
DRV_LOG(ERR, "Failed to create transfer policer.");
goto error;
}
}
+ /* Replace the old flows, if any exist. */
+ if (mtb->egress.drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->egress.drop_rule));
+ if (mtb->egress.green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->egress.green_rule));
+ if (mtb->ingress.drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->ingress.drop_rule));
+ if (mtb->ingress.green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->ingress.green_rule));
+ if (mtb->transfer.drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->transfer.drop_rule));
+ if (mtb->transfer.green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(mtb->transfer.green_rule));
+ mtb->egress.drop_rule = egress_drop_rule;
+ mtb->egress.green_rule = egress_green_rule;
+ mtb->ingress.drop_rule = ingress_drop_rule;
+ mtb->ingress.green_rule = ingress_green_rule;
+ mtb->transfer.drop_rule = transfer_drop_rule;
+ mtb->transfer.green_rule = transfer_green_rule;
return 0;
error:
- flow_dv_destroy_policer_rules(dev, fm, attr);
+ if (egress_drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(egress_drop_rule));
+ if (egress_green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(egress_green_rule));
+ if (ingress_drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(ingress_drop_rule));
+ if (ingress_green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(ingress_green_rule));
+ if (transfer_drop_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(transfer_drop_rule));
+ if (transfer_green_rule)
+ claim_zero(mlx5_flow_os_destroy_flow(transfer_green_rule));
+ if (!initialized)
+ flow_dv_destroy_policer_rules(dev, fm, attr);
return -1;
}
}
/**
- * Validate shared action.
+ * Validate indirect action.
* Dispatcher for action type specific validation.
*
* @param[in] dev
* @param[in] conf
* Shared action configuration.
* @param[in] action
- * The shared action object to validate.
+ * The indirect action object to validate.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*/
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
- const struct rte_flow_shared_action_conf *conf,
+ const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
struct rte_flow_error *err)
{
.query = flow_dv_query,
.create_mtr_tbls = flow_dv_create_mtr_tbl,
.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
- .create_policer_rules = flow_dv_create_policer_rules,
+ .prepare_policer_rules = flow_dv_prepare_policer_rules,
.destroy_policer_rules = flow_dv_destroy_policer_rules,
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,