+/**
+ * Find the policy table for prefix table with RSS.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ * @param[in] rss_desc
+ * Pointer to rss_desc
+ * @return
+ * Pointer to table set on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy,
+ struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ struct mlx5_flow_meter_info *next_fm;
+ struct mlx5_flow_meter_policy *next_policy;
+ struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
+ struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
+ struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ bool reuse_sub_policy;
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ while (true) {
+ /* Iterate hierarchy to get all policies in this hierarchy. */
+ policies[i++] = mtr_policy;
+ if (!mtr_policy->is_hierarchy)
+ break;
+ if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
+ DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
+ return NULL;
+ }
+ next_fm = mlx5_flow_meter_find(priv,
+ mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
+ if (!next_fm) {
+ DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
+ return NULL;
+ }
+ next_policy =
+ mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
+ NULL);
+ MLX5_ASSERT(next_policy);
+ mtr_policy = next_policy;
+ }
+ while (i) {
+ /**
+ * From last policy to the first one in hierarchy,
+ * create / get the sub policy for each of them.
+ */
+ sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
+ policies[--i],
+ rss_desc,
+ next_sub_policy,
+ &reuse_sub_policy);
+ if (!sub_policy) {
+ DRV_LOG(ERR, "Failed to get the sub policy.");
+ goto err_exit;
+ }
+ if (!reuse_sub_policy)
+ sub_policies[j++] = sub_policy;
+ next_sub_policy = sub_policy;
+ }
+ return sub_policy;
+err_exit:
+ while (j) {
+ uint16_t sub_policy_num;
+
+ sub_policy = sub_policies[--j];
+ mtr_policy = sub_policy->main_policy;
+ __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
+ if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ mtr_policy->sub_policys[domain][sub_policy_num - 1] =
+ NULL;
+ sub_policy_num--;
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
+ mtr_policy->sub_policy_num |=
+ (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ }
+ }
+ return NULL;
+}
+
/**
 * Create the sub policy tag rule for all meters in hierarchy.
 *
 * Recursively walks the meter hierarchy (tail call at the end) and, for
 * each level whose next meter has a drop counter, installs per-color
 * rules that tag the packet, re-meter it and jump to the next policy
 * table. Only the transfer domain is handled (attr.transfer = 1).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table.
 * @param[in] src_port
 *   The src port this extra rule should use.
 * @param[in] item
 *   The src port match item.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm,
				int32_t src_port,
				const struct rte_flow_item *item,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_meter_sub_policy *sub_policy;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_sub_policy_color_rule *color_rule;
	struct mlx5_meter_policy_acts acts;
	uint32_t color_reg_c_idx;
	/* NOTE(review): UINT16_MAX seems to mark "no valid src port" — confirm with callers. */
	bool mtr_first = (src_port != UINT16_MAX) ? true : false;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
		.reserved = 0,
	};
	uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
	int i;

	mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
	MLX5_ASSERT(mtr_policy);
	/* Nothing to do for a terminal (non-hierarchy) policy. */
	if (!mtr_policy->is_hierarchy)
		return 0;
	next_fm = mlx5_flow_meter_find(priv,
			mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
	if (!next_fm) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Failed to find next meter in hierarchy.");
	}
	/* No drop counter on the next meter: skip rule creation, recurse. */
	if (!next_fm->drop_cnt)
		goto exit;
	color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
	sub_policy = mtr_policy->sub_policys[domain][0];
	for (i = 0; i < RTE_COLORS; i++) {
		bool rule_exist = false;
		struct mlx5_meter_policy_action_container *act_cnt;

		/* Only GREEN is handled; stop before YELLOW/RED. */
		if (i >= RTE_COLOR_YELLOW)
			break;
		/* Skip if a rule for this src port already exists. */
		TAILQ_FOREACH(color_rule,
			      &sub_policy->color_rules[i], next_port)
			if (color_rule->src_port == src_port) {
				rule_exist = true;
				break;
			}
		if (rule_exist)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule)
			return rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "No memory to create tag color rule.");
		color_rule->src_port = src_port;
		attr.priority = i;
		next_policy = mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
		MLX5_ASSERT(next_policy);
		next_sub_policy = next_policy->sub_policys[domain][0];
		tbl_data = container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
		act_cnt = &mtr_policy->act_cnt[i];
		/* Action order depends on whether meter runs first. */
		if (mtr_first) {
			acts.dv_actions[0] = next_fm->meter_action;
			acts.dv_actions[1] = act_cnt->modify_hdr->action;
		} else {
			acts.dv_actions[0] = act_cnt->modify_hdr->action;
			acts.dv_actions[1] = next_fm->meter_action;
		}
		/* Last action: jump to the next policy table. */
		acts.dv_actions[2] = tbl_data->jump.action;
		acts.actions_n = 3;
		if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
			/* Attach failed — avoid detaching it in err_exit. */
			next_fm = NULL;
			goto err_exit;
		}
		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
				&attr, true, item,
				&color_rule->matcher, error)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter matcher.");
			goto err_exit;
		}
		if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
				(enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts.actions_n, acts.dv_actions,
				true, item,
				&color_rule->rule, &attr)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter rule.");
			goto err_exit;
		}
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
	}
exit:
	/**
	 * Recursive call to iterate all meters in hierarchy and
	 * create needed rules.
	 */
	return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
						src_port, item, error);
err_exit:
	/* Tear down the partially built color rule (not yet in the list). */
	if (color_rule) {
		if (color_rule->rule)
			mlx5_flow_os_destroy_flow(color_rule->rule);
		if (color_rule->matcher) {
			struct mlx5_flow_tbl_data_entry *tbl =
				container_of(color_rule->matcher->tbl,
					     typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
					     &color_rule->matcher->entry);
		}
		mlx5_free(color_rule);
	}
	/* Only detach if the meter was successfully attached above. */
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -rte_errno;
}
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+ struct mlx5_flow_meter_policy *mtr_policy)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+ uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+ uint32_t i, j;
+ uint16_t sub_policy_num, new_policy_num;
+
+ rte_spinlock_lock(&mtr_policy->sl);
+ for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+ switch (mtr_policy->act_cnt[i].fate_action) {
+ case MLX5_FLOW_FATE_SHARED_RSS:
+ sub_policy_num = (mtr_policy->sub_policy_num >>
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+ MLX5_MTR_SUB_POLICY_NUM_MASK;
+ new_policy_num = sub_policy_num;
+ for (j = 0; j < sub_policy_num; j++) {
+ sub_policy =
+ mtr_policy->sub_policys[domain][j];
+ if (sub_policy) {
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ if (sub_policy !=
+ mtr_policy->sub_policys[domain][0]) {
+ mtr_policy->sub_policys[domain][j] =
+ NULL;
+ mlx5_ipool_free
+ (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+ sub_policy->idx);
+ new_policy_num--;
+ }
+ }
+ }
+ if (new_policy_num != sub_policy_num) {
+ mtr_policy->sub_policy_num &=
+ ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+ mtr_policy->sub_policy_num |=
+ (new_policy_num &
+ MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+ (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+ }
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
+ sub_policy = mtr_policy->sub_policys[domain][0];
+ __flow_dv_destroy_sub_policy_rules(dev,
+ sub_policy);
+ break;
+ default:
+ /*Other actions without queue and do nothing*/
+ break;
+ }
+ }
+ rte_spinlock_unlock(&mtr_policy->sl);
+}
+