+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ * Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+	struct mlx5_flow_meter_policy *mtr_policy)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+	/* RX-queue fate actions only exist in the ingress domain. */
+	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+	uint32_t i, j;
+	uint16_t sub_policy_num, new_policy_num;
+
+	/* Serialize against concurrent sub-policy creation/destruction. */
+	rte_spinlock_lock(&mtr_policy->sl);
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		switch (mtr_policy->act_cnt[i].fate_action) {
+		case MLX5_FLOW_FATE_SHARED_RSS:
+			/*
+			 * sub_policy_num packs a per-domain count; extract the
+			 * ingress count from its bitfield slot.
+			 */
+			sub_policy_num = (mtr_policy->sub_policy_num >>
+				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+				MLX5_MTR_SUB_POLICY_NUM_MASK;
+			new_policy_num = sub_policy_num;
+			for (j = 0; j < sub_policy_num; j++) {
+				sub_policy =
+					mtr_policy->sub_policys[domain][j];
+				if (sub_policy) {
+					__flow_dv_destroy_sub_policy_rules(dev,
+						sub_policy);
+				/*
+				 * Entry 0 is retained (only its rules are
+				 * destroyed above); every other sub policy is
+				 * released back to the policy ipool and its
+				 * slot cleared.
+				 */
+				if (sub_policy !=
+					mtr_policy->sub_policys[domain][0]) {
+					mtr_policy->sub_policys[domain][j] =
+								NULL;
+					mlx5_ipool_free
+				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+						sub_policy->idx);
+						new_policy_num--;
+					}
+				}
+			}
+			/* Write the reduced count back into the ingress slot
+			 * of the packed bitfield, leaving other domains
+			 * untouched.
+			 */
+			if (new_policy_num != sub_policy_num) {
+				mtr_policy->sub_policy_num &=
+				~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+				mtr_policy->sub_policy_num |=
+				(new_policy_num &
+					MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+			}
+			break;
+		case MLX5_FLOW_FATE_QUEUE:
+			/*
+			 * Queue fate uses a single sub policy; destroy its
+			 * rules but keep the table itself.
+			 */
+			sub_policy = mtr_policy->sub_policys[domain][0];
+			__flow_dv_destroy_sub_policy_rules(dev,
+						sub_policy);
+			break;
+		default:
+			/*Other actions without queue and do nothing*/
+			break;
+		}
+	}
+	rte_spinlock_unlock(&mtr_policy->sl);
+}
+