net/mlx5: fix encap/decap limit for hairpin flow split
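In the hairpin split helpers below, the threshold that decides whether a RAW_ENCAP/RAW_DECAP action works on the tunnel header (and therefore belongs to the TX part of the split hairpin flow) is no longer the hard-coded sum of the Ethernet and IPv4 item sizes but the MLX5_ENCAPSULATION_DECISION_SIZE macro. The macro itself is defined outside this file and is not part of this diff; the sketch below only shows the intent, with an assumed definition (a full outer eth/IP/UDP/VXLAN header) and a hypothetical helper name:

	#include <stdbool.h>
	#include <rte_flow.h>

	/*
	 * Sketch only: the real macro lives elsewhere in the driver and is not
	 * shown in this diff; the item list below is an assumption, not copied
	 * from the tree.
	 */
	#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
						  sizeof(struct rte_flow_item_ipv4) + \
						  sizeof(struct rte_flow_item_udp) + \
						  sizeof(struct rte_flow_item_vxlan))

	/* Hypothetical helper mirroring the checks in the hunks below: an encap
	 * larger than the threshold (or a decap smaller than it) manipulates the
	 * tunnel header and forces the hairpin flow split. */
	static inline bool
	raw_encap_forces_split(const struct rte_flow_action_raw_encap *raw_encap)
	{
		return raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE;
	}

Whatever the exact macro value, the point of the change is that the three encap/decap hunks below now express the decision through a single named constant instead of repeating an approximate size sum.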
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c4071cd..642516a 100644
@@ -777,6 +777,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
+       case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
@@ -1411,6 +1412,7 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
+       enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
        unsigned int i;
 
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
@@ -1476,6 +1478,8 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
        for (i = 0; i != rss->queue_num; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+
                if (rss->queue[i] >= priv->rxqs_n)
                        return rte_flow_error_set
                                (error, EINVAL,
@@ -1485,6 +1489,15 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                 &rss->queue[i], "queue is not configured");
+               rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
+                                       struct mlx5_rxq_ctrl, rxq);
+               if (i == 0)
+                       rxq_type = rxq_ctrl->type;
+               if (rxq_type != rxq_ctrl->type)
+                       return rte_flow_error_set
+                               (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                &rss->queue[i],
+                                "combining hairpin and regular RSS queues is not supported");
        }
        return 0;
 }
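A note on the validation hunk above: the new check walks every queue referenced by the RSS action and rejects a mix of hairpin and regular Rx queues with ENOTSUP and the message shown above. A hedged illustration from the application side (the port setup, queue numbers and helper name are hypothetical; here queues 0 and 1 are regular while queue 4 was created with rte_eth_rx_hairpin_queue_setup()):

	#include <rte_common.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>

	/* Hypothetical fragment: fanning RSS out over regular and hairpin
	 * queues in one action is now rejected at validation time. */
	static int
	try_mixed_rss(uint16_t port_id, struct rte_flow_error *err)
	{
		static const uint16_t mixed_queues[] = { 0, 1, 4 };
		const struct rte_flow_attr attr = { .ingress = 1 };
		const struct rte_flow_action_rss rss = {
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.types = ETH_RSS_IP,
			.queue = mixed_queues,
			.queue_num = RTE_DIM(mixed_queues),
		};
		const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		/* Expected to fail with a negative errno (ENOTSUP) after this change. */
		return rte_flow_validate(port_id, &attr, pattern, actions, err);
	}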
@@ -3535,9 +3548,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap = actions->conf;
-                       if (raw_encap->size >
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4)))
+                       if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
                                split++;
                        action_n++;
                        break;
@@ -3972,9 +3983,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap = actions->conf;
-                       if (raw_encap->size >
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4))) {
+                       if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
                                memcpy(actions_tx, actions,
                                       sizeof(struct rte_flow_action));
                                actions_tx++;
@@ -3987,9 +3996,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        raw_decap = actions->conf;
-                       if (raw_decap->size <
-                           (sizeof(struct rte_flow_item_eth) +
-                            sizeof(struct rte_flow_item_ipv4))) {
+                       if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
                                memcpy(actions_tx, actions,
                                       sizeof(struct rte_flow_action));
                                actions_tx++;
@@ -7807,6 +7814,11 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
        const struct mlx5_flow_tbl_data_entry *tble;
        const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
 
+       if (!is_tunnel_offload_active(dev)) {
+               info->flags = 0;
+               return 0;
+       }
+
        if ((ol_flags & mask) != mask)
                goto err;
        tble = tunnel_mark_decode(dev, m->hash.fdir.hi);