net/mlx5: optimize action flags in flow handle
author      Suanming Mou <suanmingm@mellanox.com>
Thu, 16 Apr 2020 08:34:23 +0000 (16:34 +0800)
committer   Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 21 Apr 2020 11:57:09 +0000 (13:57 +0200)
Since only a few bits of act_flags are needed for flow destroy, storing
the whole 64-bit field in every flow handle is wasteful. Move act_flags
out of the flow handle and keep only the bits needed for flow destroy,
saving some bytes in the flow handle data structure.

The fate action type and the mark bit are kept in the handle since they
are still needed during flow destroy (a simplified sketch of the idea
follows the file list below).

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
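
For illustration, a minimal C sketch of the idea (not part of the patch;
the structs, field names and values below are simplified stand-ins for
struct mlx5_flow_handle, not the real driver layout). One mark bit plus a
3-bit fate type replaces the 64-bit act_flags mask for destroy-time
decisions; the exact bytes saved in the driver depend on the full struct
layout and padding.

    /* Minimal, self-contained C sketch (hypothetical structs). */
    #include <stdint.h>
    #include <stdio.h>

    enum fate_type {                  /* mirrors enum mlx5_flow_fate_type */
            FATE_NONE,                /* egress flow, no fate resource */
            FATE_QUEUE,               /* QUEUE and RSS both release an hrxq */
            FATE_JUMP,
            FATE_PORT_ID,
            FATE_DROP,
            FATE_MAX,
    };

    struct handle_before {            /* per device-flow handle, before */
            uint64_t act_flags;       /* full MLX5_FLOW_ACTION_* mask */
            uint32_t qrss_id;         /* Q/RSS suffix subflow tag */
            uint32_t hrxq;            /* hash Rx queue object index */
    };

    struct handle_after {             /* per device-flow handle, after */
            uint32_t qrss_id;         /* Q/RSS suffix subflow tag */
            uint32_t mark:1;          /* FLAG/MARK seen, for rxq flags */
            uint32_t fate_action:3;   /* enum fate_type, drives release */
            uint32_t hrxq;            /* union with jump/port_id in reality */
    };

    int main(void)
    {
            printf("handle size before: %zu, after: %zu\n",
                   sizeof(struct handle_before),
                   sizeof(struct handle_after));
            return 0;                 /* 16 and 12 on common LP64 ABIs */
    }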

drivers/net/mlx5/mlx5_flow.c
index bf95a40..ffc2910 100644
@@ -722,8 +722,7 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
                       struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = !!(dev_handle->act_flags &
-                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
@@ -800,8 +799,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
                        struct mlx5_flow_handle *dev_handle)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       const int mark = !!(dev_handle->act_flags &
-                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+       const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
@@ -2718,7 +2716,7 @@ flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
         * help to do the optimization work for source code.
         * If no decap actions, use the layers directly.
         */
-       if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+       if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
                return dev_flow->handle->layers;
        /* Convert L3 layers with decap action. */
        if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
drivers/net/mlx5/mlx5_flow.h
index fc97424..67f767b 100644
@@ -339,6 +339,16 @@ enum mlx5_flow_drv_type {
        MLX5_FLOW_TYPE_MAX,
 };
 
+/* Fate action type. */
+enum mlx5_flow_fate_type {
+       MLX5_FLOW_FATE_NONE, /* Egress flow. */
+       MLX5_FLOW_FATE_QUEUE,
+       MLX5_FLOW_FATE_JUMP,
+       MLX5_FLOW_FATE_PORT_ID,
+       MLX5_FLOW_FATE_DROP,
+       MLX5_FLOW_FATE_MAX,
+};
+
 /* Matcher PRM representation */
 struct mlx5_flow_dv_match_params {
        size_t size;
@@ -502,19 +512,21 @@ struct mlx5_flow_handle {
        /**< Index to next device flow handle. */
        uint64_t layers;
        /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
-       uint64_t act_flags;
-       /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
        void *ib_flow; /**< Verbs flow pointer. */
        struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
        union {
                uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
                uint32_t mtr_flow_id; /**< Unique meter match flow id. */
        };
+       uint32_t mark:1; /**< Metadata rxq mark flag. */
+       uint32_t fate_action:3; /**< Fate action type. */
        union {
                uint32_t hrxq; /**< Hash Rx queue object index. */
                uint32_t jump; /**< Index to the jump action resource. */
                uint32_t port_id_action;
                /**< Index to port ID action resource. */
+               uint32_t fate_idx;
+               /**< Generic value indicating the fate action. */
        };
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_flow_handle_dv dvh;
@@ -624,6 +636,8 @@ struct mlx5_flow_verbs_workspace {
 struct mlx5_flow {
        struct rte_flow *flow; /**< Pointer to the main flow. */
        uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+       uint64_t act_flags;
+       /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
        bool external; /**< true if the flow is created external to PMD. */
        uint8_t ingress; /**< 1 if the flow is ingress. */
        union {
drivers/net/mlx5/mlx5_flow_dv.c
index a44265b..e28f01d 100644
@@ -7445,9 +7445,11 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        dev_flow->dv.actions[actions_n++] =
                                        dev_flow->dv.port_id_action->action;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
+                       dev_flow->handle->mark = 1;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                struct rte_flow_action_mark mark = {
                                        .id = MLX5_FLOW_MARK_DEFAULT,
@@ -7476,6 +7478,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        action_flags |= MLX5_FLOW_ACTION_MARK;
+                       dev_flow->handle->mark = 1;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                const struct rte_flow_action_mark *mark =
                                        (const struct rte_flow_action_mark *)
@@ -7520,6 +7523,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        action_flags |= MLX5_FLOW_ACTION_DROP;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        MLX5_ASSERT(flow->rss.queue);
@@ -7527,6 +7531,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        flow->rss.queue_num = 1;
                        (*flow->rss.queue)[0] = queue->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        MLX5_ASSERT(flow->rss.queue);
@@ -7543,6 +7548,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                         * when expanding items for RSS.
                         */
                        action_flags |= MLX5_FLOW_ACTION_RSS;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        if (!dev_conf->devx) {
@@ -7702,6 +7708,7 @@ cnt_err:
                        dev_flow->dv.actions[actions_n++] =
                                        dev_flow->dv.jump->action;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
@@ -7844,7 +7851,7 @@ cnt_err:
                        modify_action_position = actions_n++;
        }
        dev_flow->dv.actions_n = actions_n;
-       handle->act_flags = action_flags;
+       dev_flow->act_flags = action_flags;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                int item_type = items->type;
@@ -8100,7 +8107,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                dh = dev_flow->handle;
                dv_h = &dh->dvh;
                n = dv->actions_n;
-               if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+               if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                        if (dv->transfer) {
                                dv->actions[n++] = priv->sh->esw_drop_action;
                        } else {
@@ -8123,8 +8130,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                dh->hrxq = UINT32_MAX;
                                dv->actions[n++] = drop_hrxq->action;
                        }
-               } else if (dh->act_flags &
-                          (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
+               } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;
 
@@ -8185,12 +8191,10 @@ error:
                       handle_idx, dh, next) {
                /* hrxq is union, don't clear it if the flag is not set. */
                if (dh->hrxq) {
-                       if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+                       if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                                mlx5_hrxq_drop_release(dev);
                                dh->hrxq = 0;
-                       } else if (dh->act_flags &
-                                 (MLX5_FLOW_ACTION_QUEUE |
-                                 MLX5_FLOW_ACTION_RSS)) {
+                       } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
                                mlx5_hrxq_release(dev, dh->hrxq);
                                dh->hrxq = 0;
                        }
@@ -8431,6 +8435,33 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
        return 1;
 }
 
+/**
+ * Release the fate resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
+ */
+static void
+flow_dv_fate_resource_release(struct rte_eth_dev *dev,
+                              struct mlx5_flow_handle *handle)
+{
+       if (!handle->fate_idx)
+               return;
+       if (handle->fate_action == MLX5_FLOW_FATE_DROP)
+               mlx5_hrxq_drop_release(dev);
+       else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
+               mlx5_hrxq_release(dev, handle->hrxq);
+       else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
+               flow_dv_jump_tbl_resource_release(dev, handle);
+       else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
+               flow_dv_port_id_action_resource_release(dev, handle);
+       else
+               DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+       handle->fate_idx = 0;
+}
+
 /**
  * Remove the flow from the NIC but keeps it in memory.
  * Lock free, (mutex should be acquired by caller).
@@ -8459,18 +8490,9 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
                        claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
                        dh->ib_flow = NULL;
                }
-               /* hrxq is union, don't touch it only the flag is set. */
-               if (dh->hrxq) {
-                       if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
-                               mlx5_hrxq_drop_release(dev);
-                               dh->hrxq = 0;
-                       } else if (dh->act_flags &
-                                 (MLX5_FLOW_ACTION_QUEUE |
-                                 MLX5_FLOW_ACTION_RSS)) {
-                               mlx5_hrxq_release(dev, dh->hrxq);
-                               dh->hrxq = 0;
-                       }
-               }
+               if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
+                   dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+                       flow_dv_fate_resource_release(dev, dh);
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
                handle_idx = dh->next.next;
@@ -8517,17 +8539,13 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                        flow_dv_encap_decap_resource_release(dev, dev_handle);
                if (dev_handle->dvh.modify_hdr)
                        flow_dv_modify_hdr_resource_release(dev_handle);
-               if (dev_handle->act_flags & MLX5_FLOW_ACTION_JUMP)
-                       flow_dv_jump_tbl_resource_release(dev, dev_handle);
-               if (dev_handle->act_flags & MLX5_FLOW_ACTION_PORT_ID)
-                       flow_dv_port_id_action_resource_release(dev,
-                                                               dev_handle);
                if (dev_handle->dvh.push_vlan_res)
                        flow_dv_push_vlan_action_resource_release(dev,
                                                                  dev_handle);
                if (dev_handle->dvh.tag_resource)
                        flow_dv_tag_release(dev,
                                            dev_handle->dvh.tag_resource);
+               flow_dv_fate_resource_release(dev, dev_handle);
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                           tmp_idx);
        }
drivers/net/mlx5/mlx5_flow_verbs.c
index 931d547..1d56b03 100644
@@ -1589,22 +1589,27 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        flow_verbs_translate_action_flag(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
+                       dev_flow->handle->mark = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        flow_verbs_translate_action_mark(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_MARK;
+                       dev_flow->handle->mark = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow_verbs_translate_action_drop(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_DROP;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        flow_verbs_translate_action_queue(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        flow_verbs_translate_action_rss(dev_flow, actions);
                        action_flags |= MLX5_FLOW_ACTION_RSS;
+                       dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_verbs_translate_action_count(dev_flow,
@@ -1621,7 +1626,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                                                  "action not supported");
                }
        }
-       dev_flow->handle->act_flags = action_flags;
+       dev_flow->act_flags = action_flags;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1756,12 +1761,11 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
                }
                /* hrxq is union, don't touch it only the flag is set. */
                if (handle->hrxq) {
-                       if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+                       if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
                                mlx5_hrxq_drop_release(dev);
                                handle->hrxq = 0;
-                       } else if (handle->act_flags &
-                                 (MLX5_FLOW_ACTION_QUEUE |
-                                 MLX5_FLOW_ACTION_RSS)) {
+                       } else if (handle->fate_action ==
+                                  MLX5_FLOW_FATE_QUEUE) {
                                mlx5_hrxq_release(dev, handle->hrxq);
                                handle->hrxq = 0;
                        }
@@ -1833,7 +1837,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
                dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
                handle = dev_flow->handle;
-               if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+               if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
                        hrxq = mlx5_hrxq_drop_new(dev);
                        if (!hrxq) {
                                rte_flow_error_set
@@ -1898,12 +1902,11 @@ error:
                       dev_handles, handle, next) {
                /* hrxq is union, don't touch it only the flag is set. */
                if (handle->hrxq) {
-                       if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+                       if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
                                mlx5_hrxq_drop_release(dev);
                                handle->hrxq = 0;
-                       } else if (handle->act_flags &
-                                 (MLX5_FLOW_ACTION_QUEUE |
-                                 MLX5_FLOW_ACTION_RSS)) {
+                       } else if (handle->fate_action ==
+                                  MLX5_FLOW_FATE_QUEUE) {
                                mlx5_hrxq_release(dev, handle->hrxq);
                                handle->hrxq = 0;
                        }