b7bc4af6fb24d77cd70585df869fcfca038a3e49
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_flex.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2021 NVIDIA Corporation & Affiliates
3  */
4 #include <rte_malloc.h>
5 #include <mlx5_devx_cmds.h>
6 #include <mlx5_malloc.h>
7 #include "mlx5.h"
8 #include "mlx5_flow.h"
9
/* flex_item_map is a uint32_t bitmask with one bit per port flex item,
 * so the configured maximum must fit into 32 bits.
 */
static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
              "Flex item maximal number exceeds uint32_t bit width");
12
13 /**
14  *  Routine called once on port initialization to init flex item
15  *  related infrastructure initialization
16  *
17  * @param dev
18  *   Ethernet device to perform flex item initialization
19  *
20  * @return
21  *   0 on success, a negative errno value otherwise and rte_errno is set.
22  */
23 int
24 mlx5_flex_item_port_init(struct rte_eth_dev *dev)
25 {
26         struct mlx5_priv *priv = dev->data->dev_private;
27
28         rte_spinlock_init(&priv->flex_item_sl);
29         MLX5_ASSERT(!priv->flex_item_map);
30         return 0;
31 }
32
33 /**
34  *  Routine called once on port close to perform flex item
35  *  related infrastructure cleanup.
36  *
37  * @param dev
38  *   Ethernet device to perform cleanup
39  */
40 void
41 mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
42 {
43         struct mlx5_priv *priv = dev->data->dev_private;
44         uint32_t i;
45
46         for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
47                 if (priv->flex_item_map & (1 << i)) {
48                         /* DevX object dereferencing should be provided here. */
49                         priv->flex_item_map &= ~(1 << i);
50                 }
51         }
52 }
53
54 static int
55 mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
56 {
57         uintptr_t start = (uintptr_t)&priv->flex_item[0];
58         uintptr_t entry = (uintptr_t)item;
59         uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
60
61         if (entry < start ||
62             idx >= MLX5_PORT_FLEX_ITEM_NUM ||
63             (entry - start) % sizeof(struct mlx5_flex_item) ||
64             !(priv->flex_item_map & (1u << idx)))
65                 return -1;
66         return (int)idx;
67 }
68
69 static struct mlx5_flex_item *
70 mlx5_flex_alloc(struct mlx5_priv *priv)
71 {
72         struct mlx5_flex_item *item = NULL;
73
74         rte_spinlock_lock(&priv->flex_item_sl);
75         if (~priv->flex_item_map) {
76                 uint32_t idx = rte_bsf32(~priv->flex_item_map);
77
78                 if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
79                         item = &priv->flex_item[idx];
80                         MLX5_ASSERT(!item->refcnt);
81                         MLX5_ASSERT(!item->devx_fp);
82                         item->devx_fp = NULL;
83                         __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
84                         priv->flex_item_map |= 1u << idx;
85                 }
86         }
87         rte_spinlock_unlock(&priv->flex_item_sl);
88         return item;
89 }
90
91 static void
92 mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
93 {
94         int idx = mlx5_flex_index(priv, item);
95
96         MLX5_ASSERT(idx >= 0 &&
97                     idx < MLX5_PORT_FLEX_ITEM_NUM &&
98                     (priv->flex_item_map & (1u << idx)));
99         if (idx >= 0) {
100                 rte_spinlock_lock(&priv->flex_item_sl);
101                 MLX5_ASSERT(!item->refcnt);
102                 MLX5_ASSERT(!item->devx_fp);
103                 item->devx_fp = NULL;
104                 __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
105                 priv->flex_item_map &= ~(1u << idx);
106                 rte_spinlock_unlock(&priv->flex_item_sl);
107         }
108 }
109
110 /**
111  * Create the flex item with specified configuration over the Ethernet device.
112  *
113  * @param dev
114  *   Ethernet device to create flex item on.
115  * @param[in] conf
116  *   Flex item configuration.
117  * @param[out] error
118  *   Perform verbose error reporting if not NULL. PMDs initialize this
119  *   structure in case of error only.
120  *
121  * @return
122  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
123  */
124 struct rte_flow_item_flex_handle *
125 flow_dv_item_create(struct rte_eth_dev *dev,
126                     const struct rte_flow_item_flex_conf *conf,
127                     struct rte_flow_error *error)
128 {
129         struct mlx5_priv *priv = dev->data->dev_private;
130         struct mlx5_flex_item *flex;
131
132         MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
133         flex = mlx5_flex_alloc(priv);
134         if (!flex) {
135                 rte_flow_error_set(error, ENOMEM,
136                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
137                                    "too many flex items created on the port");
138                 return NULL;
139         }
140         RTE_SET_USED(conf);
141         /* Mark initialized flex item valid. */
142         __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
143         return (struct rte_flow_item_flex_handle *)flex;
144 }
145
/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
		     const struct rte_flow_item_flex_handle *handle,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex =
		(struct mlx5_flex_item *)(uintptr_t)handle;
	/* Expected refcnt: only the creation reference remains. */
	uint32_t old_refcnt = 1;

	/* Flex item release is restricted to the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * Both the handle validation and the refcnt transition below must
	 * happen under the lock, so a concurrent release cannot free the
	 * slot between the index lookup and the CAS.
	 */
	rte_spinlock_lock(&priv->flex_item_sl);
	if (mlx5_flex_index(priv, flex) < 0) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "invalid flex item handle value");
	}
	/*
	 * Atomically drop 1 -> 0. Failure means extra references are held
	 * (flows still use the item), so it cannot be destroyed now.
	 */
	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item has flow references");
	}
	/* Flex item is marked as invalid, we can leave locked section. */
	rte_spinlock_unlock(&priv->flex_item_sl);
	mlx5_flex_free(priv, flex);
	return 0;
}