/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <assert.h>
#include <limits.h>
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
              "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize the flex item
 * related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        rte_spinlock_init(&priv->flex_item_sl);
        MLX5_ASSERT(!priv->flex_item_map);
        return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t i;

        for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
                if (priv->flex_item_map & (1u << i)) {
                        struct mlx5_flex_item *flex = &priv->flex_item[i];

                        claim_zero(mlx5_list_unregister
                                        (priv->sh->flex_parsers_dv,
                                         &flex->devx_fp->entry));
                        flex->devx_fp = NULL;
                        flex->refcnt = 0;
                        priv->flex_item_map &= ~(1u << i);
                }
        }
}

/* Map a flex item pointer back to its index in the per-port array, -1 if invalid. */
static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
        uintptr_t start = (uintptr_t)&priv->flex_item[0];
        uintptr_t entry = (uintptr_t)item;
        uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

        if (entry < start ||
            idx >= MLX5_PORT_FLEX_ITEM_NUM ||
            (entry - start) % sizeof(struct mlx5_flex_item) ||
            !(priv->flex_item_map & (1u << idx)))
                return -1;
        return (int)idx;
}
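
/*
 * Worked example (illustrative): if priv->flex_item[] starts at address A
 * and sizeof(struct mlx5_flex_item) == S, a pointer equal to A + 2 * S maps
 * to index 2. A pointer that is not a whole multiple of S past A fails the
 * modulo check, and an index whose bit is clear in flex_item_map is rejected
 * as well, so stale or foreign handles yield -1.
 */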

static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
        struct mlx5_flex_item *item = NULL;

        rte_spinlock_lock(&priv->flex_item_sl);
        if (~priv->flex_item_map) {
                uint32_t idx = rte_bsf32(~priv->flex_item_map);

                if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
                        item = &priv->flex_item[idx];
                        MLX5_ASSERT(!item->refcnt);
                        MLX5_ASSERT(!item->devx_fp);
                        item->devx_fp = NULL;
                        __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
                        priv->flex_item_map |= 1u << idx;
                }
        }
        rte_spinlock_unlock(&priv->flex_item_sl);
        return item;
}
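
/*
 * Worked example (illustrative): with flex_item_map == 0x5 (slots 0 and 2
 * busy), ~flex_item_map == 0xfffffffa and rte_bsf32() yields 1, the lowest
 * free slot. Once all MLX5_PORT_FLEX_ITEM_NUM low bits are set, the lowest
 * free bit falls beyond the array bound and NULL is returned.
 */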

static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
        int idx = mlx5_flex_index(priv, item);

        MLX5_ASSERT(idx >= 0 &&
                    idx < MLX5_PORT_FLEX_ITEM_NUM &&
                    (priv->flex_item_map & (1u << idx)));
        if (idx >= 0) {
                rte_spinlock_lock(&priv->flex_item_sl);
                MLX5_ASSERT(!item->refcnt);
                MLX5_ASSERT(!item->devx_fp);
                item->devx_fp = NULL;
                __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
                priv->flex_item_map &= ~(1u << idx);
                rte_spinlock_unlock(&priv->flex_item_sl);
        }
}

/**
 * Create the flex item with specified configuration over the Ethernet device.
 *
 * @param dev
 *   Ethernet device to create flex item on.
 * @param[in] conf
 *   Flex item configuration.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_item_flex_handle *
flow_dv_item_create(struct rte_eth_dev *dev,
                    const struct rte_flow_item_flex_conf *conf,
                    struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
        struct mlx5_flex_item *flex;
        struct mlx5_list_entry *ent;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        flex = mlx5_flex_alloc(priv);
        if (!flex) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "too many flex items created on the port");
                return NULL;
        }
        ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
        if (!ent) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "flex item creation failure");
                goto error;
        }
        flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
        /* The item configuration is not translated to a parser layout yet. */
        RTE_SET_USED(conf);
        /* Mark initialized flex item valid. */
        __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
        return (struct rte_flow_item_flex_handle *)flex;

error:
        mlx5_flex_free(priv, flex);
        return NULL;
}
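
/*
 * Usage sketch (illustrative, not compiled code): applications reach this
 * routine through the generic rte_flow API rather than by calling it
 * directly. The configuration is left zeroed below because this routine does
 * not translate it yet (see RTE_SET_USED above); port_id is assumed to be a
 * valid mlx5 port.
 *
 *	struct rte_flow_item_flex_conf conf = { 0 };
 *	struct rte_flow_error flow_error;
 *	struct rte_flow_item_flex_handle *handle;
 *
 *	handle = rte_flow_flex_item_create(port_id, &conf, &flow_error);
 *	if (handle == NULL)
 *		printf("flex item create: %s\n", flow_error.message);
 */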

/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
                     const struct rte_flow_item_flex_handle *handle,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_item *flex =
                (struct mlx5_flex_item *)(uintptr_t)handle;
        uint32_t old_refcnt = 1;
        int rc;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        rte_spinlock_lock(&priv->flex_item_sl);
        if (mlx5_flex_index(priv, flex) < 0) {
                rte_spinlock_unlock(&priv->flex_item_sl);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "invalid flex item handle value");
        }
        /* Only an otherwise unreferenced item (refcnt == 1) can be released. */
        if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                rte_spinlock_unlock(&priv->flex_item_sl);
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "flex item has flow references");
        }
        /* Flex item is marked as invalid, we can leave locked section. */
        rte_spinlock_unlock(&priv->flex_item_sl);
        MLX5_ASSERT(flex->devx_fp);
        rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
                                  &flex->devx_fp->entry);
        flex->devx_fp = NULL;
        mlx5_flex_free(priv, flex);
        if (rc < 0)
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "flex item release failure");
        return 0;
}
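
/*
 * Usage sketch (illustrative, not compiled code): the matching
 * application-side release call. Releasing a handle still referenced by
 * flows fails with EBUSY, as implemented above.
 *
 *	struct rte_flow_error flow_error;
 *
 *	if (rte_flow_flex_item_release(port_id, handle, &flow_error) < 0)
 *		printf("flex item release: %s\n", flow_error.message);
 */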

/* DevX flex parser list callbacks. */
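/*
 * The callbacks below are not called directly. They are registered when the
 * shared flex parser list is created, roughly as follows (sketch only, the
 * exact call site lives outside this file):
 *
 *	sh->flex_parsers_dv = mlx5_list_create("flex parsers", sh, true,
 *					       mlx5_flex_parser_create_cb,
 *					       mlx5_flex_parser_match_cb,
 *					       mlx5_flex_parser_remove_cb,
 *					       mlx5_flex_parser_clone_cb,
 *					       mlx5_flex_parser_clone_free_cb);
 *
 * mlx5_list_register() then reuses an existing parser via match_cb or
 * builds a new DevX object via create_cb.
 */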
struct mlx5_list_entry *
mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
{
        struct mlx5_dev_ctx_shared *sh = list_ctx;
        struct mlx5_flex_parser_devx *fp, *conf = ctx;
        int ret;

        fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
                         0, SOCKET_ID_ANY);
        if (!fp)
                return NULL;
        /* Copy the requested configurations. */
        fp->num_samples = conf->num_samples;
        memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
        /* Create DevX flex parser. */
        fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
                                                        &fp->devx_conf);
        if (!fp->devx_obj)
                goto error;
        /* Query the firmware assigned sample ids. */
        ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
                                                fp->sample_ids,
                                                fp->num_samples);
        if (ret)
                goto error;
        DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
                (const void *)fp, fp->num_samples);
        return &fp->entry;
error:
        /* Destroy the parser object if it was created before the failure. */
        if (fp->devx_obj)
                mlx5_devx_cmd_destroy(fp->devx_obj);
        mlx5_free(fp);
        return NULL;
}

/*
 * Match callback: returns zero when the requested configuration exactly
 * matches an existing list entry, non-zero otherwise.
 */
int
mlx5_flex_parser_match_cb(void *list_ctx,
                          struct mlx5_list_entry *iter, void *ctx)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(iter, struct mlx5_flex_parser_devx, entry);
        struct mlx5_flex_parser_devx *org =
                container_of(ctx, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        return !iter || !ctx || memcmp(&fp->devx_conf,
                                       &org->devx_conf,
                                       sizeof(fp->devx_conf));
}

void
mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(entry, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        MLX5_ASSERT(fp->devx_obj);
        claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
        DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
        mlx5_free(entry);
}

/*
 * Clone callback: the mlx5 list keeps per-lcore copies of an entry,
 * so the clone is a plain byte copy of the original.
 */
struct mlx5_list_entry *
mlx5_flex_parser_clone_cb(void *list_ctx,
                          struct mlx5_list_entry *entry, void *ctx)
{
        struct mlx5_flex_parser_devx *fp;

        RTE_SET_USED(list_ctx);
        RTE_SET_USED(entry);
        fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
                         0, SOCKET_ID_ANY);
        if (!fp)
                return NULL;
        memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
        return &fp->entry;
}

void
mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(entry, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        mlx5_free(fp);
}