drivers/net/mlx5/mlx5_flow_hw.c (dpdk.git @ 4214a63a735dd3e2450937a67ae3d82dcbe1291d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4
5 #include <rte_flow.h>
6
7 #include <mlx5_malloc.h>
8 #include "mlx5_defs.h"
9 #include "mlx5_flow.h"
10
11 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
12
13 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
14
15 /**
16  * Create flow action template.
17  *
18  * @param[in] dev
19  *   Pointer to the rte_eth_dev structure.
20  * @param[in] attr
21  *   Pointer to the action template attributes.
22  * @param[in] actions
23  *   Associated actions (list terminated by the END action).
24  * @param[in] masks
25  *   List of actions that marks which of the action's members are constant.
26  * @param[out] error
27  *   Pointer to error structure.
28  *
29  * @return
30  *   Action template pointer on success, NULL otherwise and rte_errno is set.
31  */
32 static struct rte_flow_actions_template *
33 flow_hw_actions_template_create(struct rte_eth_dev *dev,
34                         const struct rte_flow_actions_template_attr *attr,
35                         const struct rte_flow_action actions[],
36                         const struct rte_flow_action masks[],
37                         struct rte_flow_error *error)
38 {
39         struct mlx5_priv *priv = dev->data->dev_private;
40         int len, act_len, mask_len, i;
41         struct rte_flow_actions_template *at;
42
43         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
44                                 NULL, 0, actions, error);
45         if (act_len <= 0)
46                 return NULL;
47         len = RTE_ALIGN(act_len, 16);
48         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
49                                  NULL, 0, masks, error);
50         if (mask_len <= 0)
51                 return NULL;
52         len += RTE_ALIGN(mask_len, 16);
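        /*
         * One allocation holds the template structure followed by the
         * converted actions and masks; "len" budgets 16-byte aligned room
         * for both converted arrays.
         */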
53         at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
54         if (!at) {
55                 rte_flow_error_set(error, ENOMEM,
56                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
57                                    NULL,
58                                    "cannot allocate action template");
59                 return NULL;
60         }
61         at->attr = *attr;
62         at->actions = (struct rte_flow_action *)(at + 1);
63         act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
64                                 actions, error);
65         if (act_len <= 0)
66                 goto error;
67         at->masks = (struct rte_flow_action *)
68                     (((uint8_t *)at->actions) + act_len);
69         mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
70                                  len - act_len, masks, error);
71         if (mask_len <= 0)
72                 goto error;
73         /*
74          * The mlx5 PMD stores the indirect action index directly in the
75          * action conf pointer. rte_flow_conv() copies what the conf pointer
76          * refers to, so restore the original conf (the index) here.
77          */
78         for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
79              actions++, masks++, i++) {
80                 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
81                         at->actions[i].conf = actions->conf;
82                         at->masks[i].conf = masks->conf;
83                 }
84         }
85         __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
86         LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
87         return at;
88 error:
89         mlx5_free(at);
90         return NULL;
91 }
92
93 /**
94  * Destroy flow action template.
95  *
96  * @param[in] dev
97  *   Pointer to the rte_eth_dev structure.
98  * @param[in] template
99  *   Pointer to the action template to be destroyed.
100  * @param[out] error
101  *   Pointer to error structure.
102  *
103  * @return
104  *   0 on success, a negative errno value otherwise and rte_errno is set.
105  */
106 static int
107 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
108                                  struct rte_flow_actions_template *template,
109                                  struct rte_flow_error *error)
110 {
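        /*
         * The creator owns a single reference (refcnt is set to 1 on
         * creation); a higher count means the template is still referenced
         * elsewhere, e.g. by a template table, so refuse to destroy it.
         */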
111         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
112                 DRV_LOG(WARNING, "Action template %p is still in use.",
113                         (void *)template);
114                 return rte_flow_error_set(error, EBUSY,
115                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
116                                    NULL,
117                                    "action template is in use");
118         }
119         LIST_REMOVE(template, next);
120         mlx5_free(template);
121         return 0;
122 }
123
124 /**
125  * Create flow item template.
126  *
127  * @param[in] dev
128  *   Pointer to the rte_eth_dev structure.
129  * @param[in] attr
130  *   Pointer to the item template attributes.
131  * @param[in] items
132  *   The template item pattern.
133  * @param[out] error
134  *   Pointer to error structure.
135  *
136  * @return
137  *  Item template pointer on success, NULL otherwise and rte_errno is set.
138  */
139 static struct rte_flow_pattern_template *
140 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
141                              const struct rte_flow_pattern_template_attr *attr,
142                              const struct rte_flow_item items[],
143                              struct rte_flow_error *error)
144 {
145         struct mlx5_priv *priv = dev->data->dev_private;
146         struct rte_flow_pattern_template *it;
147
148         it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
149         if (!it) {
150                 rte_flow_error_set(error, ENOMEM,
151                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
152                                    NULL,
153                                    "cannot allocate item template");
154                 return NULL;
155         }
156         it->attr = *attr;
157         it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
158         if (!it->mt) {
159                 mlx5_free(it);
160                 rte_flow_error_set(error, rte_errno,
161                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
162                                    NULL,
163                                    "cannot create match template");
164                 return NULL;
165         }
166         __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
167         LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
168         return it;
169 }
170
171 /**
172  * Destroy flow item template.
173  *
174  * @param[in] dev
175  *   Pointer to the rte_eth_dev structure.
176  * @param[in] template
177  *   Pointer to the item template to be destroyed.
178  * @param[out] error
179  *   Pointer to error structure.
180  *
181  * @return
182  *   0 on success, a negative errno value otherwise and rte_errno is set.
183  */
184 static int
185 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
186                               struct rte_flow_pattern_template *template,
187                               struct rte_flow_error *error)
188 {
189         if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
190                 DRV_LOG(WARNING, "Item template %p is still in use.",
191                         (void *)template);
192                 return rte_flow_error_set(error, EBUSY,
193                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
194                                    NULL,
195                                    "item template is in use");
196         }
197         LIST_REMOVE(template, next);
198         claim_zero(mlx5dr_match_template_destroy(template->mt));
199         mlx5_free(template);
200         return 0;
201 }
202
203 /**
204  * Get information about HWS pre-configurable resources.
205  *
206  * @param[in] dev
207  *   Pointer to the rte_eth_dev structure.
208  * @param[out] port_info
209  *   Pointer to port information.
210  * @param[out] queue_info
211  *   Pointer to queue information.
212  * @param[out] error
213  *   Pointer to error structure.
214  *
215  * @return
216  *   0 on success, a negative errno value otherwise and rte_errno is set.
217  */
218 static int
219 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
220                  struct rte_flow_port_info *port_info,
221                  struct rte_flow_queue_info *queue_info,
222                  struct rte_flow_error *error __rte_unused)
223 {
224         /* Nothing to be updated currently. */
225         memset(port_info, 0, sizeof(*port_info));
226         /* The queue size is not limited by the low-level layer. */
227         queue_info->max_size = UINT32_MAX;
228         return 0;
229 }
230
231 /**
232  * Configure port HWS resources.
233  *
234  * @param[in] dev
235  *   Pointer to the rte_eth_dev structure.
236  * @param[in] port_attr
237  *   Port configuration attributes.
238  * @param[in] nb_queue
239  *   Number of queues.
240  * @param[in] queue_attr
241  *   Array that holds attributes for each flow queue.
242  * @param[out] error
243  *   Pointer to error structure.
244  *
245  * @return
246  *   0 on success, a negative errno value otherwise and rte_errno is set.
247  */
248 static int
249 flow_hw_configure(struct rte_eth_dev *dev,
250                   const struct rte_flow_port_attr *port_attr,
251                   uint16_t nb_queue,
252                   const struct rte_flow_queue_attr *queue_attr[],
253                   struct rte_flow_error *error)
254 {
255         struct mlx5_priv *priv = dev->data->dev_private;
256         struct mlx5dr_context *dr_ctx = NULL;
257         struct mlx5dr_context_attr dr_ctx_attr = {0};
258         struct mlx5_hw_q *hw_q;
259         struct mlx5_hw_q_job *job = NULL;
260         uint32_t mem_size, i, j;
261
262         if (!port_attr || !nb_queue || !queue_attr) {
263                 rte_errno = EINVAL;
264                 goto err;
265         }
266         /* In case re-configuring, release existing context at first. */
267         if (priv->dr_ctx) {
268                 /* Refuse to re-configure while any existing queue is busy. */
269                 for (i = 0; i < priv->nb_queue; i++) {
270                         hw_q = &priv->hw_q[i];
271                         /* Make sure all queues are empty. */
272                         if (hw_q->size != hw_q->job_idx) {
273                                 rte_errno = EBUSY;
274                                 goto err;
275                         }
276                 }
277                 flow_hw_resource_release(dev);
278         }
279         /* Allocate the queue job descriptor LIFO. */
280         mem_size = sizeof(priv->hw_q[0]) * nb_queue;
281         for (i = 0; i < nb_queue; i++) {
282                 /*
283                  * The HWS layer requires all queues to have the same
284                  * size; reject configurations where the sizes differ.
285                  */
286                 if (queue_attr[i]->size != queue_attr[0]->size) {
287                         rte_errno = EINVAL;
288                         goto err;
289                 }
290                 mem_size += (sizeof(struct mlx5_hw_q_job *) +
291                             sizeof(struct mlx5_hw_q_job)) *
292                             queue_attr[0]->size;
293         }
294         priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
295                                  64, SOCKET_ID_ANY);
296         if (!priv->hw_q) {
297                 rte_errno = ENOMEM;
298                 goto err;
299         }
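        /*
         * Single allocation layout: the hw_q[] array itself, then for each
         * queue an array of job pointers immediately followed by the job
         * descriptors those pointers refer to. job_idx starts at the queue
         * size, i.e. all job slots begin free (it is the LIFO free index).
         */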
300         for (i = 0; i < nb_queue; i++) {
301                 priv->hw_q[i].job_idx = queue_attr[i]->size;
302                 priv->hw_q[i].size = queue_attr[i]->size;
303                 if (i == 0)
304                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
305                                             &priv->hw_q[nb_queue];
306                 else
307                         priv->hw_q[i].job = (struct mlx5_hw_q_job **)
308                                             &job[queue_attr[i - 1]->size];
309                 job = (struct mlx5_hw_q_job *)
310                       &priv->hw_q[i].job[queue_attr[i]->size];
311                 for (j = 0; j < queue_attr[i]->size; j++)
312                         priv->hw_q[i].job[j] = &job[j];
313         }
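        /*
         * Open the HW steering (mlx5dr) context on the device protection
         * domain, with one steering queue per configured flow queue.
         */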
314         dr_ctx_attr.pd = priv->sh->cdev->pd;
315         dr_ctx_attr.queues = nb_queue;
316         /* All queue sizes are the same; take the first one. */
317         dr_ctx_attr.queue_size = queue_attr[0]->size;
318         dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
319         /* rte_errno has been updated by HWS layer. */
320         if (!dr_ctx)
321                 goto err;
322         priv->dr_ctx = dr_ctx;
323         priv->nb_queue = nb_queue;
324         return 0;
325 err:
326         if (dr_ctx)
327                 claim_zero(mlx5dr_context_close(dr_ctx));
328         mlx5_free(priv->hw_q);
329         priv->hw_q = NULL;
330         return rte_flow_error_set(error, rte_errno,
331                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
332                                   "failed to configure port");
333 }
334
335 /**
336  * Release HWS resources.
337  *
338  * @param[in] dev
339  *   Pointer to the rte_eth_dev structure.
340  */
341 void
342 flow_hw_resource_release(struct rte_eth_dev *dev)
343 {
344         struct mlx5_priv *priv = dev->data->dev_private;
345         struct rte_flow_pattern_template *it;
346         struct rte_flow_actions_template *at;
347
348         if (!priv->dr_ctx)
349                 return;
350         while (!LIST_EMPTY(&priv->flow_hw_itt)) {
351                 it = LIST_FIRST(&priv->flow_hw_itt);
352                 flow_hw_pattern_template_destroy(dev, it, NULL);
353         }
354         while (!LIST_EMPTY(&priv->flow_hw_at)) {
355                 at = LIST_FIRST(&priv->flow_hw_at);
356                 flow_hw_actions_template_destroy(dev, at, NULL);
357         }
358         mlx5_free(priv->hw_q);
359         priv->hw_q = NULL;
360         claim_zero(mlx5dr_context_close(priv->dr_ctx));
361         priv->dr_ctx = NULL;
362         priv->nb_queue = 0;
363 }
364
365 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
366         .info_get = flow_hw_info_get,
367         .configure = flow_hw_configure,
368         .pattern_template_create = flow_hw_pattern_template_create,
369         .pattern_template_destroy = flow_hw_pattern_template_destroy,
370         .actions_template_create = flow_hw_actions_template_create,
371         .actions_template_destroy = flow_hw_actions_template_destroy,
372 };
373
374 #endif