net/mlx5: add pattern template management
drivers/net/mlx5/mlx5_flow_hw.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
                             const struct rte_flow_pattern_template_attr *attr,
                             const struct rte_flow_item items[],
                             struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_pattern_template *it;

        it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
        if (!it) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot allocate item template");
                return NULL;
        }
        it->attr = *attr;
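        /*
         * Build the low-level match template from the item pattern; the
         * relaxed matching attribute is forwarded to the HWS layer as-is.
         */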
        it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
        if (!it->mt) {
                mlx5_free(it);
                rte_flow_error_set(error, rte_errno,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot create match template");
                return NULL;
        }
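        /* Hold the initial reference and register the template on the port. */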
        __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
        return it;
}

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
                              struct rte_flow_pattern_template *template,
                              struct rte_flow_error *error)
{
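        /* Creation holds one reference; a count above one means other users. */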
        if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
                DRV_LOG(WARNING, "Item template %p is still in use.",
                        (void *)template);
                return rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "item template is in use");
        }
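        /* Only the creation reference remains: unlink and release the template. */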
        LIST_REMOVE(template, next);
        claim_zero(mlx5dr_match_template_destroy(template->mt));
        mlx5_free(template);
        return 0;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
                 struct rte_flow_port_info *port_info,
                 struct rte_flow_queue_info *queue_info,
                 struct rte_flow_error *error __rte_unused)
{
        /* Nothing to be updated currently. */
        memset(port_info, 0, sizeof(*port_info));
        /* The low-level layer does not limit the queue size. */
        queue_info->max_size = UINT32_MAX;
        return 0;
}

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
                  const struct rte_flow_port_attr *port_attr,
                  uint16_t nb_queue,
                  const struct rte_flow_queue_attr *queue_attr[],
                  struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5dr_context *dr_ctx = NULL;
        struct mlx5dr_context_attr dr_ctx_attr = {0};
        struct mlx5_hw_q *hw_q;
        struct mlx5_hw_q_job *job = NULL;
        uint32_t mem_size, i, j;

        if (!port_attr || !nb_queue || !queue_attr)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "invalid port configuration arguments");
        /* In case of reconfiguration, release the existing context first. */
        if (priv->dr_ctx) {
                for (i = 0; i < priv->nb_queue; i++) {
                        hw_q = &priv->hw_q[i];
                        /* Make sure all queues are empty. */
                        if (hw_q->size != hw_q->job_idx)
                                return rte_flow_error_set(error, EBUSY,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "port queues are still in use");
                }
                flow_hw_resource_release(dev);
        }
        /* Allocate the queue job descriptor LIFO. */
        mem_size = sizeof(priv->hw_q[0]) * nb_queue;
        for (i = 0; i < nb_queue; i++) {
                /*
                 * The HWS layer requires all queues to be of the same size.
                 * Reject a configuration where the sizes differ.
                 */
                if (queue_attr[i]->size != queue_attr[0]->size) {
                        rte_errno = EINVAL;
                        goto err;
                }
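                /* Room for this queue's job pointer array and job descriptors. */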
                mem_size += (sizeof(struct mlx5_hw_q_job *) +
                            sizeof(struct mlx5_hw_q_job)) *
                            queue_attr[0]->size;
        }
        priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
                                 64, SOCKET_ID_ANY);
        if (!priv->hw_q) {
                rte_errno = ENOMEM;
                goto err;
        }
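        /*
         * Layout of the single allocation:
         *   [mlx5_hw_q array][q0 job pointers][q0 jobs][q1 job pointers]...
         * job_idx starts at the queue size and is used as the LIFO top;
         * job_idx == size means the queue has no job in flight.
         */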
        for (i = 0; i < nb_queue; i++) {
                priv->hw_q[i].job_idx = queue_attr[i]->size;
                priv->hw_q[i].size = queue_attr[i]->size;
                if (i == 0)
                        priv->hw_q[i].job = (struct mlx5_hw_q_job **)
                                            &priv->hw_q[nb_queue];
                else
                        priv->hw_q[i].job = (struct mlx5_hw_q_job **)
                                            &job[queue_attr[i - 1]->size];
                job = (struct mlx5_hw_q_job *)
                      &priv->hw_q[i].job[queue_attr[i]->size];
                for (j = 0; j < queue_attr[i]->size; j++)
                        priv->hw_q[i].job[j] = &job[j];
        }
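        /* Open the HWS (mlx5dr) context on the device PD. */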
        dr_ctx_attr.pd = priv->sh->cdev->pd;
        dr_ctx_attr.queues = nb_queue;
        /* All queue sizes are equal; take the first one. */
        dr_ctx_attr.queue_size = queue_attr[0]->size;
        dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
        /* rte_errno has been set by the HWS layer. */
        if (!dr_ctx)
                goto err;
        priv->dr_ctx = dr_ctx;
        priv->nb_queue = nb_queue;
        return 0;
err:
        if (dr_ctx)
                claim_zero(mlx5dr_context_close(dr_ctx));
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
        return rte_flow_error_set(error, rte_errno,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                  "failed to configure port");
}

/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_pattern_template *it;

        if (!priv->dr_ctx)
                return;
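        /* Destroy all pattern templates still registered on the port. */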
        while (!LIST_EMPTY(&priv->flow_hw_itt)) {
                it = LIST_FIRST(&priv->flow_hw_itt);
                flow_hw_pattern_template_destroy(dev, it, NULL);
        }
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
        claim_zero(mlx5dr_context_close(priv->dr_ctx));
        priv->dr_ctx = NULL;
        priv->nb_queue = 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .info_get = flow_hw_info_get,
        .configure = flow_hw_configure,
        .pattern_template_create = flow_hw_pattern_template_create,
        .pattern_template_destroy = flow_hw_pattern_template_destroy,
};

#endif