/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
13 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
16 * Create flow action template.
19 * Pointer to the rte_eth_dev structure.
21 * Pointer to the action template attributes.
23 * Associated actions (list terminated by the END action).
25 * List of actions that marks which of the action's member is constant.
27 * Pointer to error structure.
30 * Action template pointer on success, NULL otherwise and rte_errno is set.
32 static struct rte_flow_actions_template *
33 flow_hw_actions_template_create(struct rte_eth_dev *dev,
34 const struct rte_flow_actions_template_attr *attr,
35 const struct rte_flow_action actions[],
36 const struct rte_flow_action masks[],
37 struct rte_flow_error *error)
39 struct mlx5_priv *priv = dev->data->dev_private;
40 int len, act_len, mask_len, i;
41 struct rte_flow_actions_template *at;
43 act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
44 NULL, 0, actions, error);
47 len = RTE_ALIGN(act_len, 16);
48 mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
49 NULL, 0, masks, error);
52 len += RTE_ALIGN(mask_len, 16);
53 at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());
55 rte_flow_error_set(error, ENOMEM,
56 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
58 "cannot allocate action template");
62 at->actions = (struct rte_flow_action *)(at + 1);
63 act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,
67 at->masks = (struct rte_flow_action *)
68 (((uint8_t *)at->actions) + act_len);
69 mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
70 len - act_len, masks, error);
74 * mlx5 PMD hacks indirect action index directly to the action conf.
75 * The rte_flow_conv() function copies the content from conf pointer.
76 * Need to restore the indirect action index from action conf here.
78 for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
79 actions++, masks++, i++) {
80 if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
81 at->actions[i].conf = actions->conf;
82 at->masks[i].conf = masks->conf;
85 __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
86 LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
94 * Destroy flow action template.
97 * Pointer to the rte_eth_dev structure.
99 * Pointer to the action template to be destroyed.
101 * Pointer to error structure.
104 * 0 on success, a negative errno value otherwise and rte_errno is set.
107 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
108 struct rte_flow_actions_template *template,
109 struct rte_flow_error *error __rte_unused)
111 if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
112 DRV_LOG(WARNING, "Action template %p is still in use.",
114 return rte_flow_error_set(error, EBUSY,
115 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
117 "action template in using");
119 LIST_REMOVE(template, next);
125 * Create flow item template.
128 * Pointer to the rte_eth_dev structure.
130 * Pointer to the item template attributes.
132 * The template item pattern.
134 * Pointer to error structure.
137 * Item template pointer on success, NULL otherwise and rte_errno is set.
139 static struct rte_flow_pattern_template *
140 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
141 const struct rte_flow_pattern_template_attr *attr,
142 const struct rte_flow_item items[],
143 struct rte_flow_error *error)
145 struct mlx5_priv *priv = dev->data->dev_private;
146 struct rte_flow_pattern_template *it;
148 it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
150 rte_flow_error_set(error, ENOMEM,
151 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
153 "cannot allocate item template");
157 it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
160 rte_flow_error_set(error, rte_errno,
161 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
163 "cannot create match template");
166 __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
167 LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
172 * Destroy flow item template.
175 * Pointer to the rte_eth_dev structure.
176 * @param[in] template
177 * Pointer to the item template to be destroyed.
179 * Pointer to error structure.
182 * 0 on success, a negative errno value otherwise and rte_errno is set.
185 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
186 struct rte_flow_pattern_template *template,
187 struct rte_flow_error *error __rte_unused)
189 if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
190 DRV_LOG(WARNING, "Item template %p is still in use.",
192 return rte_flow_error_set(error, EBUSY,
193 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
195 "item template in using");
197 LIST_REMOVE(template, next);
198 claim_zero(mlx5dr_match_template_destroy(template->mt));
204 * Get information about HWS pre-configurable resources.
207 * Pointer to the rte_eth_dev structure.
208 * @param[out] port_info
209 * Pointer to port information.
210 * @param[out] queue_info
211 * Pointer to queue information.
213 * Pointer to error structure.
216 * 0 on success, a negative errno value otherwise and rte_errno is set.
219 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
220 struct rte_flow_port_info *port_info __rte_unused,
221 struct rte_flow_queue_info *queue_info __rte_unused,
222 struct rte_flow_error *error __rte_unused)
224 /* Nothing to be updated currently. */
225 memset(port_info, 0, sizeof(*port_info));
226 /* Queue size is unlimited from low-level. */
227 queue_info->max_size = UINT32_MAX;
232 * Configure port HWS resources.
235 * Pointer to the rte_eth_dev structure.
236 * @param[in] port_attr
237 * Port configuration attributes.
238 * @param[in] nb_queue
240 * @param[in] queue_attr
241 * Array that holds attributes for each flow queue.
243 * Pointer to error structure.
246 * 0 on success, a negative errno value otherwise and rte_errno is set.
249 flow_hw_configure(struct rte_eth_dev *dev,
250 const struct rte_flow_port_attr *port_attr,
252 const struct rte_flow_queue_attr *queue_attr[],
253 struct rte_flow_error *error)
255 struct mlx5_priv *priv = dev->data->dev_private;
256 struct mlx5dr_context *dr_ctx = NULL;
257 struct mlx5dr_context_attr dr_ctx_attr = {0};
258 struct mlx5_hw_q *hw_q;
259 struct mlx5_hw_q_job *job = NULL;
260 uint32_t mem_size, i, j;
262 if (!port_attr || !nb_queue || !queue_attr) {
266 /* In case re-configuring, release existing context at first. */
269 for (i = 0; i < nb_queue; i++) {
270 hw_q = &priv->hw_q[i];
271 /* Make sure all queues are empty. */
272 if (hw_q->size != hw_q->job_idx) {
277 flow_hw_resource_release(dev);
279 /* Allocate the queue job descriptor LIFO. */
280 mem_size = sizeof(priv->hw_q[0]) * nb_queue;
281 for (i = 0; i < nb_queue; i++) {
283 * Check if the queues' size are all the same as the
284 * limitation from HWS layer.
286 if (queue_attr[i]->size != queue_attr[0]->size) {
290 mem_size += (sizeof(struct mlx5_hw_q_job *) +
291 sizeof(struct mlx5_hw_q_job)) *
294 priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
300 for (i = 0; i < nb_queue; i++) {
301 priv->hw_q[i].job_idx = queue_attr[i]->size;
302 priv->hw_q[i].size = queue_attr[i]->size;
304 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
305 &priv->hw_q[nb_queue];
307 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
308 &job[queue_attr[i - 1]->size];
309 job = (struct mlx5_hw_q_job *)
310 &priv->hw_q[i].job[queue_attr[i]->size];
311 for (j = 0; j < queue_attr[i]->size; j++)
312 priv->hw_q[i].job[j] = &job[j];
314 dr_ctx_attr.pd = priv->sh->cdev->pd;
315 dr_ctx_attr.queues = nb_queue;
316 /* Queue size should all be the same. Take the first one. */
317 dr_ctx_attr.queue_size = queue_attr[0]->size;
318 dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
319 /* rte_errno has been updated by HWS layer. */
322 priv->dr_ctx = dr_ctx;
323 priv->nb_queue = nb_queue;
327 claim_zero(mlx5dr_context_close(dr_ctx));
328 mlx5_free(priv->hw_q);
330 return rte_flow_error_set(error, rte_errno,
331 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
332 "fail to configure port");
336 * Release HWS resources.
339 * Pointer to the rte_eth_dev structure.
342 flow_hw_resource_release(struct rte_eth_dev *dev)
344 struct mlx5_priv *priv = dev->data->dev_private;
345 struct rte_flow_pattern_template *it;
346 struct rte_flow_actions_template *at;
350 while (!LIST_EMPTY(&priv->flow_hw_itt)) {
351 it = LIST_FIRST(&priv->flow_hw_itt);
352 flow_hw_pattern_template_destroy(dev, it, NULL);
354 while (!LIST_EMPTY(&priv->flow_hw_at)) {
355 at = LIST_FIRST(&priv->flow_hw_at);
356 flow_hw_actions_template_destroy(dev, at, NULL);
358 mlx5_free(priv->hw_q);
360 claim_zero(mlx5dr_context_close(priv->dr_ctx));
365 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
366 .info_get = flow_hw_info_get,
367 .configure = flow_hw_configure,
368 .pattern_template_create = flow_hw_pattern_template_create,
369 .pattern_template_destroy = flow_hw_pattern_template_destroy,
370 .actions_template_create = flow_hw_actions_template_create,
371 .actions_template_destroy = flow_hw_actions_template_destroy,