/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
                 struct rte_flow_port_info *port_info,
                 struct rte_flow_queue_info *queue_info,
                 struct rte_flow_error *error __rte_unused)
{
        /* No pre-configurable port resources are exposed yet; report zeros. */
        memset(port_info, 0, sizeof(*port_info));
        /* The low-level layer puts no limit on the queue size. */
        queue_info->max_size = UINT32_MAX;
        return 0;
}
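
/*
 * A minimal, application-side usage sketch (illustrative only, not part of
 * the driver): the values filled in by flow_hw_info_get() above reach the
 * application through the public rte_flow_info_get() API. The function name
 * and the port_id parameter are placeholders; it is kept static and marked
 * unused so it merely documents the contract.
 */
static int __rte_unused
flow_hw_info_get_usage(uint16_t port_id)
{
        struct rte_flow_port_info port_info;
        struct rte_flow_queue_info queue_info;
        struct rte_flow_error error;

        if (rte_flow_info_get(port_id, &port_info, &queue_info, &error) < 0)
                return -rte_errno;
        /* With this driver, queue_info.max_size reads back as UINT32_MAX. */
        return 0;
}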

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
                  const struct rte_flow_port_attr *port_attr,
                  uint16_t nb_queue,
                  const struct rte_flow_queue_attr *queue_attr[],
                  struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5dr_context *dr_ctx = NULL;
        struct mlx5dr_context_attr dr_ctx_attr = {0};
        struct mlx5_hw_q *hw_q;
        struct mlx5_hw_q_job *job = NULL;
        uint32_t mem_size, i, j;

        if (!port_attr || !nb_queue || !queue_attr) {
                rte_errno = EINVAL;
                goto err;
        }
        /* When re-configuring, release the existing context first. */
        if (priv->dr_ctx) {
                /* Iterate over the old queue count, not the new nb_queue. */
                for (i = 0; i < priv->nb_queue; i++) {
                        hw_q = &priv->hw_q[i];
                        /* Make sure all queues are empty. */
                        if (hw_q->size != hw_q->job_idx) {
                                rte_errno = EBUSY;
                                goto err;
                        }
                }
                flow_hw_resource_release(dev);
        }
        /*
         * Compute the size of the queue job descriptor LIFO allocation:
         * per-queue headers, followed by one pointer array and one job
         * array per queue.
         */
        mem_size = sizeof(priv->hw_q[0]) * nb_queue;
        for (i = 0; i < nb_queue; i++) {
                /*
                 * The HWS layer requires all queues to share one size;
                 * reject configurations with mismatched queue sizes.
                 */
                if (queue_attr[i]->size != queue_attr[0]->size) {
                        rte_errno = EINVAL;
                        goto err;
                }
                mem_size += (sizeof(struct mlx5_hw_q_job *) +
                            sizeof(struct mlx5_hw_q_job)) *
                            queue_attr[0]->size;
        }
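        /*
         * Layout of the single allocation below: the queue headers come
         * first, then each queue's job pointer array immediately followed
         * by its job structures:
         *
         *   hw_q[0] .. hw_q[nb_queue - 1] |
         *   q0 job pointers | q0 jobs | q1 job pointers | q1 jobs | ...
         *
         * The loop after the allocation carves out these regions and wires
         * each pointer array entry to its job descriptor.
         */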
        priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
                                 64, SOCKET_ID_ANY);
        if (!priv->hw_q) {
                rte_errno = ENOMEM;
                goto err;
        }
        for (i = 0; i < nb_queue; i++) {
                priv->hw_q[i].job_idx = queue_attr[i]->size;
                priv->hw_q[i].size = queue_attr[i]->size;
                if (i == 0)
                        priv->hw_q[i].job = (struct mlx5_hw_q_job **)
                                            &priv->hw_q[nb_queue];
                else
                        priv->hw_q[i].job = (struct mlx5_hw_q_job **)
                                            &job[queue_attr[i - 1]->size];
                job = (struct mlx5_hw_q_job *)
                      &priv->hw_q[i].job[queue_attr[i]->size];
                for (j = 0; j < queue_attr[i]->size; j++)
                        priv->hw_q[i].job[j] = &job[j];
        }
        dr_ctx_attr.pd = priv->sh->cdev->pd;
        dr_ctx_attr.queues = nb_queue;
        /* All queue sizes are equal (checked above); take the first one. */
        dr_ctx_attr.queue_size = queue_attr[0]->size;
        dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
        /* rte_errno has been updated by the HWS layer. */
        if (!dr_ctx)
                goto err;
        priv->dr_ctx = dr_ctx;
        priv->nb_queue = nb_queue;
        return 0;
err:
        if (dr_ctx)
                claim_zero(mlx5dr_context_close(dr_ctx));
        /*
         * Free the job LIFO only if it belongs to this failed attempt;
         * while priv->dr_ctx is still set, priv->hw_q is the live one.
         */
        if (!priv->dr_ctx) {
                mlx5_free(priv->hw_q);
                priv->hw_q = NULL;
        }
        return rte_flow_error_set(error, rte_errno,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                  "failed to configure port");
}
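
/*
 * A minimal, application-side usage sketch (illustrative only, not part of
 * the driver): configuring two flow queues of equal size through the public
 * rte_flow_configure() API, which lands in flow_hw_configure() above. The
 * function name, the port_id parameter, and the queue size of 64 are
 * placeholder values.
 */
static int __rte_unused
flow_hw_configure_usage(uint16_t port_id)
{
        const struct rte_flow_port_attr port_attr = { 0 };
        const struct rte_flow_queue_attr queue_attr = { .size = 64 };
        /* flow_hw_configure() requires all queues to share one size. */
        const struct rte_flow_queue_attr *attr_list[] = {
                &queue_attr, &queue_attr,
        };
        struct rte_flow_error error;

        return rte_flow_configure(port_id, &port_attr, RTE_DIM(attr_list),
                                  attr_list, &error);
}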

/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->dr_ctx)
                return;
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
        claim_zero(mlx5dr_context_close(priv->dr_ctx));
        priv->dr_ctx = NULL;
        priv->nb_queue = 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .info_get = flow_hw_info_get,
        .configure = flow_hw_configure,
};

#endif