/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
				const struct rte_flow_pattern_template_attr *attr,
				const struct rte_flow_item items[],
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
	if (!it) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate item template");
		return NULL;
	}
	it->attr = *attr;
	it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
	if (!it->mt) {
		mlx5_free(it);
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create match template");
		return NULL;
	}
	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
	return it;
}
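
/*
 * Usage sketch (illustrative only, not part of the driver): the callback
 * above is reached through the generic rte_flow template API. The port_id
 * value and the single-ETH pattern are assumptions made for the example.
 *
 *	const struct rte_flow_pattern_template_attr pt_attr = {
 *		.relaxed_matching = 0,
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_pattern_template *pt;
 *
 *	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern,
 *					      &err);
 *	...
 *	rte_flow_pattern_template_destroy(port_id, pt, &err);
 */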

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_pattern_template *template,
				 struct rte_flow_error *error __rte_unused)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Item template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "item template is in use");
	}
	LIST_REMOVE(template, next);
	claim_zero(mlx5dr_match_template_destroy(template->mt));
	mlx5_free(template);
	return 0;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow_port_info *port_info,
		 struct rte_flow_queue_info *queue_info,
		 struct rte_flow_error *error __rte_unused)
{
	/* Nothing to be updated currently. */
	memset(port_info, 0, sizeof(*port_info));
	/* Queue size is unlimited at the low-level layer. */
	queue_info->max_size = UINT32_MAX;
	return 0;
}
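
/*
 * Usage sketch (illustrative only): an application queries the
 * pre-configurable resources before calling rte_flow_configure().
 * port_id and queue_size are assumptions made for the example.
 *
 *	struct rte_flow_port_info port_info;
 *	struct rte_flow_queue_info queue_info;
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *		queue_size = RTE_MIN(queue_size, queue_info.max_size);
 */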

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of flow queues to be configured.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
		  const struct rte_flow_port_attr *port_attr,
		  uint16_t nb_queue,
		  const struct rte_flow_queue_attr *queue_attr[],
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_context *dr_ctx = NULL;
	struct mlx5dr_context_attr dr_ctx_attr = {0};
	struct mlx5_hw_q *hw_q;
	struct mlx5_hw_q_job *job = NULL;
	uint32_t mem_size, i, j;

	if (!port_attr || !nb_queue || !queue_attr) {
		rte_errno = EINVAL;
		goto err;
	}
	/* In case of re-configuration, release the existing context first. */
	if (priv->dr_ctx) {
		/*
		 * Walk the queues allocated by the previous call, not the
		 * new count. Make sure all queues are empty: job_idx counts
		 * the free job descriptors left in the LIFO, so a full LIFO
		 * means no operation is in flight.
		 */
		for (i = 0; i < priv->nb_queue; i++) {
			hw_q = &priv->hw_q[i];
			if (hw_q->size != hw_q->job_idx) {
				rte_errno = EBUSY;
				goto err;
			}
		}
		flow_hw_resource_release(dev);
	}
	/* Allocate the queue job descriptor LIFO. */
	mem_size = sizeof(priv->hw_q[0]) * nb_queue;
	for (i = 0; i < nb_queue; i++) {
		/*
		 * Check that all queue sizes are equal to the first one,
		 * as required by the HWS layer.
		 */
		if (queue_attr[i]->size != queue_attr[0]->size) {
			rte_errno = EINVAL;
			goto err;
		}
		mem_size += (sizeof(struct mlx5_hw_q_job *) +
			     sizeof(struct mlx5_hw_q_job)) *
			     queue_attr[0]->size;
	}
	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
				 64, SOCKET_ID_ANY);
	if (!priv->hw_q) {
		rte_errno = ENOMEM;
		goto err;
	}
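	/*
	 * Thread the queue descriptors, the job pointer arrays and the job
	 * descriptors through the single allocation made above. The loop
	 * below produces the following layout:
	 *
	 *	hw_q[0] .. hw_q[nb_queue - 1]	queue descriptors
	 *	job pointer array of queue 0	(hw_q[0].job points here)
	 *	job descriptors of queue 0
	 *	job pointer array of queue 1	(hw_q[1].job points here)
	 *	job descriptors of queue 1
	 *	...
	 */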
	for (i = 0; i < nb_queue; i++) {
		priv->hw_q[i].job_idx = queue_attr[i]->size;
		priv->hw_q[i].size = queue_attr[i]->size;
		if (i == 0)
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &priv->hw_q[nb_queue];
		else
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &job[queue_attr[i - 1]->size];
		job = (struct mlx5_hw_q_job *)
		      &priv->hw_q[i].job[queue_attr[i]->size];
		for (j = 0; j < queue_attr[i]->size; j++)
			priv->hw_q[i].job[j] = &job[j];
	}
	dr_ctx_attr.pd = priv->sh->cdev->pd;
	dr_ctx_attr.queues = nb_queue;
	/* All queue sizes are equal; take the first one. */
	dr_ctx_attr.queue_size = queue_attr[0]->size;
	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
	/* rte_errno has been updated by the HWS layer. */
	if (!dr_ctx)
		goto err;
	priv->dr_ctx = dr_ctx;
	priv->nb_queue = nb_queue;
	return 0;
err:
	if (dr_ctx)
		claim_zero(mlx5dr_context_close(dr_ctx));
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to configure port");
}

/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	if (!priv->dr_ctx)
		return;
	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
		it = LIST_FIRST(&priv->flow_hw_itt);
		flow_hw_pattern_template_destroy(dev, it, NULL);
	}
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	claim_zero(mlx5dr_context_close(priv->dr_ctx));
	priv->dr_ctx = NULL;
	priv->nb_queue = 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
	.info_get = flow_hw_info_get,
	.configure = flow_hw_configure,
	.pattern_template_create = flow_hw_pattern_template_create,
	.pattern_template_destroy = flow_hw_pattern_template_destroy,
};

#endif