1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
7 #include <mlx5_malloc.h>
11 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
13 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
16 * Get information about HWS pre-configurable resources.
19 * Pointer to the rte_eth_dev structure.
20 * @param[out] port_info
21 * Pointer to port information.
22 * @param[out] queue_info
23 * Pointer to queue information.
25 * Pointer to error structure.
28 * 0 on success, a negative errno value otherwise and rte_errno is set.
31 flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
32 struct rte_flow_port_info *port_info __rte_unused,
33 struct rte_flow_queue_info *queue_info __rte_unused,
34 struct rte_flow_error *error __rte_unused)
36 /* Nothing to be updated currently. */
37 memset(port_info, 0, sizeof(*port_info));
38 /* Queue size is unlimited from low-level. */
39 queue_info->max_size = UINT32_MAX;
/**
 * Configure port HWS resources.
 *
 * Validates the queue attributes, releases any resources left over from a
 * previous configuration, allocates the per-queue job descriptor LIFOs in
 * one contiguous chunk and opens an HWS (mlx5dr) context.
 *
 * NOTE(review): this chunk is an excerpt — several statements, error
 * branches and cleanup labels are not visible here; comments describe only
 * what is shown. `nb_queue` is used below but its parameter declaration is
 * elided from this view — presumably a queue-count argument; confirm
 * against the full file.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
61 flow_hw_configure(struct rte_eth_dev *dev,
62 const struct rte_flow_port_attr *port_attr,
64 const struct rte_flow_queue_attr *queue_attr[],
65 struct rte_flow_error *error)
67 struct mlx5_priv *priv = dev->data->dev_private;
68 struct mlx5dr_context *dr_ctx = NULL;
69 struct mlx5dr_context_attr dr_ctx_attr = {0};
70 struct mlx5_hw_q *hw_q;
71 struct mlx5_hw_q_job *job = NULL;
72 uint32_t mem_size, i, j;
	/* Reject missing attributes or a zero queue count up front. */
74 if (!port_attr || !nb_queue || !queue_attr) {
78 /* In case re-configuring, release existing context at first. */
81 for (i = 0; i < nb_queue; i++) {
82 hw_q = &priv->hw_q[i];
	/*
	 * Make sure all queues are empty: job_idx is initialized to size
	 * (see below), so a mismatch means jobs are still in flight.
	 */
84 if (hw_q->size != hw_q->job_idx) {
89 flow_hw_resource_release(dev);
91 /* Allocate the queue job descriptor LIFO. */
	/* One allocation: hw_q array, then per-queue pointer arrays + jobs. */
92 mem_size = sizeof(priv->hw_q[0]) * nb_queue;
93 for (i = 0; i < nb_queue; i++) {
	/*
	 * Check if the queues' size are all the same as the
	 * limitation from HWS layer (compared against queue 0).
	 */
98 if (queue_attr[i]->size != queue_attr[0]->size) {
	/* Per queue: an array of job pointers plus the job storage itself. */
102 mem_size += (sizeof(struct mlx5_hw_q_job *) +
103 sizeof(struct mlx5_hw_q_job)) *
106 priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
112 for (i = 0; i < nb_queue; i++) {
	/* LIFO starts full: job_idx == size means the queue is idle. */
113 priv->hw_q[i].job_idx = queue_attr[i]->size;
114 priv->hw_q[i].size = queue_attr[i]->size;
	/* Queue 0's pointer array sits right after the hw_q array. */
116 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
117 &priv->hw_q[nb_queue];
	/* Later queues carve their arrays after the previous queue's jobs. */
119 priv->hw_q[i].job = (struct mlx5_hw_q_job **)
120 &job[queue_attr[i - 1]->size];
121 job = (struct mlx5_hw_q_job *)
122 &priv->hw_q[i].job[queue_attr[i]->size];
123 for (j = 0; j < queue_attr[i]->size; j++)
124 priv->hw_q[i].job[j] = &job[j];
	/* Open the low-level HWS context on the shared device. */
126 dr_ctx_attr.pd = priv->sh->cdev->pd;
127 dr_ctx_attr.queues = nb_queue;
128 /* Queue size should all be the same. Take the first one. */
129 dr_ctx_attr.queue_size = queue_attr[0]->size;
130 dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
131 /* rte_errno has been updated by HWS layer. */
	/* Success: publish the context and queue count. */
134 priv->dr_ctx = dr_ctx;
135 priv->nb_queue = nb_queue;
	/* Error path: undo the context open and the single allocation. */
139 claim_zero(mlx5dr_context_close(dr_ctx));
140 mlx5_free(priv->hw_q);
142 return rte_flow_error_set(error, rte_errno,
143 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
144 "fail to configure port");
/**
 * Release HWS resources.
 *
 * Frees the single allocation holding every queue's job descriptors and
 * closes the HWS (mlx5dr) context.
 *
 * NOTE(review): this chunk is an excerpt — guard conditions (e.g. a check
 * that a context was ever configured) appear to be elided from this view;
 * confirm against the full file before assuming unconditional release.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
154 flow_hw_resource_release(struct rte_eth_dev *dev)
156 struct mlx5_priv *priv = dev->data->dev_private;
	/* One contiguous chunk holds all queues and their jobs (see configure). */
160 mlx5_free(priv->hw_q);
162 claim_zero(mlx5dr_context_close(priv->dr_ctx));
167 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
168 .info_get = flow_hw_info_get,
169 .configure = flow_hw_configure,