vdpa/mlx5: support queue state operation
[dpdk.git] / drivers / vdpa / mlx5 / mlx5_vdpa_virtq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <string.h>
5
6 #include <rte_malloc.h>
7 #include <rte_errno.h>
8
9 #include <mlx5_common.h>
10
11 #include "mlx5_vdpa_utils.h"
12 #include "mlx5_vdpa.h"
13
14
15 static int
16 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
17 {
18         unsigned int i;
19
20         if (virtq->virtq) {
21                 claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
22                 virtq->virtq = NULL;
23         }
24         for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
25                 if (virtq->umems[i].obj)
26                         claim_zero(mlx5_glue->devx_umem_dereg
27                                                          (virtq->umems[i].obj));
28                 if (virtq->umems[i].buf)
29                         rte_free(virtq->umems[i].buf);
30         }
31         memset(&virtq->umems, 0, sizeof(virtq->umems));
32         if (virtq->eqp.fw_qp)
33                 mlx5_vdpa_event_qp_destroy(&virtq->eqp);
34         return 0;
35 }
36
37 void
38 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
39 {
40         struct mlx5_vdpa_virtq *entry;
41         struct mlx5_vdpa_virtq *next;
42
43         entry = SLIST_FIRST(&priv->virtq_list);
44         while (entry) {
45                 next = SLIST_NEXT(entry, next);
46                 mlx5_vdpa_virtq_unset(entry);
47                 SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
48                 rte_free(entry);
49                 entry = next;
50         }
51         SLIST_INIT(&priv->virtq_list);
52         if (priv->tis) {
53                 claim_zero(mlx5_devx_cmd_destroy(priv->tis));
54                 priv->tis = NULL;
55         }
56         if (priv->td) {
57                 claim_zero(mlx5_devx_cmd_destroy(priv->td));
58                 priv->td = NULL;
59         }
60         priv->features = 0;
61 }
62
63 static int
64 mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
65 {
66         struct mlx5_devx_virtq_attr attr = {
67                         .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
68                         .state = state ? MLX5_VIRTQ_STATE_RDY :
69                                          MLX5_VIRTQ_STATE_SUSPEND,
70                         .queue_index = virtq->index,
71         };
72
73         return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
74 }
75
76 static uint64_t
77 mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
78 {
79         struct rte_vhost_mem_region *reg;
80         uint32_t i;
81         uint64_t gpa = 0;
82
83         for (i = 0; i < mem->nregions; i++) {
84                 reg = &mem->regions[i];
85                 if (hva >= reg->host_user_addr &&
86                     hva < reg->host_user_addr + reg->size) {
87                         gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
88                         break;
89                 }
90         }
91         return gpa;
92 }
93
/**
 * Create and start the HW virtq object backing guest vring @p index.
 *
 * The sequence is order-dependent: read the vring from vhost, optionally
 * create an event QP, register 3 UMEMs sized from HW capabilities,
 * translate the ring addresses to GPAs (split rings only), create the
 * DevX virtq object and finally move it to the RDY state.  On any failure
 * everything acquired so far is released through mlx5_vdpa_virtq_unset().
 *
 * @param priv
 *   Pointer to the vDPA device private structure.
 * @param virtq
 *   Pointer to the (zeroed) virtq structure to fill.
 * @param index
 *   Guest vring index to set up.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
		      struct mlx5_vdpa_virtq *virtq, int index)
{
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	/* Mirror the negotiated virtio features into the DevX attributes. */
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * No need event QPs creation when the guest in poll mode or when the
	 * capability allows it.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
			" need event QPs and event mechanism.", index);
	}
	/* Setup 3 UMEMs for each virtq. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		/* Size formula (a * q_size + b) comes from HW capabilities. */
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							  priv->caps.umems[i].b;
		/* 4KB alignment as required for UMEM registration. */
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,
							virtq->umems[i].buf,
							virtq->umems[i].size,
							IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		/*
		 * HW needs guest physical ring addresses; translate the HVAs
		 * reported by vhost.  A zero GPA means the address is outside
		 * every registered memory region and the setup must fail.
		 */
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	/* Resume HW indexes from where the guest left off. */
	rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				 &last_used_idx);
	DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
		"virtq %d.", priv->vid, last_avail_idx, last_used_idx, index);
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tis->id;
	attr.queue_index = index;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	/* Move the fresh virtq straight to the RDY state. */
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	virtq->enable = 1;
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}
209
210 static int
211 mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
212 {
213         if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
214                 if (!(priv->caps.virtio_queue_type & (1 <<
215                                                      MLX5_VIRTQ_TYPE_PACKED))) {
216                         DRV_LOG(ERR, "Failed to configur PACKED mode for vdev "
217                                 "%d - it was not reported by HW/driver"
218                                 " capability.", priv->vid);
219                         return -ENOTSUP;
220                 }
221         }
222         if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
223                 if (!priv->caps.tso_ipv4) {
224                         DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
225                                 " was not reported by HW/driver capability.",
226                                 priv->vid);
227                         return -ENOTSUP;
228                 }
229         }
230         if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
231                 if (!priv->caps.tso_ipv6) {
232                         DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
233                                 " was not reported by HW/driver capability.",
234                                 priv->vid);
235                         return -ENOTSUP;
236                 }
237         }
238         if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
239                 if (!priv->caps.tx_csum) {
240                         DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
241                                 " was not reported by HW/driver capability.",
242                                 priv->vid);
243                         return -ENOTSUP;
244                 }
245         }
246         if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
247                 if (!priv->caps.rx_csum) {
248                         DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d"
249                                 " GUEST CSUM was not reported by HW/driver "
250                                 "capability.", priv->vid);
251                         return -ENOTSUP;
252                 }
253         }
254         if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
255                 if (!priv->caps.virtio_version_1_0) {
256                         DRV_LOG(ERR, "Failed to enable version 1 for vdev %d "
257                                 "version 1 was not reported by HW/driver"
258                                 " capability.", priv->vid);
259                         return -ENOTSUP;
260                 }
261         }
262         return 0;
263 }
264
265 int
266 mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
267 {
268         struct mlx5_devx_tis_attr tis_attr = {0};
269         struct mlx5_vdpa_virtq *virtq;
270         uint32_t i;
271         uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
272         int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
273
274         if (ret || mlx5_vdpa_features_validate(priv)) {
275                 DRV_LOG(ERR, "Failed to configure negotiated features.");
276                 return -1;
277         }
278         priv->td = mlx5_devx_cmd_create_td(priv->ctx);
279         if (!priv->td) {
280                 DRV_LOG(ERR, "Failed to create transport domain.");
281                 return -rte_errno;
282         }
283         tis_attr.transport_domain = priv->td->id;
284         priv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
285         if (!priv->tis) {
286                 DRV_LOG(ERR, "Failed to create TIS.");
287                 goto error;
288         }
289         for (i = 0; i < nr_vring; i++) {
290                 virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
291                 if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
292                         if (virtq)
293                                 rte_free(virtq);
294                         goto error;
295                 }
296                 SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
297         }
298         priv->nr_virtqs = nr_vring;
299         return 0;
300 error:
301         mlx5_vdpa_virtqs_release(priv);
302         return -1;
303 }