/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

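/*
 * Kick-fd interrupt callback: relay a guest kick to the device by ringing
 * the hardware doorbell. On the first kick, enable the vhost host notifier
 * so that later kicks can reach the device doorbell directly.
 */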
static void
mlx5_vdpa_virtq_kick_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;
	int retry;

	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (rte_intr_fd_get(virtq->intr_handle) < 0)
		return;
	for (retry = 0; retry < 3; ++retry) {
		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
			      8);
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	}
	if (nbytes < 0)
		return;
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
			priv->vid, virtq->index);
		return;
	}
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								   "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

/* Release cached VQ resources. */
void
mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
{
	unsigned int i, j;

	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];

		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
			if (virtq->umems[j].obj) {
				claim_zero(mlx5_glue->devx_umem_dereg
							(virtq->umems[j].obj));
				virtq->umems[j].obj = NULL;
			}
			if (virtq->umems[j].buf) {
				rte_free(virtq->umems[j].buf);
				virtq->umems[j].buf = NULL;
			}
			virtq->umems[j].size = 0;
		}
		if (virtq->eqp.fw_qp)
			mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	}
}

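/* Stop the virtq, unregister its kick handler and destroy the DevX object. */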
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	int ret = -EAGAIN;

	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
		/* Retry unregistering while the callback is still active. */
		while (ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(virtq->intr_handle,
					mlx5_vdpa_virtq_kick_handler, virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
					rte_intr_fd_get(virtq->intr_handle),
					virtq->index);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		rte_intr_fd_set(virtq->intr_handle, -1);
	}
	rte_intr_instance_free(virtq->intr_handle);
	if (virtq->virtq) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}

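/* Release all virtqs used by the vhost device. */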
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;

	for (i = 0; i < priv->nr_virtqs; i++)
		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
	priv->features = 0;
	priv->nr_virtqs = 0;
}

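/* Move the virtq between the RDY and SUSPEND hardware states. */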
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

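/* Suspend the virtq and sync its hardware indexes back to vhost. */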
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

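/* Query the hardware indexes and propagate them to the vhost vring base. */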
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

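/* Translate a host virtual address to a guest physical address. */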
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

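/*
 * Create all resources of a single virtq: event QP, counters, UMEMs and the
 * DevX virtq object itself, then register the kick handler and subscribe to
 * hardware error events.
 */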
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	if (vq.size == 0)
		return 0;
	virtq->index = index;
	virtq->vq_size = vq.size;
	/* Mirror the negotiated virtio features into the DevX attributes. */
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * Event QPs are not needed when the guest works in poll mode
	 * (no callfd) and the device capability allows the no-MSIX
	 * event mode.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_prepare(priv, vq.size, vq.callfd,
						 &virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode, no event QPs or"
			" event mechanism needed.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters =
				mlx5_devx_cmd_create_virtio_q_counters
							(priv->cdev->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			goto error;
		}
		attr.counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		uint32_t size;
		void *buf;
		struct mlx5dv_devx_umem *obj;

		size = priv->caps.umems[i].a * vq.size + priv->caps.umems[i].b;
		if (virtq->umems[i].size == size &&
		    virtq->umems[i].obj != NULL) {
			/* Reuse registered memory. */
			memset(virtq->umems[i].buf, 0, size);
			goto reuse;
		}
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
				   (virtq->umems[i].obj));
		if (virtq->umems[i].buf)
			rte_free(virtq->umems[i].buf);
		virtq->umems[i].size = 0;
		virtq->umems[i].obj = NULL;
		virtq->umems[i].buf = NULL;
		buf = rte_zmalloc(__func__, size, 4096);
		if (buf == NULL) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf, size,
					       IBV_ACCESS_LOCAL_WRITE);
		if (obj == NULL) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			rte_free(buf);
			goto error;
		}
		virtq->umems[i].size = size;
		virtq->umems[i].buf = buf;
		virtq->umems[i].obj = obj;
reuse:
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				       &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
			"virtq %d.", priv->vid, last_avail_idx,
			last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	/* Spread the TX queues' TIS objects across the LAG ports. */
	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr.queue_index = index;
	attr.pd = priv->cdev->pdn;
	attr.hw_latency_mode = priv->hw_latency_mode;
	attr.hw_max_latency_us = priv->hw_max_latency_us;
	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Setup doorbell mapping. */
	virtq->intr_handle =
		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (virtq->intr_handle == NULL) {
		DRV_LOG(ERR, "Failed to allocate intr_handle");
		goto error;
	}
	if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
		goto error;
	if (rte_intr_fd_get(virtq->intr_handle) == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
			goto error;
		if (rte_intr_callback_register(virtq->intr_handle,
					       mlx5_vdpa_virtq_kick_handler,
					       virtq)) {
			rte_intr_fd_set(virtq->intr_handle, -1);
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		}
		DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
			rte_intr_fd_get(virtq->intr_handle),
			index);
	}
	/* Subscribe virtq error event. */
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	/* Initial notification to ask Qemu to handle completed buffers. */
	if (virtq->eqp.cq.callfd != -1)
		eventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

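/* Check that HW/driver capabilities cover all negotiated virtio features. */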
int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid,
						    &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if ((priv->features & (1ULL << VIRTIO_NET_F_CSUM)) == 0 &&
	    ((priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) > 0 ||
	     (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) > 0)) {
		/* Packet may be corrupted if TSO is enabled without CSUM. */
		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
	}
	if (nr_vring > priv->caps.max_num_virtio_queues) {
		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
			(int)priv->caps.max_num_virtio_queues,
			(int)nr_vring);
		return -1;
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

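/*
 * Check whether the vring configuration changed behind the driver's back:
 * a different ring size, kick fd or call fd means the virtq must be
 * recreated.
 */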
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
			    struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd !=
	    rte_intr_fd_get(virtq->intr_handle))
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

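/* Enable or disable a single virtq, recreating it if it was modified. */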
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (priv->state == MLX5_VDPA_STATE_PROBED) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return -1;
		}
	}
	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
		ret = mlx5_vdpa_steer_update(priv);
		if (ret)
			DRV_LOG(WARNING, "Failed to enable steering "
				"for virtq %d.", index);
	}
	virtq->enable = !!enable;
	return 0;
}

int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	/* Note: "couners" spelling matches the upstream DevX structure name. */
	struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr->received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr->completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr->invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr->error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

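/* Snapshot the current HW counters as the new baseline for statistics. */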
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (virtq->counters == NULL) /* VQ not enabled. */
		return 0;
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}