vdpa/mlx5: add CPU core parameter to bind polling thread
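This change pins the driver's CQ polling (timer) thread to a configurable CPU core. Judging by the priv->event_core checks later in this diff, the core is presumably selected through a device argument (likely named event_core, falling back to the EAL main lcore when unset), so a probe line could look like the following hypothetical example:

    dpdk-testpmd -a 0000:01:00.2,class=vdpa,event_core=2 -- -i

The PCI address, the event_core devarg name, and its value here are illustrative assumptions; only the affinity logic itself appears in this diff.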
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 743e52a..a055d20 100644
 #include <rte_alarm.h>
 
 #include <mlx5_common.h>
+#include <mlx5_common_os.h>
+#include <mlx5_glue.h>
 
 #include "mlx5_vdpa_utils.h"
 #include "mlx5_vdpa.h"
 
 
-#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 500u
-#define MLX5_VDPA_NO_TRAFFIC_TIME_S 2LLU
+#define MLX5_VDPA_ERROR_TIME_SEC 3u
 
 void
 mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
@@ -43,7 +44,7 @@ mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
                       sizeof(out.buf)) >=
                       (ssize_t)sizeof(out.event_resp.cookie))
                        ;
-               mlx5_glue->devx_destroy_event_channel(priv->eventc);
+               mlx5_os_devx_destroy_event_channel(priv->eventc);
                priv->eventc = NULL;
        }
 #endif
@@ -54,17 +55,16 @@ mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
 static int
 mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
 {
-       uint32_t lcore;
+       int flags, ret;
 
        if (priv->eventc)
                return 0;
-       lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-       if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
+       if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
                return -1;
        }
-       priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
+       priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
                           MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->eventc) {
                rte_errno = errno;
@@ -72,7 +72,18 @@ mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
                        rte_errno);
                goto error;
        }
-       priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
+       flags = fcntl(priv->eventc->fd, F_GETFL);
+       ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to change event channel FD.");
+               goto error;
+       }
+       /*
+        * This PMD always issues a write memory barrier for UAR
+        * register writes, so it is safe to allocate the UAR with
+        * any memory mapping type.
+        */
+       priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
        if (!priv->uar) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
@@ -124,15 +135,15 @@ static int
 mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                    int callfd, struct mlx5_vdpa_cq *cq)
 {
-       struct mlx5_devx_cq_attr attr;
+       struct mlx5_devx_cq_attr attr = {0};
        size_t pgsize = sysconf(_SC_PAGESIZE);
        uint32_t umem_size;
-       int ret;
        uint16_t event_nums[1] = {0};
+       uint16_t cq_size = 1 << log_desc_n;
+       int ret;
 
        cq->log_desc_n = log_desc_n;
-       umem_size = sizeof(struct mlx5_cqe) * (1 << log_desc_n) +
-                                                       sizeof(*cq->db_rec) * 2;
+       umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
        cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
        if (!cq->umem_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
@@ -149,13 +160,13 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
        }
        attr.q_umem_valid = 1;
        attr.db_umem_valid = 1;
-       attr.use_first_only = 0;
+       attr.use_first_only = 1;
        attr.overrun_ignore = 0;
        attr.uar_page_id = priv->uar->page_id;
        attr.q_umem_id = cq->umem_obj->umem_id;
        attr.q_umem_offset = 0;
        attr.db_umem_id = cq->umem_obj->umem_id;
-       attr.db_umem_offset = sizeof(struct mlx5_cqe) * (1 << log_desc_n);
+       attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
        attr.eqn = priv->eqn;
        attr.log_cq_size = log_desc_n;
        attr.log_page_size = rte_log2_u32(pgsize);
@@ -166,7 +177,7 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
        cq->cq_ci = 0;
        rte_spinlock_init(&cq->sl);
        /* Subscribe CQ event to the event channel controlled by the driver. */
-       ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
+       ret = mlx5_os_devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
                                                   sizeof(event_nums),
                                                   event_nums,
                                                   (uint64_t)(uintptr_t)cq);
@@ -175,19 +186,10 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
                rte_errno = errno;
                goto error;
        }
-       if (callfd != -1) {
-               ret = mlx5_glue->devx_subscribe_devx_event_fd(priv->eventc,
-                                                             callfd,
-                                                             cq->cq->obj, 0);
-               if (ret) {
-                       DRV_LOG(ERR, "Failed to subscribe CQE event fd.");
-                       rte_errno = errno;
-                       goto error;
-               }
-       }
        cq->callfd = callfd;
        /* Init the CQE so that HW owns it from the start. */
-       memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
+       cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
+       cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
        /* First arming. */
        mlx5_vdpa_cq_arm(priv, cq);
        return 0;
@@ -202,35 +204,38 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
        struct mlx5_vdpa_event_qp *eqp =
                                container_of(cq, struct mlx5_vdpa_event_qp, cq);
        const unsigned int cq_size = 1 << cq->log_desc_n;
-       const unsigned int cq_mask = cq_size - 1;
-       uint32_t total = 0;
-       int ret;
-
-       do {
-               volatile struct mlx5_cqe *cqe = cq->cqes + ((cq->cq_ci + total)
-                                                           & cq_mask);
-
-               ret = check_cqe(cqe, cq_size, cq->cq_ci + total);
-               switch (ret) {
-               case MLX5_CQE_STATUS_ERR:
+       union {
+               struct {
+                       uint16_t wqe_counter;
+                       uint8_t rsvd5;
+                       uint8_t op_own;
+               };
+               uint32_t word;
+       } last_word;
+       uint16_t next_wqe_counter = cq->cq_ci;
+       uint16_t cur_wqe_counter;
+       uint16_t comp;
+
+       last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
+       cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
+       comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
+       if (comp) {
+               cq->cq_ci += comp;
+               MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
+                           MLX5_CQE_INVALID);
+               if (unlikely(MLX5_CQE_OPCODE(last_word.op_own) ==
+                            MLX5_CQE_RESP_ERR ||
+                            MLX5_CQE_OPCODE(last_word.op_own) ==
+                            MLX5_CQE_REQ_ERR))
                        cq->errors++;
-                       /*fall-through*/
-               case MLX5_CQE_STATUS_SW_OWN:
-                       total++;
-                       break;
-               case MLX5_CQE_STATUS_HW_OWN:
-               default:
-                       break;
-               }
-       } while (ret != MLX5_CQE_STATUS_HW_OWN);
-       rte_io_wmb();
-       cq->cq_ci += total;
-       /* Ring CQ doorbell record. */
-       cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
-       rte_io_wmb();
-       /* Ring SW QP doorbell record. */
-       eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
-       return total;
+               rte_io_wmb();
+               /* Ring CQ doorbell record. */
+               cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+               rte_io_wmb();
+               /* Ring SW QP doorbell record. */
+               eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
+       }
+       return comp;
 }
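The arithmetic above is the heart of the collapsed-CQ optimization: with use_first_only set, the hardware overwrites only CQE[0], and the number of newly consumed descriptors is the 16-bit wrap-around difference between the reported wqe_counter and the next expected index. The standalone sketch below (hypothetical, not driver code) shows the wrap behavior, including why the CQE is created with wqe_counter = UINT16_MAX so that the first poll yields zero:

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit wrap-around completion count, mirroring the math above. */
    static uint16_t
    completions(uint16_t cur_wqe_counter, uint16_t next_wqe_counter)
    {
            return (uint16_t)(cur_wqe_counter + 1 - next_wqe_counter);
    }

    int
    main(void)
    {
            /* Init state: wqe_counter = UINT16_MAX, nothing completed yet. */
            printf("%u\n", completions(UINT16_MAX, 0)); /* 0 */
            /* A burst of three completions collapsed into CQE[0]. */
            printf("%u\n", completions(12, 10));        /* 3 */
            /* The counter wrapped across zero: 65535, 0, 1 completed. */
            printf("%u\n", completions(1, 65535));      /* 3 */
            return 0;
    }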
 
 static void
@@ -246,21 +251,45 @@ mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
        }
 }
 
+static void
+mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
+{
+       if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
+               switch (max) {
+               case 0:
+                       priv->timer_delay_us += priv->event_us;
+                       break;
+               case 1:
+                       break;
+               default:
+                       priv->timer_delay_us /= max;
+                       break;
+               }
+       }
+       if (priv->timer_delay_us)
+               usleep(priv->timer_delay_us);
+}
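The new helper implements a simple adaptive poll interval for MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER: back off by event_us when a cycle finds no completions, keep the interval when the rate is matched (exactly one), and shrink it proportionally on bursts. A minimal sketch of the policy, with hypothetical names, not driver code:

    #include <stdint.h>

    static uint32_t
    next_delay_us(uint32_t delay_us, uint32_t max_comp, uint32_t event_us)
    {
            if (max_comp == 0)
                    return delay_us + event_us; /* idle: back off */
            if (max_comp == 1)
                    return delay_us;            /* rate matched: keep pace */
            return delay_us / max_comp;         /* burst: poll faster */
    }

For example, assuming event_us = 100, an idle iteration moves a 500us delay to 600us, while a burst of four completions cuts it to 125us.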
+
 static void *
 mlx5_vdpa_poll_handle(void *arg)
 {
        struct mlx5_vdpa_priv *priv = arg;
        int i;
        struct mlx5_vdpa_cq *cq;
-       uint32_t total;
+       uint32_t max;
        uint64_t current_tic;
 
        pthread_mutex_lock(&priv->timer_lock);
        while (!priv->timer_on)
                pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
        pthread_mutex_unlock(&priv->timer_lock);
+       priv->timer_delay_us = priv->event_mode ==
+                                           MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
+                                             MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
+                                                                priv->event_us;
        while (1) {
-               total = 0;
+               max = 0;
+               pthread_mutex_lock(&priv->vq_config_lock);
                for (i = 0; i < priv->nr_virtqs; i++) {
                        cq = &priv->virtqs[i].eqp.cq;
                        if (cq->cq && !cq->armed) {
@@ -271,30 +300,37 @@ mlx5_vdpa_poll_handle(void *arg)
                                        if (cq->callfd != -1)
                                                eventfd_write(cq->callfd,
                                                              (eventfd_t)1);
-                                       total += comp;
+                                       if (comp > max)
+                                               max = comp;
                                }
                        }
                }
                current_tic = rte_rdtsc();
-               if (!total) {
+               if (!max) {
                        /* No traffic? Stop the timer and switch to interrupts. */
                        if (current_tic - priv->last_traffic_tic >=
-                           rte_get_timer_hz() * MLX5_VDPA_NO_TRAFFIC_TIME_S) {
+                           rte_get_timer_hz() * priv->no_traffic_time_s) {
                                DRV_LOG(DEBUG, "Device %s traffic was stopped.",
                                        priv->vdev->device->name);
                                mlx5_vdpa_arm_all_cqs(priv);
+                               pthread_mutex_unlock(&priv->vq_config_lock);
                                pthread_mutex_lock(&priv->timer_lock);
                                priv->timer_on = 0;
                                while (!priv->timer_on)
                                        pthread_cond_wait(&priv->timer_cond,
                                                          &priv->timer_lock);
                                pthread_mutex_unlock(&priv->timer_lock);
+                               priv->timer_delay_us = priv->event_mode ==
+                                           MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
+                                             MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
+                                                                priv->event_us;
                                continue;
                        }
                } else {
                        priv->last_traffic_tic = current_tic;
                }
-               usleep(priv->timer_delay_us);
+               pthread_mutex_unlock(&priv->vq_config_lock);
+               mlx5_vdpa_timer_sleep(priv, max);
        }
        return NULL;
 }
@@ -309,6 +345,7 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
 
+       pthread_mutex_lock(&priv->vq_config_lock);
        while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
                                         sizeof(out.buf)) >=
                                       (ssize_t)sizeof(out.event_resp.cookie)) {
@@ -319,7 +356,17 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
                struct mlx5_vdpa_virtq *virtq = container_of(eqp,
                                                   struct mlx5_vdpa_virtq, eqp);
 
+               if (!virtq->enable)
+                       continue;
                mlx5_vdpa_cq_poll(cq);
+               /* Notify the guest that descriptors were consumed. */
+               if (cq->callfd != -1)
+                       eventfd_write(cq->callfd, (eventfd_t)1);
+               if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
+                       mlx5_vdpa_cq_arm(priv, cq);
+                       pthread_mutex_unlock(&priv->vq_config_lock);
+                       return;
+               }
                /* Don't arm again - timer will take control. */
                DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
                        " Timer is %s, cq ci is %u.\n",
@@ -338,32 +385,189 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
                pthread_cond_signal(&priv->timer_cond);
        }
        pthread_mutex_unlock(&priv->timer_lock);
+       pthread_mutex_unlock(&priv->vq_config_lock);
+}
+
+static void
+mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
+{
+#ifdef HAVE_IBV_DEVX_EVENT
+       struct mlx5_vdpa_priv *priv = cb_arg;
+       union {
+               struct mlx5dv_devx_async_event_hdr event_resp;
+               uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
+       } out;
+       uint32_t vq_index, i, version;
+       struct mlx5_vdpa_virtq *virtq;
+       uint64_t sec;
+
+       pthread_mutex_lock(&priv->vq_config_lock);
+       while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
+                                        sizeof(out.buf)) >=
+                                      (ssize_t)sizeof(out.event_resp.cookie)) {
+               vq_index = out.event_resp.cookie & UINT32_MAX;
+               version = out.event_resp.cookie >> 32;
+               if (vq_index >= priv->nr_virtqs) {
+                       DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
+                               priv->vdev->device->name, vq_index);
+                       continue;
+               }
+               virtq = &priv->virtqs[vq_index];
+               if (!virtq->enable || virtq->version != version)
+                       continue;
+               if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
+                       continue;
+               virtq->stopped = true;
+               /* Query error info. */
+               if (mlx5_vdpa_virtq_query(priv, vq_index))
+                       goto log;
+               /* Disable vq. */
+               if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
+                       DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
+                       goto log;
+               }
+               /* Retry if fewer than N errors occurred within 3 seconds. */
+               sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
+               if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
+                       /* Retry. */
+                       if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
+                               DRV_LOG(ERR, "Failed to enable virtq %d.",
+                                       vq_index);
+                       else
+                               DRV_LOG(WARNING, "Recover virtq %d: %u.",
+                                       vq_index, ++virtq->n_retry);
+               } else {
+                       /* Retry timeout, give up. */
+                       DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
+                               priv->vdev->device->name, vq_index);
+               }
+log:
+               /* Shift the error time log and append the current time. */
+               for (i = 1; i < RTE_DIM(virtq->err_time); i++)
+                       virtq->err_time[i - 1] = virtq->err_time[i];
+               virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
+       }
+       pthread_mutex_unlock(&priv->vq_config_lock);
+#endif
 }
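The recovery policy above is a fixed-size sliding window: virtq->err_time[] holds the TSC stamps of the last N errors, oldest first, and a retry is attempted only while the oldest stamp is more than MLX5_VDPA_ERROR_TIME_SEC old, i.e. fewer than N errors fell inside the window. A self-contained sketch of the window logic, with hypothetical helper names:

    #include <stdbool.h>
    #include <stdint.h>

    #define ERR_WINDOW_SEC 3u /* mirrors MLX5_VDPA_ERROR_TIME_SEC */

    /* Hypothetical helper: retry only if the oldest logged error is old enough. */
    static bool
    retry_allowed(const uint64_t *err_time, uint64_t now_tsc, uint64_t tsc_hz)
    {
            return (now_tsc - err_time[0]) / tsc_hz > ERR_WINDOW_SEC;
    }

    /* Drop the oldest stamp and append the newest, as done under "log:". */
    static void
    log_error(uint64_t *err_time, unsigned int n, uint64_t now_tsc)
    {
            unsigned int i;

            for (i = 1; i < n; i++)
                    err_time[i - 1] = err_time[i];
            err_time[n - 1] = now_tsc;
    }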
 
 int
-mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
+mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
 {
+       int ret;
        int flags;
+
+       /* Setup device event channel. */
+       priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
+       if (!priv->err_chnl) {
+               rte_errno = errno;
+               DRV_LOG(ERR, "Failed to create device event channel %d.",
+                       rte_errno);
+               goto error;
+       }
+       flags = fcntl(priv->err_chnl->fd, F_GETFL);
+       ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to change device event channel FD.");
+               goto error;
+       }
+       priv->err_intr_handle.fd = priv->err_chnl->fd;
+       priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
+       if (rte_intr_callback_register(&priv->err_intr_handle,
+                                      mlx5_vdpa_err_interrupt_handler,
+                                      priv)) {
+               priv->err_intr_handle.fd = 0;
+               DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
+                       priv->vid);
+               goto error;
+       } else {
+               DRV_LOG(DEBUG, "Registered error interrupt for device%d.",
+                       priv->vid);
+       }
+       return 0;
+error:
+       mlx5_vdpa_err_event_unset(priv);
+       return -1;
+}
+
+void
+mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
+{
+       int retries = MLX5_VDPA_INTR_RETRIES;
+       int ret = -EAGAIN;
+
+       if (!priv->err_intr_handle.fd)
+               return;
+       while (retries-- && ret == -EAGAIN) {
+               ret = rte_intr_callback_unregister(&priv->err_intr_handle,
+                                           mlx5_vdpa_err_interrupt_handler,
+                                           priv);
+               if (ret == -EAGAIN) {
+                       DRV_LOG(DEBUG, "Try again to unregister fd %d "
+                               "of error interrupt, retries = %d.",
+                               priv->err_intr_handle.fd, retries);
+                       rte_pause();
+               }
+       }
+       memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
+       if (priv->err_chnl) {
+#ifdef HAVE_IBV_DEVX_EVENT
+               union {
+                       struct mlx5dv_devx_async_event_hdr event_resp;
+                       uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
+                                   128];
+               } out;
+
+               /* Clean all pending events. */
+               while (mlx5_glue->devx_get_event(priv->err_chnl,
+                      &out.event_resp, sizeof(out.buf)) >=
+                      (ssize_t)sizeof(out.event_resp.cookie))
+                       ;
+#endif
+               mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
+               priv->err_chnl = NULL;
+       }
+}
+
+int
+mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
+{
        int ret;
+       rte_cpuset_t cpuset;
+       pthread_attr_t attr;
+       char name[16];
 
        if (!priv->eventc)
                /* All virtqs are in poll mode. */
                return 0;
-       pthread_mutex_init(&priv->timer_lock, NULL);
-       pthread_cond_init(&priv->timer_cond, NULL);
-       priv->timer_on = 0;
-       priv->timer_delay_us = MLX5_VDPA_DEFAULT_TIMER_DELAY_US;
-       ret = pthread_create(&priv->timer_tid, NULL, mlx5_vdpa_poll_handle,
-                            (void *)priv);
-       if (ret) {
-               DRV_LOG(ERR, "Failed to create timer thread.");
-               return -1;
-       }
-       flags = fcntl(priv->eventc->fd, F_GETFL);
-       ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
-       if (ret) {
-               DRV_LOG(ERR, "Failed to change event channel FD.");
-               goto error;
+       if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
+               pthread_mutex_init(&priv->timer_lock, NULL);
+               pthread_cond_init(&priv->timer_cond, NULL);
+               priv->timer_on = 0;
+               pthread_attr_init(&attr);
+               CPU_ZERO(&cpuset);
+               if (priv->event_core != -1)
+                       CPU_SET(priv->event_core, &cpuset);
+               else
+                       cpuset = rte_lcore_cpuset(rte_get_main_lcore());
+               ret = pthread_attr_setaffinity_np(&attr, sizeof(cpuset),
+                                                 &cpuset);
+               if (ret) {
+                       DRV_LOG(ERR, "Failed to set thread affinity.");
+                       return -1;
+               }
+               ret = pthread_create(&priv->timer_tid, &attr,
+                                    mlx5_vdpa_poll_handle, (void *)priv);
+               if (ret) {
+                       DRV_LOG(ERR, "Failed to create timer thread.");
+                       return -1;
+               }
+               snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
+               ret = pthread_setname_np(priv->timer_tid, name);
+               if (ret) {
+                       DRV_LOG(ERR, "Failed to set timer thread name.");
+                       return -1;
+               }
        }
        priv->intr_handle.fd = priv->eventc->fd;
        priv->intr_handle.type = RTE_INTR_HANDLE_EXT;