vhost: NUMA-aware virtqueue allocations and cleanup fixes (NOTE: original extracted title "crypto/cnxk: add digest support" belongs to an unrelated commit and does not describe this vhost.c diff)
[dpdk.git] / lib / vhost / vhost.c
index 9abfc0b..53a470f 100644 (file)
@@ -261,7 +261,7 @@ vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint64_t src, dst;
        uint64_t len, remain = desc_len;
 
-       idesc = rte_malloc(__func__, desc_len, 0);
+       idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
        if (unlikely(!idesc))
                return NULL;
 
@@ -312,10 +312,10 @@ cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
        if (vq->resubmit_inflight) {
                if (vq->resubmit_inflight->resubmit_list) {
-                       free(vq->resubmit_inflight->resubmit_list);
+                       rte_free(vq->resubmit_inflight->resubmit_list);
                        vq->resubmit_inflight->resubmit_list = NULL;
                }
-               free(vq->resubmit_inflight);
+               rte_free(vq->resubmit_inflight);
                vq->resubmit_inflight = NULL;
        }
 }
@@ -365,8 +365,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
        vhost_free_async_mem(vq);
        rte_free(vq->batch_copy_elems);
-       if (vq->iotlb_pool)
-               rte_mempool_free(vq->iotlb_pool);
+       rte_mempool_free(vq->iotlb_pool);
        rte_free(vq->log_cache);
        rte_free(vq);
 }
@@ -550,6 +549,7 @@ static void
 init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
        struct vhost_virtqueue *vq;
+       int numa_node = SOCKET_ID_ANY;
 
        if (vring_idx >= VHOST_MAX_VRING) {
                VHOST_LOG_CONFIG(ERR,
@@ -570,6 +570,17 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
        vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+       if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+               VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n",
+                       dev->vid, rte_strerror(errno));
+               numa_node = SOCKET_ID_ANY;
+       }
+#endif
+       vq->numa_node = numa_node;
+
+       vhost_user_iotlb_init(dev, vring_idx);
 }
 
 static void
@@ -1615,7 +1626,6 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = get_device(vid);
        struct rte_vhost_async_features f;
-       int node;
 
        if (dev == NULL || ops == NULL)
                return -1;
@@ -1650,20 +1660,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
                goto reg_out;
        }
 
-#ifdef RTE_LIBRTE_VHOST_NUMA
-       if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-               VHOST_LOG_CONFIG(ERR,
-                       "unable to get numa information in async register. "
-                       "allocating async buffer memory on the caller thread node\n");
-               node = SOCKET_ID_ANY;
-       }
-#else
-       node = SOCKET_ID_ANY;
-#endif
-
        vq->async_pkts_info = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct async_inflight_info),
-                       RTE_CACHE_LINE_SIZE, node);
+                       RTE_CACHE_LINE_SIZE, vq->numa_node);
        if (!vq->async_pkts_info) {
                vhost_free_async_mem(vq);
                VHOST_LOG_CONFIG(ERR,
@@ -1674,7 +1673,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 
        vq->it_pool = rte_malloc_socket(NULL,
                        VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-                       RTE_CACHE_LINE_SIZE, node);
+                       RTE_CACHE_LINE_SIZE, vq->numa_node);
        if (!vq->it_pool) {
                vhost_free_async_mem(vq);
                VHOST_LOG_CONFIG(ERR,
@@ -1685,7 +1684,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 
        vq->vec_pool = rte_malloc_socket(NULL,
                        VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-                       RTE_CACHE_LINE_SIZE, node);
+                       RTE_CACHE_LINE_SIZE, vq->numa_node);
        if (!vq->vec_pool) {
                vhost_free_async_mem(vq);
                VHOST_LOG_CONFIG(ERR,
@@ -1697,7 +1696,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
        if (vq_is_packed(dev)) {
                vq->async_buffers_packed = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct vring_used_elem_packed),
-                       RTE_CACHE_LINE_SIZE, node);
+                       RTE_CACHE_LINE_SIZE, vq->numa_node);
                if (!vq->async_buffers_packed) {
                        vhost_free_async_mem(vq);
                        VHOST_LOG_CONFIG(ERR,
@@ -1708,7 +1707,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
        } else {
                vq->async_descs_split = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct vring_used_elem),
-                       RTE_CACHE_LINE_SIZE, node);
+                       RTE_CACHE_LINE_SIZE, vq->numa_node);
                if (!vq->async_descs_split) {
                        vhost_free_async_mem(vq);
                        VHOST_LOG_CONFIG(ERR,
@@ -1779,5 +1778,5 @@ out:
        return ret;
 }
 
-RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO);
-RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);
+RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
+RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);