ret = recvmsg(sockfd, &msgh, 0);
if (ret <= 0) {
if (ret)
- VHOST_LOG_CONFIG(ERR, "(%s) recvmsg failed on fd %d (%s)\n",
- ifname, sockfd, strerror(errno));
+ VHOST_LOG_CONFIG(ifname, ERR, "recvmsg failed on fd %d (%s)\n",
+ sockfd, strerror(errno));
return ret;
}
if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
- VHOST_LOG_CONFIG(ERR, "(%s) truncated msg (fd %d)\n", ifname, sockfd);
+ VHOST_LOG_CONFIG(ifname, ERR, "truncated msg (fd %d)\n", sockfd);
return -1;
}
msgh.msg_controllen = sizeof(control);
cmsg = CMSG_FIRSTHDR(&msgh);
if (cmsg == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) cmsg == NULL\n", ifname);
+ VHOST_LOG_CONFIG(ifname, ERR, "cmsg == NULL\n");
errno = EINVAL;
return -1;
}
} while (ret < 0 && errno == EINTR);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) sendmsg error on fd %d (%s)\n",
- ifname, sockfd, strerror(errno));
+ VHOST_LOG_CONFIG(ifname, ERR, "sendmsg error on fd %d (%s)\n",
+ sockfd, strerror(errno));
return ret;
}
dev->async_copy = 1;
}
- VHOST_LOG_CONFIG(INFO, "(%s) new device, handle is %d\n", vsocket->path, vid);
+ VHOST_LOG_CONFIG(vsocket->path, INFO, "new device, handle is %d\n", vid);
if (vsocket->notify_ops->new_connection) {
ret = vsocket->notify_ops->new_connection(vid);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to add vhost user connection with fd %d\n",
- vsocket->path, fd);
+ VHOST_LOG_CONFIG(vsocket->path, ERR,
+ "failed to add vhost user connection with fd %d\n",
+ fd);
goto err_cleanup;
}
}
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
NULL, conn);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to add fd %d into vhost server fdset\n",
- vsocket->path, fd);
+ VHOST_LOG_CONFIG(vsocket->path, ERR,
+ "failed to add fd %d into vhost server fdset\n",
+ fd);
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
if (fd < 0)
return;
- VHOST_LOG_CONFIG(INFO, "(%s) new vhost user connection is %d\n",
- vsocket->path, fd);
+ VHOST_LOG_CONFIG(vsocket->path, INFO, "new vhost user connection is %d\n", fd);
vhost_user_add_connection(fd, vsocket);
}
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0)
return -1;
- VHOST_LOG_CONFIG(INFO, "(%s) vhost-user %s: socket created, fd: %d\n",
- vsocket->path, vsocket->is_server ? "server" : "client", fd);
+ VHOST_LOG_CONFIG(vsocket->path, INFO, "vhost-user %s: socket created, fd: %d\n",
+ vsocket->is_server ? "server" : "client", fd);
if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\n",
- vsocket->path, fd, strerror(errno));
+ VHOST_LOG_CONFIG(vsocket->path, ERR,
+ "vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\n",
+ fd, strerror(errno));
close(fd);
return -1;
}
*/
ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to bind: %s; remove it and try again\n",
- path, strerror(errno));
+ VHOST_LOG_CONFIG(path, ERR, "failed to bind: %s; remove it and try again\n",
+ strerror(errno));
goto err;
}
- VHOST_LOG_CONFIG(INFO, "(%s) binding succeeded\n", path);
+ VHOST_LOG_CONFIG(path, INFO, "binding succeeded\n");
ret = listen(fd, MAX_VIRTIO_BACKLOG);
if (ret < 0)
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
NULL, vsocket);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to add listen fd %d to vhost server fdset\n",
- path, fd);
+ VHOST_LOG_CONFIG(path, ERR, "failed to add listen fd %d to vhost server fdset\n",
+ fd);
goto err;
}
flags = fcntl(fd, F_GETFL, 0);
if (flags < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) can't get flags for connfd %d (%s)\n",
- path, fd, strerror(errno));
+ VHOST_LOG_CONFIG(path, ERR, "can't get flags for connfd %d (%s)\n",
+ fd, strerror(errno));
return -2;
}
if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
- VHOST_LOG_CONFIG(ERR, "(%s) can't disable nonblocking on fd %d\n", path, fd);
+ VHOST_LOG_CONFIG(path, ERR, "can't disable nonblocking on fd %d\n", fd);
return -2;
}
return 0;
sizeof(reconn->un));
if (ret == -2) {
close(reconn->fd);
- VHOST_LOG_CONFIG(ERR, "(%s) reconnection for fd %d failed\n",
- reconn->vsocket->path, reconn->fd);
+ VHOST_LOG_CONFIG(reconn->vsocket->path, ERR,
+ "reconnection for fd %d failed\n",
+ reconn->fd);
goto remove_fd;
}
if (ret == -1)
continue;
- VHOST_LOG_CONFIG(INFO, "(%s) connected\n", reconn->vsocket->path);
+ VHOST_LOG_CONFIG(reconn->vsocket->path, INFO, "connected\n");
vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
TAILQ_REMOVE(&reconn_list.head, reconn, next);
ret = pthread_mutex_init(&reconn_list.mutex, NULL);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "%s: failed to initialize mutex\n", __func__);
+ VHOST_LOG_CONFIG("thread", ERR, "%s: failed to initialize mutex\n", __func__);
return ret;
}
TAILQ_INIT(&reconn_list.head);
ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
vhost_user_client_reconnect, NULL);
if (ret != 0) {
- VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread\n");
+ VHOST_LOG_CONFIG("thread", ERR, "failed to create reconnect thread\n");
if (pthread_mutex_destroy(&reconn_list.mutex))
- VHOST_LOG_CONFIG(ERR, "%s: failed to destroy reconnect mutex\n", __func__);
+ VHOST_LOG_CONFIG("thread", ERR,
+ "%s: failed to destroy reconnect mutex\n",
+ __func__);
}
return ret;
return 0;
}
- VHOST_LOG_CONFIG(WARNING, "(%s) failed to connect: %s\n", path, strerror(errno));
+ VHOST_LOG_CONFIG(path, WARNING, "failed to connect: %s\n", strerror(errno));
if (ret == -2 || !vsocket->reconnect) {
close(fd);
return -1;
}
- VHOST_LOG_CONFIG(INFO, "(%s) reconnecting...\n", path);
+ VHOST_LOG_CONFIG(path, INFO, "reconnecting...\n");
reconn = malloc(sizeof(*reconn));
if (reconn == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for reconnect\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to allocate memory for reconnect\n");
close(fd);
return -1;
}
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) socket file is not registered yet.\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
if (vdpa_dev->ops->get_dev_type) {
ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
if (ret) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to get vdpa dev type for socket file.\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR,
+ "failed to get vdpa dev type for socket file.\n");
ret = -1;
goto unlock_exit;
}
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
}
if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa features for socket file.\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa features for socket file.\n");
ret = -1;
goto unlock_exit;
}
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
&vdpa_protocol_features) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa protocol features.\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa protocol features.\n");
ret = -1;
goto unlock_exit;
}
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
ret = -1;
goto unlock_exit;
}
}
if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa queue number.\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa queue number.\n");
ret = -1;
goto unlock_exit;
}
pthread_mutex_lock(&vhost_user.mutex);
if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
- VHOST_LOG_CONFIG(ERR, "(%s) the number of vhost sockets reaches maximum\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "the number of vhost sockets reaches maximum\n");
goto out;
}
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
if (vsocket->path == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to copy socket path string\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to copy socket path string\n");
vhost_user_socket_mem_free(vsocket);
goto out;
}
TAILQ_INIT(&vsocket->conn_list);
ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to init connection mutex\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to init connection mutex\n");
goto out_free;
}
vsocket->vdpa_dev = NULL;
if (vsocket->async_copy &&
(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
- VHOST_LOG_CONFIG(ERR, "(%s) async copy with IOMMU or post-copy not supported\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "async copy with IOMMU or post-copy not supported\n");
goto out_mutex;
}
if (vsocket->async_copy) {
vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
- VHOST_LOG_CONFIG(INFO, "(%s) logging feature is disabled in async copy mode\n",
- path);
+ VHOST_LOG_CONFIG(path, INFO, "logging feature is disabled in async copy mode\n");
}
/*
(1ULL << VIRTIO_NET_F_HOST_TSO6) |
(1ULL << VIRTIO_NET_F_HOST_UFO);
- VHOST_LOG_CONFIG(INFO, "(%s) Linear buffers requested without external buffers,\n",
- path);
- VHOST_LOG_CONFIG(INFO, "(%s) disabling host segmentation offloading support\n",
- path);
+ VHOST_LOG_CONFIG(path, INFO, "Linear buffers requested without external buffers,\n");
+ VHOST_LOG_CONFIG(path, INFO, "disabling host segmentation offloading support\n");
vsocket->supported_features &= ~seg_offload_features;
vsocket->features &= ~seg_offload_features;
}
~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
} else {
#ifndef RTE_LIBRTE_VHOST_POSTCOPY
- VHOST_LOG_CONFIG(ERR, "(%s) Postcopy requested but not compiled\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "Postcopy requested but not compiled\n");
ret = -1;
goto out_mutex;
#endif
out_mutex:
if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to destroy connection mutex\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to destroy connection mutex\n");
}
out_free:
vhost_user_socket_mem_free(vsocket);
goto again;
}
- VHOST_LOG_CONFIG(INFO, "(%s) free connfd %d\n", path, conn->connfd);
+ VHOST_LOG_CONFIG(path, INFO, "free connfd %d\n", conn->connfd);
close(conn->connfd);
vhost_destroy_device(conn->vid);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
* rebuild the wait list of poll.
*/
if (fdset_pipe_init(&vhost_user.fdset) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to create pipe for vhost fdset\n", path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vhost fdset\n");
return -1;
}
"vhost-events", NULL, fdset_event_dispatch,
&vhost_user.fdset);
if (ret != 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to create fdset handling thread\n",
- path);
+ VHOST_LOG_CONFIG(path, ERR, "failed to create fdset handling thread\n");
fdset_pipe_uninit(&vhost_user.fdset);
return -1;
}
vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
if (vhost_user_iotlb_miss(dev, iova, perm)) {
- VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
- dev->ifname, iova);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+ iova);
vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
}
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- VHOST_LOG_DATA(ERR,
- "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
- dev->ifname, iova);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
return;
}
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- VHOST_LOG_DATA(ERR,
- "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
- dev->ifname, iova);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
return;
}
gpa = hva_to_gpa(dev, hva, exp_size);
if (!gpa) {
- VHOST_LOG_DATA(ERR,
- "(%s) failed to find GPA for log_addr: 0x%"
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed to find GPA for log_addr: 0x%"
PRIx64 " hva: 0x%" PRIx64 "\n",
- dev->ifname, log_addr, hva);
+ log_addr, hva);
return 0;
}
return gpa;
int numa_node = SOCKET_ID_ANY;
if (vring_idx >= VHOST_MAX_VRING) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
- dev->ifname, vring_idx);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to init vring, out of bound (%d)\n",
+ vring_idx);
return;
}
vq = dev->virtqueue[vring_idx];
if (!vq) {
- VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
- dev->ifname, vring_idx);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "virtqueue not allocated (%d)\n", vring_idx);
return;
}
#ifdef RTE_LIBRTE_VHOST_NUMA
if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
- dev->ifname, rte_strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
+ rte_strerror(errno));
numa_node = SOCKET_ID_ANY;
}
#endif
int callfd;
if (vring_idx >= VHOST_MAX_VRING) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to reset vring, out of bound (%d)\n",
- dev->ifname, vring_idx);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to reset vring, out of bound (%d)\n",
+ vring_idx);
return;
}
vq = dev->virtqueue[vring_idx];
if (!vq) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
- dev->ifname, vring_idx);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to reset vring, virtqueue not allocated (%d)\n",
+ vring_idx);
return;
}
vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
if (vq == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
- dev->ifname, i);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for vring %u.\n",
+ i);
return -1;
}
}
if (i == RTE_MAX_VHOST_DEVICE) {
- VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
+ VHOST_LOG_CONFIG("device", ERR, "failed to find a free slot for new device.\n");
pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
- VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
+ VHOST_LOG_CONFIG("device", ERR, "failed to allocate memory for new device.\n");
pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
ret = get_mempolicy(&numa_node, NULL, 0, dev,
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
- dev->ifname, rte_strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
+ rte_strerror(errno));
return -1;
}
return 0;
if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, qid);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid virtqueue idx %d.\n",
+ __func__, qid);
return 0;
}
int node = vq->numa_node;
if (unlikely(vq->async)) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) async register failed: already registered (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "async register failed: already registered (qid: %d)\n",
+ queue_id);
return -1;
}
async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
if (!async) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate async metadata (qid: %d)\n",
+ queue_id);
return -1;
}
async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE, node);
if (!async->pkts_info) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate async_pkts_info (qid: %d)\n",
+ queue_id);
goto out_free_async;
}
async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
RTE_CACHE_LINE_SIZE, node);
if (!async->pkts_cmpl_flag) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+ queue_id);
goto out_free_async;
}
vq->size * sizeof(struct vring_used_elem_packed),
RTE_CACHE_LINE_SIZE, node);
if (!async->buffers_packed) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate async buffers (qid: %d)\n",
+ queue_id);
goto out_free_inflight;
}
} else {
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE, node);
if (!async->descs_split) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
- dev->ifname, queue_id);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate async descs (qid: %d)\n",
+ queue_id);
goto out_free_inflight;
}
}
return -1;
if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
- VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
- dev->ifname, __func__);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
+ __func__);
return -1;
}
return ret;
if (!rte_spinlock_trylock(&vq->access_lock)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to unregister async channel, virtqueue busy.\n");
return ret;
}
if (!vq->async) {
ret = 0;
} else if (vq->async->pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
- VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "inflight packets must be completed before unregistration.\n");
} else {
vhost_free_async_mem(vq);
ret = 0;
return -1;
if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
- VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
- dev->ifname, __func__);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
+ __func__);
return -1;
}
return 0;
if (vq->async->pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
- VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "inflight packets must be completed before unregistration.\n");
return -1;
}
uint16_t max_desc;
if (!rte_dma_is_valid(dma_id)) {
- VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+ VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
return -1;
}
if (rte_dma_info_get(dma_id, &info) != 0) {
- VHOST_LOG_CONFIG(ERR, "Fail to get DMA %d information.\n", dma_id);
+ VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
return -1;
}
if (vchan_id >= info.max_vchans) {
- VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+ VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
return -1;
}
vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
RTE_CACHE_LINE_SIZE);
if (vchans == NULL) {
- VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
- dma_id, vchan_id);
+ VHOST_LOG_CONFIG("dma", ERR,
+ "Failed to allocate vchans for DMA %d vChannel %u.\n",
+ dma_id, vchan_id);
return -1;
}
}
if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
- VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
- vchan_id);
+ VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
+ dma_id, vchan_id);
return 0;
}
pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
if (!pkts_cmpl_flag_addr) {
- VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
- "vChannel %u.\n", dma_id, vchan_id);
+ VHOST_LOG_CONFIG("dma", ERR,
+ "Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.\n",
+ dma_id, vchan_id);
if (dma_copy_track[dma_id].nr_vchans == 0) {
rte_free(dma_copy_track[dma_id].vchans);
return ret;
if (!rte_spinlock_trylock(&vq->access_lock)) {
- VHOST_LOG_CONFIG(DEBUG,
- "(%s) failed to check in-flight packets. virtqueue busy.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "failed to check in-flight packets. virtqueue busy.\n");
return ret;
}
return ret;
if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
- VHOST_LOG_CONFIG(ERR, "(%s) %s() called without access lock taken.\n",
- dev->ifname, __func__);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n",
+ __func__);
return -1;
}
if (ctx->fd_num == expected_fds)
return 0;
- VHOST_LOG_CONFIG(ERR, "(%s) expect %d FDs for request %s, received %d\n",
- dev->ifname, expected_fds,
- vhost_message_handlers[ctx->msg.request.master].description,
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "expect %d FDs for request %s, received %d\n",
+ expected_fds, vhost_message_handlers[ctx->msg.request.master].description,
ctx->fd_num);
close_msg_fds(ctx);
return;
/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
- VHOST_LOG_CONFIG(ERR, "(%s) DMA engine map failed\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine map failed\n");
}
}
if (rte_errno == EINVAL)
return;
- VHOST_LOG_CONFIG(ERR, "(%s) DMA engine unmap failed\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine unmap failed\n");
}
}
}
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
if (features & ~vhost_features) {
- VHOST_LOG_CONFIG(ERR, "(%s) received invalid negotiated features.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid negotiated features.\n");
dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
* is enabled when the live-migration starts.
*/
if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
- VHOST_LOG_CONFIG(ERR, "(%s) features changed while device is running.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "features changed while device is running.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
- VHOST_LOG_CONFIG(INFO, "(%s) negotiated Virtio features: 0x%" PRIx64 "\n",
- dev->ifname, dev->features);
- VHOST_LOG_CONFIG(DEBUG, "(%s) mergeable RX buffers %s, virtio 1 %s\n",
- dev->ifname,
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "negotiated Virtio features: 0x%" PRIx64 "\n",
+ dev->features);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "mergeable RX buffers %s, virtio 1 %s\n",
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
if (ctx->msg.payload.state.num > 32768) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n",
- dev->ifname, ctx->msg.payload.state.num);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid virtqueue size %u\n",
+ ctx->msg.payload.state.num);
return RTE_VHOST_MSG_RESULT_ERR;
}
*/
if (!vq_is_packed(dev)) {
if (vq->size & (vq->size - 1)) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n",
- dev->ifname, vq->size);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid virtqueue size %u\n",
+ vq->size);
return RTE_VHOST_MSG_RESULT_ERR;
}
}
sizeof(struct vring_used_elem_packed),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->shadow_used_packed) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for shadow used ring.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for shadow used ring.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->shadow_used_split) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for vq internal data.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for vq internal data.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
}
vq->size * sizeof(struct batch_copy_elem),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->batch_copy_elems) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for batching copy.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for batching copy.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "(%s) unable to get virtqueue %d numa information.\n",
- dev->ifname, index);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "unable to get virtqueue %d numa information.\n",
+ index);
return dev;
}
vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
if (!vq) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc virtqueue %d on node %d\n",
- dev->ifname, index, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc virtqueue %d on node %d\n",
+ index, node);
return dev;
}
if (vq != dev->virtqueue[index]) {
- VHOST_LOG_CONFIG(INFO, "(%s) reallocated virtqueue on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on node %d\n", node);
dev->virtqueue[index] = vq;
vhost_user_iotlb_init(dev, index);
}
sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
RTE_CACHE_LINE_SIZE, node);
if (!sup) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow packed on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc shadow packed on node %d\n",
+ node);
return dev;
}
vq->shadow_used_packed = sup;
sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
RTE_CACHE_LINE_SIZE, node);
if (!sus) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow split on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc shadow split on node %d\n",
+ node);
return dev;
}
vq->shadow_used_split = sus;
bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
RTE_CACHE_LINE_SIZE, node);
if (!bce) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc batch copy elem on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc batch copy elem on node %d\n",
+ node);
return dev;
}
vq->batch_copy_elems = bce;
lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
if (!lc) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc log cache on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc log cache on node %d\n",
+ node);
return dev;
}
vq->log_cache = lc;
ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
if (!ri) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit inflight on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc resubmit inflight on node %d\n",
+ node);
return dev;
}
vq->resubmit_inflight = ri;
rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,
0, node);
if (!rd) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit list on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc resubmit list on node %d\n",
+ node);
return dev;
}
ri->resubmit_list = rd;
ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "(%s) unable to get numa information.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "unable to get numa information.\n");
return dev;
}
dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
if (!dev) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc dev on node %d\n",
- old_dev->ifname, node);
+ VHOST_LOG_CONFIG(old_dev->ifname, ERR, "failed to realloc dev on node %d\n", node);
return old_dev;
}
- VHOST_LOG_CONFIG(INFO, "(%s) reallocated device on node %d\n", dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated device on node %d\n", node);
vhost_devices[dev->vid] = dev;
mem_size = sizeof(struct rte_vhost_memory) +
sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
if (!mem) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc mem table on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc mem table on node %d\n",
+ node);
return dev;
}
dev->mem = mem;
gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
RTE_CACHE_LINE_SIZE, node);
if (!gp) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc guest pages on node %d\n",
- dev->ifname, node);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to realloc guest pages on node %d\n",
+ node);
return dev;
}
dev->guest_pages = gp;
vq->log_guest_addr =
log_addr_to_gpa(dev, vq);
if (vq->log_guest_addr == 0) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map log_guest_addr.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map log_guest_addr.\n");
return dev;
}
}
if (vq->desc_packed == NULL ||
len != sizeof(struct vring_packed_desc) *
vq->size) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc_packed ring.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc_packed ring.\n");
return dev;
}
vq, addr->avail_user_addr, &len);
if (vq->driver_event == NULL ||
len != sizeof(struct vring_packed_desc_event)) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find driver area address.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "failed to find driver area address.\n");
return dev;
}
vq, addr->used_user_addr, &len);
if (vq->device_event == NULL ||
len != sizeof(struct vring_packed_desc_event)) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find device area address.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "failed to find device area address.\n");
return dev;
}
vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->desc_user_addr, &len);
if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc ring.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc ring.\n");
return dev;
}
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
if (vq->avail == 0 || len != expected_len) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map avail ring.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map avail ring.\n");
return dev;
}
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
if (vq->used == 0 || len != expected_len) {
- VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map used ring.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map used ring.\n");
return dev;
}
if (vq->last_used_idx != vq->used->idx) {
- VHOST_LOG_CONFIG(WARNING, "(%s) last_used_idx (%u) and vq->used->idx (%u) mismatches;\n",
- dev->ifname,
+ VHOST_LOG_CONFIG(dev->ifname, WARNING,
+ "last_used_idx (%u) and vq->used->idx (%u) mismatches;\n",
vq->last_used_idx, vq->used->idx);
vq->last_used_idx = vq->used->idx;
vq->last_avail_idx = vq->used->idx;
- VHOST_LOG_CONFIG(WARNING, "(%s) some packets maybe resent for Tx and dropped for Rx\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, WARNING,
+ "some packets maybe resent for Tx and dropped for Rx\n");
}
vq->access_ok = true;
- VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address desc: %p\n", dev->ifname, vq->desc);
- VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address avail: %p\n", dev->ifname, vq->avail);
- VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address used: %p\n", dev->ifname, vq->used);
- VHOST_LOG_CONFIG(DEBUG, "(%s) log_guest_addr: %" PRIx64 "\n",
- dev->ifname, vq->log_guest_addr);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address desc: %p\n", vq->desc);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address avail: %p\n", vq->avail);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address used: %p\n", vq->used);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64 "\n", vq->log_guest_addr);
return dev;
}
vq->last_avail_idx = ctx->msg.payload.state.num;
}
- VHOST_LOG_CONFIG(INFO,
- "(%s) vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n",
- dev->ifname, ctx->msg.payload.state.index, vq->last_used_idx,
- vq->last_avail_idx);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n",
+ ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx);
return RTE_VHOST_MSG_RESULT_OK;
}
dev->max_guest_pages * sizeof(*page),
RTE_CACHE_LINE_SIZE);
if (dev->guest_pages == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) cannot realloc guest_pages\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "cannot realloc guest_pages\n");
rte_free(old_pages);
return -1;
}
for (i = 0; i < dev->nr_guest_pages; i++) {
page = &dev->guest_pages[i];
- VHOST_LOG_CONFIG(INFO, "(%s) guest physical page region %u\n",
- dev->ifname, i);
- VHOST_LOG_CONFIG(INFO, "(%s)\tguest_phys_addr: %" PRIx64 "\n",
- dev->ifname, page->guest_phys_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\thost_iova : %" PRIx64 "\n",
- dev->ifname, page->host_iova);
- VHOST_LOG_CONFIG(INFO, "(%s)\tsize : %" PRIx64 "\n",
- dev->ifname, page->size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "guest physical page region %u\n", i);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "\tguest_phys_addr: %" PRIx64 "\n",
+ page->guest_phys_addr);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "\thost_iova : %" PRIx64 "\n",
+ page->host_iova);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "\tsize : %" PRIx64 "\n",
+ page->size);
}
}
#else
if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
&reg_struct)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to register ufd for region "
- "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
- dev->ifname,
- (uint64_t)reg_struct.range.start,
- (uint64_t)reg_struct.range.start +
- (uint64_t)reg_struct.range.len - 1,
- dev->postcopy_ufd,
- strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to register ufd for region "
+ "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
+ (uint64_t)reg_struct.range.start,
+ (uint64_t)reg_struct.range.start +
+ (uint64_t)reg_struct.range.len - 1,
+ dev->postcopy_ufd,
+ strerror(errno));
return -1;
}
- VHOST_LOG_CONFIG(INFO,
- "(%s)\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
- dev->ifname,
- (uint64_t)reg_struct.range.start,
- (uint64_t)reg_struct.range.start +
- (uint64_t)reg_struct.range.len - 1);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
+ (uint64_t)reg_struct.range.start,
+ (uint64_t)reg_struct.range.start +
+ (uint64_t)reg_struct.range.len - 1);
return 0;
}
* we've got to wait before we're allowed to generate faults.
*/
if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to read qemu ack on postcopy set-mem-table\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to read qemu ack on postcopy set-mem-table\n");
return -1;
}
return -1;
if (ack_ctx.msg.request.master != VHOST_USER_SET_MEM_TABLE) {
- VHOST_LOG_CONFIG(ERR, "(%s) bad qemu ack on postcopy set-mem-table (%d)\n",
- dev->ifname, ack_ctx.msg.request.master);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "bad qemu ack on postcopy set-mem-table (%d)\n",
+ ack_ctx.msg.request.master);
return -1;
}
/* Check for memory_size + mmap_offset overflow */
if (mmap_offset >= -region->size) {
- VHOST_LOG_CONFIG(ERR, "(%s) mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n",
- dev->ifname, mmap_offset, region->size);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n",
+ mmap_offset, region->size);
return -1;
}
*/
alignment = get_blk_size(region->fd);
if (alignment == (uint64_t)-1) {
- VHOST_LOG_CONFIG(ERR, "(%s) couldn't get hugepage size through fstat\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "couldn't get hugepage size through fstat\n");
return -1;
}
mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
* mmap() kernel implementation would return an error, but
* better catch it before and provide useful info in the logs.
*/
- VHOST_LOG_CONFIG(ERR, "(%s) mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n",
- dev->ifname, region->size + mmap_offset, alignment);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n",
+ region->size + mmap_offset, alignment);
return -1;
}
MAP_SHARED | populate, region->fd, 0);
if (mmap_addr == MAP_FAILED) {
- VHOST_LOG_CONFIG(ERR, "(%s) mmap failed (%s).\n", dev->ifname, strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap failed (%s).\n", strerror(errno));
return -1;
}
if (dev->async_copy) {
if (add_guest_pages(dev, region, alignment) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) adding guest pages to region failed.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "adding guest pages to region failed.\n");
return -1;
}
}
- VHOST_LOG_CONFIG(INFO, "(%s) guest memory region size: 0x%" PRIx64 "\n",
- dev->ifname, region->size);
- VHOST_LOG_CONFIG(INFO, "(%s)\t guest physical addr: 0x%" PRIx64 "\n",
- dev->ifname, region->guest_phys_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\t guest virtual addr: 0x%" PRIx64 "\n",
- dev->ifname, region->guest_user_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\t host virtual addr: 0x%" PRIx64 "\n",
- dev->ifname, region->host_user_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\t mmap addr : 0x%" PRIx64 "\n",
- dev->ifname, (uint64_t)(uintptr_t)mmap_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\t mmap size : 0x%" PRIx64 "\n",
- dev->ifname, mmap_size);
- VHOST_LOG_CONFIG(INFO, "(%s)\t mmap align: 0x%" PRIx64 "\n",
- dev->ifname, alignment);
- VHOST_LOG_CONFIG(INFO, "(%s)\t mmap off : 0x%" PRIx64 "\n",
- dev->ifname, mmap_offset);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "guest memory region size: 0x%" PRIx64 "\n",
+ region->size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t guest physical addr: 0x%" PRIx64 "\n",
+ region->guest_phys_addr);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t guest virtual addr: 0x%" PRIx64 "\n",
+ region->guest_user_addr);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t host virtual addr: 0x%" PRIx64 "\n",
+ region->host_user_addr);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t mmap addr : 0x%" PRIx64 "\n",
+ (uint64_t)(uintptr_t)mmap_addr);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t mmap size : 0x%" PRIx64 "\n",
+ mmap_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t mmap align: 0x%" PRIx64 "\n",
+ alignment);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t mmap off : 0x%" PRIx64 "\n",
+ mmap_offset);
return 0;
}
return RTE_VHOST_MSG_RESULT_ERR;
if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
- VHOST_LOG_CONFIG(ERR, "(%s) too many memory regions (%u)\n",
- dev->ifname, memory->nregions);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "too many memory regions (%u)\n",
+ memory->nregions);
goto close_msg_fds;
}
if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
- VHOST_LOG_CONFIG(INFO, "(%s) memory regions not changed\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "memory regions not changed\n");
close_msg_fds(ctx);
RTE_CACHE_LINE_SIZE,
numa_node);
if (dev->guest_pages == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for dev->guest_pages\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for dev->guest_pages\n");
goto close_msg_fds;
}
}
dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
if (dev->mem == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for dev->mem\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to allocate memory for dev->mem\n");
goto free_guest_pages;
}
mmap_offset = memory->regions[i].mmap_offset;
if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap region %u\n", dev->ifname, i);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap region %u\n", i);
goto free_mem_table;
}
dev->flags |= VIRTIO_DEV_READY;
if (!(dev->flags & VIRTIO_DEV_RUNNING))
- VHOST_LOG_CONFIG(INFO, "(%s) virtio is now ready for processing.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "virtio is now ready for processing.\n");
return 1;
}
if (mfd == -1) {
mfd = mkstemp(fname);
if (mfd == -1) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to get inflight buffer fd\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get inflight buffer fd\n");
return NULL;
}
}
if (ftruncate(mfd, size) == -1) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc inflight buffer\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc inflight buffer\n");
close(mfd);
return NULL;
}
ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
if (ptr == MAP_FAILED) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap inflight buffer\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap inflight buffer\n");
close(mfd);
return NULL;
}
void *addr;
if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid get_inflight_fd message size is %d\n",
- dev->ifname, ctx->msg.size);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid get_inflight_fd message size is %d\n",
+ ctx->msg.size);
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->inflight_info = rte_zmalloc_socket("inflight_info",
sizeof(struct inflight_mem_info), 0, numa_node);
if (!dev->inflight_info) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->inflight_info->fd = -1;
num_queues = ctx->msg.payload.inflight.num_queues;
queue_size = ctx->msg.payload.inflight.queue_size;
- VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd num_queues: %u\n",
- dev->ifname, ctx->msg.payload.inflight.num_queues);
- VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd queue_size: %u\n",
- dev->ifname, ctx->msg.payload.inflight.queue_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "get_inflight_fd num_queues: %u\n",
+ ctx->msg.payload.inflight.num_queues);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "get_inflight_fd queue_size: %u\n",
+ ctx->msg.payload.inflight.queue_size);
if (vq_is_packed(dev))
pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
mmap_size = num_queues * pervq_inflight_size;
addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd);
if (!addr) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc vhost inflight area\n");
ctx->msg.payload.inflight.mmap_size = 0;
return RTE_VHOST_MSG_RESULT_ERR;
}
}
}
- VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_size: %"PRIu64"\n",
- dev->ifname, ctx->msg.payload.inflight.mmap_size);
- VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_offset: %"PRIu64"\n",
- dev->ifname, ctx->msg.payload.inflight.mmap_offset);
- VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "send inflight mmap_size: %"PRIu64"\n",
+ ctx->msg.payload.inflight.mmap_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "send inflight mmap_offset: %"PRIu64"\n",
+ ctx->msg.payload.inflight.mmap_offset);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "send inflight fd: %d\n", ctx->fds[0]);
return RTE_VHOST_MSG_RESULT_REPLY;
}
fd = ctx->fds[0];
if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid set_inflight_fd message size is %d,fd is %d\n",
- dev->ifname, ctx->msg.size, fd);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid set_inflight_fd message size is %d,fd is %d\n",
+ ctx->msg.size, fd);
return RTE_VHOST_MSG_RESULT_ERR;
}
else
pervq_inflight_size = get_pervq_shm_size_split(queue_size);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_size: %"PRIu64"\n",
- dev->ifname, mmap_size);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_offset: %"PRIu64"\n",
- dev->ifname, mmap_offset);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd num_queues: %u\n", dev->ifname, num_queues);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd queue_size: %u\n", dev->ifname, queue_size);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd fd: %d\n", dev->ifname, fd);
- VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd pervq_inflight_size: %d\n",
- dev->ifname, pervq_inflight_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set_inflight_fd mmap_offset: %"PRIu64"\n",
+ mmap_offset);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set_inflight_fd num_queues: %u\n",
+ num_queues);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set_inflight_fd queue_size: %u\n",
+ queue_size);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set_inflight_fd fd: %d\n",
+ fd);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set_inflight_fd pervq_inflight_size: %d\n",
+ pervq_inflight_size);
/*
* If VQ 0 has already been allocated, try to allocate on the same
dev->inflight_info = rte_zmalloc_socket("inflight_info",
sizeof(struct inflight_mem_info), 0, numa_node);
if (dev->inflight_info == NULL) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->inflight_info->fd = -1;
addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, mmap_offset);
if (addr == MAP_FAILED) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap share memory.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap share memory.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
file.fd = VIRTIO_INVALID_EVENTFD;
else
file.fd = ctx->fds[0];
- VHOST_LOG_CONFIG(INFO, "(%s) vring call idx:%d file:%d\n",
- dev->ifname, file.index, file.fd);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "vring call idx:%d file:%d\n",
+ file.index, file.fd);
vq = dev->virtqueue[file.index];
if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
close(ctx->fds[0]);
- VHOST_LOG_CONFIG(INFO, "(%s) not implemented\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented\n");
return RTE_VHOST_MSG_RESULT_OK;
}
resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
0, vq->numa_node);
if (!resubmit) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for resubmit info.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for resubmit info.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
0, vq->numa_node);
if (!resubmit->resubmit_list) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for inflight desc.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for inflight desc.\n");
rte_free(resubmit);
return RTE_VHOST_MSG_RESULT_ERR;
}
resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
0, vq->numa_node);
if (resubmit == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for resubmit info.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for resubmit info.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
0, vq->numa_node);
if (resubmit->resubmit_list == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to allocate memory for resubmit desc.\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate memory for resubmit desc.\n");
rte_free(resubmit);
return RTE_VHOST_MSG_RESULT_ERR;
}
file.fd = VIRTIO_INVALID_EVENTFD;
else
file.fd = ctx->fds[0];
- VHOST_LOG_CONFIG(INFO, "(%s) vring kick idx:%d file:%d\n",
- dev->ifname, file.index, file.fd);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "vring kick idx:%d file:%d\n",
+ file.index, file.fd);
/* Interpret ring addresses only when ring is started. */
dev = translate_ring_addresses(dev, file.index);
if (vq_is_packed(dev)) {
if (vhost_check_queue_inflights_packed(dev, vq)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n",
- dev->ifname, file.index);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to inflights for vq: %d\n",
+ file.index);
return RTE_VHOST_MSG_RESULT_ERR;
}
} else {
if (vhost_check_queue_inflights_split(dev, vq)) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n",
- dev->ifname, file.index);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to inflights for vq: %d\n",
+ file.index);
return RTE_VHOST_MSG_RESULT_ERR;
}
}
ctx->msg.payload.state.num = vq->last_avail_idx;
}
- VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%d file:%d\n",
- dev->ifname, ctx->msg.payload.state.index,
- ctx->msg.payload.state.num);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "vring base idx:%d file:%d\n",
+ ctx->msg.payload.state.index, ctx->msg.payload.state.num);
/*
* Based on current qemu vhost-user implementation, this message is
* sent and only sent in vhost_vring_stop.
bool enable = !!ctx->msg.payload.state.num;
int index = (int)ctx->msg.payload.state.index;
- VHOST_LOG_CONFIG(INFO, "(%s) set queue enable: %d to qp idx: %d\n",
- dev->ifname, enable, index);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "set queue enable: %d to qp idx: %d\n",
+ enable, index);
if (enable && dev->virtqueue[index]->async) {
if (dev->virtqueue[index]->async->pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) failed to enable vring. Inflight packets must be completed first\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to enable vring. Inflight packets must be completed first\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
}
rte_vhost_driver_get_protocol_features(dev->ifname,
&slave_protocol_features);
if (protocol_features & ~slave_protocol_features) {
- VHOST_LOG_CONFIG(ERR, "(%s) received invalid protocol features.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid protocol features.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->protocol_features = protocol_features;
- VHOST_LOG_CONFIG(INFO, "(%s) negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
- dev->ifname, dev->protocol_features);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
+ dev->protocol_features);
return RTE_VHOST_MSG_RESULT_OK;
}
return RTE_VHOST_MSG_RESULT_ERR;
if (fd < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid log fd: %d\n", dev->ifname, fd);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid log fd: %d\n", fd);
return RTE_VHOST_MSG_RESULT_ERR;
}
if (ctx->msg.size != sizeof(VhostUserLog)) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid log base msg size: %"PRId32" != %d\n",
- dev->ifname, ctx->msg.size, (int)sizeof(VhostUserLog));
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid log base msg size: %"PRId32" != %d\n",
+ ctx->msg.size, (int)sizeof(VhostUserLog));
goto close_msg_fds;
}
/* Check for mmap size and offset overflow. */
if (off >= -size) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
- dev->ifname, off, size);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
+ off, size);
goto close_msg_fds;
}
- VHOST_LOG_CONFIG(INFO, "(%s) log mmap size: %"PRId64", offset: %"PRId64"\n",
- dev->ifname, size, off);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "log mmap size: %"PRId64", offset: %"PRId64"\n",
+ size, off);
/*
* mmap from 0 to workaround a hugepage mmap bug: mmap will
addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (addr == MAP_FAILED) {
- VHOST_LOG_CONFIG(ERR, "(%s) mmap log base failed!\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap log base failed!\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
* caching will be done, which will impact performance
*/
if (!vq->log_cache)
- VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate VQ logging cache\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to allocate VQ logging cache\n");
}
/*
return RTE_VHOST_MSG_RESULT_ERR;
close(ctx->fds[0]);
- VHOST_LOG_CONFIG(INFO, "(%s) not implemented.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "not implemented.\n");
return RTE_VHOST_MSG_RESULT_OK;
}
uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64;
struct rte_vdpa_device *vdpa_dev;
- VHOST_LOG_CONFIG(DEBUG, "(%s) MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
- dev->ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
memcpy(dev->mac.addr_bytes, mac, 6);
/*
if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU ||
ctx->msg.payload.u64 > VIRTIO_MAX_MTU) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid MTU size (%"PRIu64")\n",
- dev->ifname, ctx->msg.payload.u64);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid MTU size (%"PRIu64")\n",
+ ctx->msg.payload.u64);
return RTE_VHOST_MSG_RESULT_ERR;
}
return RTE_VHOST_MSG_RESULT_ERR;
if (fd < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid file descriptor for slave channel (%d)\n",
- dev->ifname, fd);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid file descriptor for slave channel (%d)\n", fd);
return RTE_VHOST_MSG_RESULT_ERR;
}
return RTE_VHOST_MSG_RESULT_ERR;
if (!vdpa_dev) {
- VHOST_LOG_CONFIG(ERR, "(%s) is not vDPA device!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
ctx->msg.payload.cfg.size);
if (ret != 0) {
ctx->msg.size = 0;
- VHOST_LOG_CONFIG(ERR,
- "(%s) get_config() return error!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() return error!\n");
}
} else {
- VHOST_LOG_CONFIG(ERR, "(%s) get_config() not supported!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() not supported!\n");
}
return RTE_VHOST_MSG_RESULT_REPLY;
return RTE_VHOST_MSG_RESULT_ERR;
if (ctx->msg.payload.cfg.size > VHOST_USER_MAX_CONFIG_SIZE) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) vhost_user_config size: %"PRIu32", should not be larger than %d\n",
- dev->ifname, ctx->msg.payload.cfg.size,
- VHOST_USER_MAX_CONFIG_SIZE);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "vhost_user_config size: %"PRIu32", should not be larger than %d\n",
+ ctx->msg.payload.cfg.size, VHOST_USER_MAX_CONFIG_SIZE);
goto out;
}
if (!vdpa_dev) {
- VHOST_LOG_CONFIG(ERR, "(%s) is not vDPA device!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n");
goto out;
}
ctx->msg.payload.cfg.size,
ctx->msg.payload.cfg.flags);
if (ret)
- VHOST_LOG_CONFIG(ERR,
- "(%s) set_config() return error!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() return error!\n");
} else {
- VHOST_LOG_CONFIG(ERR, "(%s) set_config() not supported!\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() not supported!\n");
}
return RTE_VHOST_MSG_RESULT_OK;
}
break;
default:
- VHOST_LOG_CONFIG(ERR, "(%s) invalid IOTLB message type (%d)\n",
- dev->ifname, imsg->type);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid IOTLB message type (%d)\n",
+ imsg->type);
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
if (dev->postcopy_ufd == -1) {
- VHOST_LOG_CONFIG(ERR, "(%s) userfaultfd not available: %s\n",
- dev->ifname, strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "userfaultfd not available: %s\n",
+ strerror(errno));
return RTE_VHOST_MSG_RESULT_ERR;
}
api_struct.api = UFFD_API;
api_struct.features = 0;
if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
- VHOST_LOG_CONFIG(ERR, "(%s) UFFDIO_API ioctl failure: %s\n",
- dev->ifname, strerror(errno));
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "UFFDIO_API ioctl failure: %s\n",
+ strerror(errno));
close(dev->postcopy_ufd);
dev->postcopy_ufd = -1;
return RTE_VHOST_MSG_RESULT_ERR;
struct virtio_net *dev = *pdev;
if (dev->mem && dev->mem->nregions) {
- VHOST_LOG_CONFIG(ERR, "(%s) regions already registered at postcopy-listen\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "regions already registered at postcopy-listen\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->postcopy_listening = 1;
/* As per Virtio specification, the device status is 8bits long */
if (ctx->msg.payload.u64 > UINT8_MAX) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
- dev->ifname, ctx->msg.payload.u64);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
+ ctx->msg.payload.u64);
return RTE_VHOST_MSG_RESULT_ERR;
}
if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
(dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
- VHOST_LOG_CONFIG(ERR,
- "(%s) FEATURES_OK bit is set but feature negotiation failed\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "FEATURES_OK bit is set but feature negotiation failed\n");
/*
* Clear the bit to let the driver know about the feature
* negotiation failure
dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
}
- VHOST_LOG_CONFIG(INFO, "(%s) new device status(0x%08x):\n", dev->ifname,
- dev->status);
- VHOST_LOG_CONFIG(INFO, "(%s)\t-RESET: %u\n", dev->ifname,
- (dev->status == VIRTIO_DEVICE_STATUS_RESET));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-ACKNOWLEDGE: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_ACK));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-FEATURES_OK: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER_OK: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-DEVICE_NEED_RESET: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET));
- VHOST_LOG_CONFIG(INFO, "(%s)\t-FAILED: %u\n", dev->ifname,
- !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "new device status(0x%08x):\n", dev->status);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-RESET: %u\n",
+ (dev->status == VIRTIO_DEVICE_STATUS_RESET));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-ACKNOWLEDGE: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_ACK));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-DRIVER: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-FEATURES_OK: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-DRIVER_OK: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-DEVICE_NEED_RESET: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET));
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "\t-FAILED: %u\n",
+ !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
return RTE_VHOST_MSG_RESULT_OK;
}
if (ret <= 0) {
return ret;
} else if (ret != VHOST_USER_HDR_SIZE) {
- VHOST_LOG_CONFIG(ERR, "(%s) Unexpected header size read\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "Unexpected header size read\n");
close_msg_fds(ctx);
return -1;
}
if (ctx->msg.size) {
if (ctx->msg.size > sizeof(ctx->msg.payload)) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid msg size: %d\n",
- dev->ifname, ctx->msg.size);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid msg size: %d\n",
+ ctx->msg.size);
return -1;
}
ret = read(sockfd, &ctx->msg.payload, ctx->msg.size);
if (ret <= 0)
return ret;
if (ret != (int)ctx->msg.size) {
- VHOST_LOG_CONFIG(ERR, "(%s) read control message failed\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "read control message failed\n");
return -1;
}
}
}
if (vring_idx >= VHOST_MAX_VRING) {
- VHOST_LOG_CONFIG(ERR, "(%s) invalid vring index: %u\n", dev->ifname, vring_idx);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid vring index: %u\n", vring_idx);
return -1;
}
if (!dev->notify_ops) {
dev->notify_ops = vhost_driver_callback_get(dev->ifname);
if (!dev->notify_ops) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to get callback ops for driver\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to get callback ops for driver\n");
return -1;
}
}
ret = read_vhost_message(dev, fd, &ctx);
if (ret <= 0) {
if (ret < 0)
- VHOST_LOG_CONFIG(ERR, "(%s) vhost read message failed\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost read message failed\n");
else
- VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n");
return -1;
}
if (msg_handler != NULL && msg_handler->description != NULL) {
if (request != VHOST_USER_IOTLB_MSG)
- VHOST_LOG_CONFIG(INFO, "(%s) read message %s\n",
- dev->ifname, msg_handler->description);
+ VHOST_LOG_CONFIG(dev->ifname, INFO,
+ "read message %s\n",
+ msg_handler->description);
else
- VHOST_LOG_CONFIG(DEBUG, "(%s) read message %s\n",
- dev->ifname, msg_handler->description);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "read message %s\n",
+ msg_handler->description);
} else {
- VHOST_LOG_CONFIG(DEBUG, "(%s) external request %d\n", dev->ifname, request);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "external request %d\n", request);
}
ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc queue\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc queue\n");
return -1;
}
switch (ret) {
case RTE_VHOST_MSG_RESULT_ERR:
- VHOST_LOG_CONFIG(ERR, "(%s) processing %s failed.\n",
- dev->ifname, msg_handler->description);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "processing %s failed.\n",
+ msg_handler->description);
handled = true;
break;
case RTE_VHOST_MSG_RESULT_OK:
- VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded.\n",
- dev->ifname, msg_handler->description);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "processing %s succeeded.\n",
+ msg_handler->description);
handled = true;
break;
case RTE_VHOST_MSG_RESULT_REPLY:
- VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded and needs reply.\n",
- dev->ifname, msg_handler->description);
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG,
+ "processing %s succeeded and needs reply.\n",
+ msg_handler->description);
send_vhost_reply(dev, fd, &ctx);
handled = true;
break;
/* If message was not handled at this stage, treat it as an error */
if (!handled) {
- VHOST_LOG_CONFIG(ERR, "(%s) vhost message (req: %d) was not handled.\n",
- dev->ifname, request);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "vhost message (req: %d) was not handled.\n",
+ request);
close_msg_fds(&ctx);
ret = RTE_VHOST_MSG_RESULT_ERR;
}
ctx.fd_num = 0;
send_vhost_reply(dev, fd, &ctx);
} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
- VHOST_LOG_CONFIG(ERR, "(%s) vhost message handling failed.\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
ret = -1;
goto unlock;
}
if (vdpa_dev->ops->get_dev_type) {
ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "failed to get vdpa dev type.\n");
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get vdpa dev type.\n");
ret = -1;
goto out;
}
if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
if (vdpa_dev->ops->dev_conf(dev->vid))
- VHOST_LOG_CONFIG(ERR, "(%s) failed to configure vDPA device\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to configure vDPA device\n");
else
dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
}
ret = read_vhost_message(dev, dev->slave_req_fd, &msg_reply);
if (ret <= 0) {
if (ret < 0)
- VHOST_LOG_CONFIG(ERR, "(%s) vhost read slave message reply failed\n",
- dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "vhost read slave message reply failed\n");
else
- VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n");
ret = -1;
goto out;
}
ret = 0;
if (msg_reply.msg.request.slave != ctx->msg.request.slave) {
- VHOST_LOG_CONFIG(ERR, "(%s) received unexpected msg type (%u), expected %u\n",
- dev->ifname, msg_reply.msg.request.slave, ctx->msg.request.slave);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "received unexpected msg type (%u), expected %u\n",
+ msg_reply.msg.request.slave, ctx->msg.request.slave);
ret = -1;
goto out;
}
ret = send_vhost_message(dev, dev->slave_req_fd, &ctx);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to send IOTLB miss message (%d)\n",
- dev->ifname, ret);
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "failed to send IOTLB miss message (%d)\n",
+ ret);
return ret;
}
ret = send_vhost_slave_message(dev, &ctx);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to send config change (%d)\n",
- dev->ifname, ret);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to send config change (%d)\n", ret);
return ret;
}
ret = send_vhost_slave_message(dev, &ctx);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "(%s) failed to set host notifier (%d)\n",
- dev->ifname, ret);
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to set host notifier (%d)\n", ret);
return ret;
}
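All of the control-path conversions above assume a VHOST_LOG_CONFIG() macro that now takes the context string (socket path or dev->ifname) as its first parameter and prepends it to the message itself, so call sites no longer need the explicit "(%s)" format and matching argument. The macro definition is not part of this excerpt; a minimal sketch of the assumed shape, built on the existing rte_log() API and the vhost_config_log_level logtype, could be:

/*
 * Hypothetical sketch only: the real definition (presumably in vhost.h)
 * is not shown in this patch excerpt.
 */
#define VHOST_LOG_CONFIG(prefix, level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
		"VHOST_CONFIG: (%s) " fmt, prefix, ##args)

With that shape, a converted call such as VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n") expands to a single rte_log() invocation that already carries the "(<ifname>) " prefix.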
*/
if (unlikely(copy_idx < 0)) {
if (!vhost_async_dma_copy_log) {
- VHOST_LOG_DATA(ERR, "(%s) DMA copy failed for channel %d:%u\n",
- dev->ifname, dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "DMA copy failed for channel %d:%u\n",
+ dma_id, vchan_id);
vhost_async_dma_copy_log = true;
}
return -1;
*/
nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
if (unlikely(!vhost_async_dma_complete_log && has_error)) {
- VHOST_LOG_DATA(ERR, "(%s) DMA completion failure on channel %d:%u\n", dev->ifname,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "DMA completion failure on channel %d:%u\n",
+ dma_id, vchan_id);
vhost_async_dma_complete_log = true;
} else if (nr_copies == 0) {
goto out;
struct vhost_iov_iter *iter;
if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
- VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "no more async iovec available\n");
return -1;
}
static bool vhost_max_async_vec_log;
if (!vhost_max_async_vec_log) {
- VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "no more async iovec available\n");
vhost_max_async_vec_log = true;
}
host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
buf_iova + buf_offset, cpy_len, &mapped_len);
if (unlikely(!host_iova)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: failed to get host iova.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: failed to get host iova.\n",
+ __func__);
return -1;
}
} else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
- dev->ifname, num_buffers);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "RX: num merge buffers %d\n", num_buffers);
if (unlikely(buf_len < dev->vhost_hlen)) {
buf_offset = dev->vhost_hlen - buf_len;
if (unlikely(reserve_avail_buf_split(dev, vq,
pkt_len, buf_vec, &num_buffers,
avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DATA(DEBUG,
- "(%s) failed to get enough desc from vring\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "failed to get enough desc from vring\n");
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
- dev->ifname, vq->last_avail_idx,
- vq->last_avail_idx + num_buffers);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "current index %d | end index %d\n",
+ vq->last_avail_idx, vq->last_avail_idx + num_buffers);
if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
num_buffers, false) < 0) {
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "failed to get enough desc from vring\n");
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
- dev->ifname, vq->last_avail_idx,
- vq->last_avail_idx + nr_descs);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "current index %d | end index %d\n",
+ vq->last_avail_idx, vq->last_avail_idx + nr_descs);
vq_inc_last_avail_packed(vq, nr_descs);
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: built-in vhost net backend is disabled.\n",
+ __func__);
return 0;
}
if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
&num_buffers, avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "failed to get enough desc from vring\n");
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
- dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "current index %d | end index %d\n",
+ vq->last_avail_idx, vq->last_avail_idx + num_buffers);
if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
vq->shadow_used_idx -= num_buffers;
if (unlikely(pkt_err)) {
uint16_t num_descs = 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
- dev->ifname, __func__, pkt_err, queue_id);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "%s: failed to transfer %u packets for queue %u.\n",
+ __func__, pkt_err, queue_id);
/* update number of completed packets */
pkt_idx = n_xfer;
if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
nr_descs, nr_buffers) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "failed to get enough desc from vring\n");
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
- dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "current index %d | end index %d\n",
+ vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
return 0;
}
pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
- VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
- dev->ifname, __func__, pkt_err, queue_id);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "%s: failed to transfer %u packets for queue %u.\n",
+ __func__, pkt_err, queue_id);
dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
}
if (unlikely(!dev))
return 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid channel %d:%u.\n",
+ __func__, dma_id, vchan_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (!rte_spinlock_trylock(&vq->access_lock)) {
- VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
- queue_id);
+ VHOST_LOG_DATA(dev->ifname, DEBUG,
+ "%s: virtqueue %u is busy.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for virtqueue %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: async not registered for virtqueue %d.\n",
+ __func__, queue_id);
goto out;
}
if (!dev)
return 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
if (unlikely(queue_id >= dev->nr_vring)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
- dev->ifname, __func__, dma_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+ __func__, dma_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
- VHOST_LOG_DATA(ERR, "(%s) %s() called without access lock taken.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s() called without access lock taken.\n",
+ __func__);
return -1;
}
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: async not registered for queue id %d.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid channel %d:%u.\n",
+ __func__, dma_id, vchan_id);
return 0;
}
if (!dev)
return 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
if (unlikely(queue_id >= dev->nr_vring)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %u.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %u.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
- dev->ifname, __func__, dma_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+ __func__, dma_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (!rte_spinlock_trylock(&vq->access_lock)) {
- VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s: virtqueue %u is busy.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %u.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: async not registered for queue id %u.\n",
+ __func__, queue_id);
goto out_access_unlock;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid channel %d:%u.\n",
+ __func__, dma_id, vchan_id);
goto out_access_unlock;
}
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid channel %d:%u.\n",
+ __func__, dma_id, vchan_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: built-in vhost net backend is disabled.\n",
+ __func__);
return 0;
}
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
- VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
- dev->ifname, hdr->gso_type);
+ VHOST_LOG_DATA(dev->ifname, WARNING,
+ "unsupported gso type %u.\n",
+ hdr->gso_type);
goto error;
}
}
if (mbuf_avail == 0) {
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
- VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed to allocate memory for mbuf.\n");
goto error;
}
virtio_dev_extbuf_free, buf);
if (unlikely(shinfo == NULL)) {
rte_free(buf);
- VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "failed to init shinfo\n");
return -1;
}
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, avail_entries);
- VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
- dev->ifname, count);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "about to dequeue %u buffers\n", count);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
return 0;
* is required. Drop this packet.
*/
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
- dev->ifname, buf_len, mbuf_pool->name);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed mbuf alloc of size %d from %s.\n",
+ buf_len, mbuf_pool->name);
allocerr_warned = true;
}
dropped += 1;
mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n");
allocerr_warned = true;
}
dropped += 1;
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
- dev->ifname, buf_len, mbuf_pool->name);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "failed mbuf alloc of size %d from %s.\n",
+ buf_len, mbuf_pool->name);
allocerr_warned = true;
}
return -1;
mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
- dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n");
allocerr_warned = true;
}
return -1;
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: built-in vhost net backend is disabled.\n",
+ __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "failed to make RARP packet.\n");
count = 0;
goto out;
}
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, avail_entries);
- VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
- dev->ifname, count);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "about to dequeue %u buffers\n", count);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
goto out;
* is required. Drop this packet.
*/
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "(%s) %s: Failed mbuf alloc of size %d from %s\n",
- dev->ifname, __func__, buf_len, mbuf_pool->name);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: Failed mbuf alloc of size %d from %s\n",
+ __func__, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
dropped = true;
legacy_ol_flags, slot_idx, true);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "(%s) %s: Failed to offload copies to async channel.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR,
+ "%s: Failed to offload copies to async channel.\n",
+ __func__);
allocerr_warned = true;
}
dropped = true;
pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
- VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer data.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "%s: failed to transfer data.\n",
+ __func__);
pkt_idx = n_xfer;
/* recover available ring */
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) Failed mbuf alloc of size %d from %s.\n",
- dev->ifname, buf_len, mbuf_pool->name);
+ VHOST_LOG_DATA(dev->ifname, ERR, "Failed mbuf alloc of size %d from %s.\n",
+ buf_len, mbuf_pool->name);
allocerr_warned = true;
}
if (unlikely(err)) {
rte_pktmbuf_free(pkts);
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR, "(%s) Failed to copy desc to mbuf on.\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "Failed to copy desc to mbuf on.\n");
allocerr_warned = true;
}
return -1;
struct async_inflight_info *pkts_info = async->pkts_info;
struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
- VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n", dev->vid, count);
+ VHOST_LOG_DATA(dev->ifname, DEBUG, "about to dequeue %u buffers\n", count);
async_iter_reset(async);
*nr_inflight = -1;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
- dev->ifname, __func__);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: built-in vhost net backend is disabled.\n",
+ __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %d.\n",
+ __func__, queue_id);
return 0;
}
if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
- dev->ifname, __func__, dma_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+ __func__, dma_id);
return 0;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
- dma_id, vchan_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid channel %d:%u.\n",
+ __func__, dma_id, vchan_id);
return 0;
}
}
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
- dev->ifname, __func__, queue_id);
+ VHOST_LOG_DATA(dev->ifname, ERR, "%s: async not registered for queue id %d.\n",
+ __func__, queue_id);
count = 0;
goto out_access_unlock;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
+ VHOST_LOG_DATA(dev->ifname, ERR, "failed to make RARP packet.\n");
count = 0;
goto out;
}
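The data-path hunks follow the same convention for VHOST_LOG_DATA(), whose first parameter is now the device name. Since these logs sit on the packet-processing fast path, the macro is expected to keep its compile-time guard so DEBUG messages vanish from release builds; a minimal sketch under that assumption, mirroring the config-path macro above, might be:

/*
 * Hypothetical sketch only, mirroring the config-path macro; the
 * RTE_LOG_DP_LEVEL comparison lets the compiler drop DEBUG logs
 * when the data-path log level is lowered at build time.
 */
#define VHOST_LOG_DATA(prefix, level, fmt, args...) \
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
		rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
			"VHOST_DATA: (%s) " fmt, prefix, ##args) : \
		0)

A converted call such as VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__) then compiles to nothing when DEBUG is below the configured data-path level.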