struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+/* Dynamic log-type ids for the vhost library, registered with the EAL
+ * logging framework at init time (see vhost_log_init below); presumably
+ * consumed by the VHOST_LOG_CONFIG/VHOST_LOG_DATA macros — confirm in
+ * the header where those macros are defined. */
+int vhost_config_log_level;
+int vhost_data_log_level;
+
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_user_iotlb_pending_insert(vq, iova, perm);
if (vhost_user_iotlb_miss(dev, iova, perm)) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
iova);
vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_DATA(ERR,
"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
iova);
return;
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_DATA(ERR,
"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
iova);
return;
struct vhost_virtqueue *vq;
if (vring_idx >= VHOST_MAX_VRING) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed not init vring, out of bound (%d)\n",
vring_idx);
return;
int callfd;
if (vring_idx >= VHOST_MAX_VRING) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed not init vring, out of bound (%d)\n",
vring_idx);
return;
vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
if (vq == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for vring:%u.\n", vring_idx);
return -1;
}
}
if (i == MAX_VHOST_DEVICE) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
return -1;
}
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
return -1;
}
dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}
+/*
+ * Turn on external-buffer support for the device identified by @vid.
+ * Silently does nothing when no device is registered under that id.
+ */
+void
+vhost_enable_extbuf(int vid)
+{
+	struct virtio_net *dev;
+
+	dev = get_device(vid);
+	if (!dev)
+		return;
+
+	dev->extbuf = 1;
+}
+
+/*
+ * Turn on linear-buffer support for the device identified by @vid.
+ * Silently does nothing when no device is registered under that id.
+ */
+void
+vhost_enable_linearbuf(int vid)
+{
+	struct virtio_net *dev;
+
+	dev = get_device(vid);
+	if (!dev)
+		return;
+
+	dev->linearbuf = 1;
+}
+
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
ret = get_mempolicy(&numa_node, NULL, 0, dev,
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"(%d) failed to query numa node: %s\n",
vid, rte_strerror(errno));
return -1;
return 0;
if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, qid);
return 0;
}
dev->extern_data = ctx;
return 0;
}
+
+/*
+ * Constructor (runs via RTE_INIT before main): registers the two vhost
+ * dynamic log types and sets their default verbosity.  Config-path
+ * messages default to INFO; data-path messages default to the quieter
+ * WARNING level.  NOTE(review): registration order matters here — the
+ * ids assigned by rte_log_register depend on call order, so keep
+ * "lib.vhost.config" registered before "lib.vhost.data".
+ * A negative return from rte_log_register means registration failed,
+ * in which case the level is intentionally left unset.
+ */
+RTE_INIT(vhost_log_init)
+{
+	vhost_config_log_level = rte_log_register("lib.vhost.config");
+	if (vhost_config_log_level >= 0)
+		rte_log_set_level(vhost_config_log_level, RTE_LOG_INFO);
+
+	vhost_data_log_level = rte_log_register("lib.vhost.data");
+	if (vhost_data_log_level >= 0)
+		rte_log_set_level(vhost_data_log_level, RTE_LOG_WARNING);
+}