ethdev: fix VLAN offloads set if no driver callback
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 6c527e2..c819a84 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -27,6 +27,9 @@
 
 struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
+int vhost_config_log_level;
+int vhost_data_log_level;
+
 /* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
@@ -57,7 +60,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
                vhost_user_iotlb_pending_insert(vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
-                       RTE_LOG(ERR, VHOST_CONFIG,
+                       VHOST_LOG_CONFIG(ERR,
                                "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
                                iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
@@ -124,7 +127,7 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
        if (map_len != len) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_DATA(ERR,
                        "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
                        iova);
                return;
@@ -229,7 +232,7 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
        if (map_len != len) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_DATA(ERR,
                        "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
                        iova);
                return;
@@ -461,7 +464,7 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
        struct vhost_virtqueue *vq;
 
        if (vring_idx >= VHOST_MAX_VRING) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                                "Failed not init vring, out of bound (%d)\n",
                                vring_idx);
                return;
@@ -488,7 +491,7 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
        int callfd;
 
        if (vring_idx >= VHOST_MAX_VRING) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                                "Failed not init vring, out of bound (%d)\n",
                                vring_idx);
                return;
@@ -507,7 +510,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
        vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
        if (vq == NULL) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                        "Failed to allocate memory for vring:%u.\n", vring_idx);
                return -1;
        }
@@ -558,14 +561,14 @@ vhost_new_device(void)
        }
 
        if (i == MAX_VHOST_DEVICE) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                        "Failed to find a free slot for new device.\n");
                return -1;
        }
 
        dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
        if (dev == NULL) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                        "Failed to allocate memory for new dev.\n");
                return -1;
        }
@@ -673,6 +676,28 @@ vhost_set_builtin_virtio_net(int vid, bool enable)
                dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 }
 
+void
+vhost_enable_extbuf(int vid)
+{
+       struct virtio_net *dev = get_device(vid);
+
+       if (dev == NULL)
+               return;
+
+       dev->extbuf = 1;
+}
+
+void
+vhost_enable_linearbuf(int vid)
+{
+       struct virtio_net *dev = get_device(vid);
+
+       if (dev == NULL)
+               return;
+
+       dev->linearbuf = 1;
+}
+
 int
 rte_vhost_get_mtu(int vid, uint16_t *mtu)
 {
@@ -706,7 +731,7 @@ rte_vhost_get_numa_node(int vid)
        ret = get_mempolicy(&numa_node, NULL, 0, dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret < 0) {
-               RTE_LOG(ERR, VHOST_CONFIG,
+               VHOST_LOG_CONFIG(ERR,
                        "(%d) failed to query numa node: %s\n",
                        vid, rte_strerror(errno));
                return -1;
@@ -1300,7 +1325,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
                return 0;
 
        if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-               RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+               VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, qid);
                return 0;
        }
@@ -1435,3 +1460,14 @@ int rte_vhost_extern_callback_register(int vid,
        dev->extern_data = ctx;
        return 0;
 }
+
+RTE_INIT(vhost_log_init)
+{
+       vhost_config_log_level = rte_log_register("lib.vhost.config");
+       if (vhost_config_log_level >= 0)
+               rte_log_set_level(vhost_config_log_level, RTE_LOG_INFO);
+
+       vhost_data_log_level = rte_log_register("lib.vhost.data");
+       if (vhost_data_log_level >= 0)
+               rte_log_set_level(vhost_data_log_level, RTE_LOG_WARNING);
+}
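
Note: the RTE_INIT constructor above registers two dynamic log types, "lib.vhost.config" and "lib.vhost.data", which the VHOST_LOG_CONFIG/VHOST_LOG_DATA macros use in place of the old static VHOST_CONFIG/VHOST_DATA logtypes. A minimal sketch of how an application could tune these levels at runtime follows; app_tune_vhost_logging() is a hypothetical helper, while the rte_log_* calls and RTE_LOG_* levels are standard EAL APIs.

#include <rte_log.h>

/* Hypothetical helper: adjust verbosity of the vhost log types
 * registered by vhost_log_init() above. */
static void
app_tune_vhost_logging(void)
{
	/* Verbose control path while debugging vhost-user negotiation. */
	rte_log_set_level_pattern("lib.vhost.config", RTE_LOG_DEBUG);

	/* Keep the hot data path at its default WARNING level. */
	rte_log_set_level_pattern("lib.vhost.data", RTE_LOG_WARNING);
}

The same effect should also be reachable without code through the EAL command line, e.g. --log-level=lib.vhost.config:debug.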