vhost: export device id as the interface to applications
author     Yuanhan Liu <yuanhan.liu@linux.intel.com>
           Mon, 13 Jun 2016 09:55:49 +0000 (17:55 +0800)
committer  Yuanhan Liu <yuanhan.liu@linux.intel.com>
           Wed, 22 Jun 2016 07:42:57 +0000 (09:42 +0200)
With all the previous preparation work in place, we are just one step away
from the final ABI refactoring: change the current APIs to take the vid
(device id) instead of the old virtio_net dev pointer.

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Rich Lane <rich.lane@bigswitch.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
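
For illustration only, here is a minimal sketch of what an application's vhost
callbacks look like after this change: they now receive the integer device id
(vid) and query everything else through it. The names app_new_device and
app_destroy_device are hypothetical, registration of the ops struct is not
shown, and only the vid-based signatures introduced by this patch are used.

/*
 * Illustrative sketch: application callbacks after the vid refactoring.
 * app_new_device/app_destroy_device are made-up names; the ops struct
 * would then be registered with the vhost driver (not shown here).
 */
#include <limits.h>             /* PATH_MAX */
#include <stdio.h>
#include <rte_virtio_net.h>

static int
app_new_device(int vid)
{
        char ifname[PATH_MAX];
        unsigned int i;

        /* Device properties are now queried through the vid handle. */
        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        printf("vhost device %d (%s) is ready\n", vid, ifname);

        /* Disable guest notifications on every queue, as the vhost PMD does. */
        for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        return 0;
}

static void
app_destroy_device(int vid)
{
        printf("vhost device %d has been removed\n", vid);
}

static const struct virtio_net_device_ops app_ops = {
        .new_device     = app_new_device,
        .destroy_device = app_destroy_device,
};
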
14 files changed:
doc/guides/rel_notes/deprecation.rst
doc/guides/rel_notes/release_16_07.rst
drivers/net/vhost/rte_eth_vhost.c
examples/tep_termination/main.c
examples/tep_termination/main.h
examples/tep_termination/vxlan_setup.c
examples/tep_termination/vxlan_setup.h
examples/vhost/main.c
examples/vhost/main.h
lib/librte_vhost/Makefile
lib/librte_vhost/rte_virtio_net.h
lib/librte_vhost/vhost_rxtx.c
lib/librte_vhost/vhost_user/virtio-net-user.c
lib/librte_vhost/virtio-net.c
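
The data-path APIs change in the same way. The sketch below is a hedged
example, not code from this patch: forward_one_burst and BURST_SZ are invented
names, and a valid vid plus an initialized mbuf pool are assumed; only the
vid-based burst signatures introduced below are used.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_virtio_net.h>

#define BURST_SZ 32     /* mirrors MAX_PKT_BURST in the vhost examples */

/* Hypothetical helper: drain the guest TX ring and loop the packets
 * back into the guest RX ring, addressing the device only by vid. */
static void
forward_one_burst(int vid, struct rte_mempool *mbuf_pool)
{
        struct rte_mbuf *pkts[BURST_SZ];
        uint16_t nb_rx, i;

        /* Packets the guest transmitted (guest TX -> host). */
        nb_rx = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
                                        pkts, BURST_SZ);

        /* Copy them into the guest RX ring (host -> guest). */
        rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);

        /* The enqueue copies packet data into the guest buffers,
         * so the host-side mbufs are freed unconditionally. */
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);
}
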

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index f75183f2a273f1a3a92853bd331b041d7ae73e55..9ab8da8b4bb11f2f1931bbbe9c768f6808eddbe4 100644
@@ -43,13 +43,6 @@ Deprecation Notices
   functions added to facilitate the creation of mempools using an external
   handler. The 16.07 release will contain these changes.
 
-* A librte_vhost public structures refactor is planned for DPDK 16.07
-  that requires both ABI and API change.
-  The proposed refactor would expose DPDK vhost dev to applications as
-  a handle, like the way kernel exposes an fd to user for locating a
-  specific file, and to keep all major structures internally, so that
-  we are likely to be free from ABI violations in future.
-
 * The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
   are respectively replaced by PKT_RX_VLAN_STRIPPED and
   PKT_RX_QINQ_STRIPPED, that are better described. The old flags and
diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst
index d4e09b1a30a8302ad6b876ca02f5d2748bb00443..6d4d2781c2b629b72f04db68f452f6725b8edfd2 100644
@@ -164,6 +164,10 @@ API Changes
 * The vhost function ``rte_vring_available_entries`` is renamed to
   ``rte_vhost_avail_entries``.
 
+* All existing vhost APIs and callbacks with ``virtio_net`` struct pointer
+  as the parameter have been changed due to the ABI refactoring mentioned
+  below: it's replaced by ``int vid``.
+
 
 ABI Changes
 -----------
@@ -178,6 +182,10 @@ ABI Changes
 * The ``rte_eth_dev_info`` structure has new fields ``nb_rx_queues`` and ``nb_tx_queues``
   to support number of queues configured by software.
 
+* vhost ABI refactoring has been made: ``virtio_net`` structure is never
+  exported to application any more. Instead, a handle, ``vid``, has been
+  used to represent this structure internally.
+
 
 Shared Library Versions
 -----------------------
@@ -214,7 +222,7 @@ The libraries prepended with a plus sign were incremented in this version.
      librte_sched.so.1
      librte_table.so.2
      librte_timer.so.1
-     librte_vhost.so.2
+   + librte_vhost.so.3
 
 
 Tested Platforms
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index ce5ca8b147d7d0437b14604be3bcc686ca961e77..395b892a2d78c48e71fc558513e8bf27fdc6cb55 100644
@@ -71,9 +71,9 @@ static struct ether_addr base_eth_addr = {
 };
 
 struct vhost_queue {
+       int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
-       struct virtio_net *device;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint8_t port;
@@ -139,7 +139,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                goto out;
 
        /* Dequeue packets from guest TX queue */
-       nb_rx = rte_vhost_dequeue_burst(r->device,
+       nb_rx = rte_vhost_dequeue_burst(r->vid,
                        r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
 
        r->rx_pkts += nb_rx;
@@ -170,7 +170,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                goto out;
 
        /* Enqueue packets to guest RX queue */
-       nb_tx = rte_vhost_enqueue_burst(r->device,
+       nb_tx = rte_vhost_enqueue_burst(r->vid,
                        r->virtqueue_id, bufs, nb_bufs);
 
        r->tx_pkts += nb_tx;
@@ -222,7 +222,7 @@ find_internal_resource(char *ifname)
 }
 
 static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
 {
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
@@ -234,12 +234,7 @@ new_device(struct virtio_net *dev)
        int newnode;
 #endif
 
-       if (dev == NULL) {
-               RTE_LOG(INFO, PMD, "Invalid argument\n");
-               return -1;
-       }
-
-       rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+       rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
@@ -250,7 +245,7 @@ new_device(struct virtio_net *dev)
        internal = eth_dev->data->dev_private;
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
-       newnode = rte_vhost_get_numa_node(dev->vid);
+       newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
 #endif
@@ -259,7 +254,7 @@ new_device(struct virtio_net *dev)
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
-               vq->device = dev;
+               vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
@@ -267,13 +262,13 @@ new_device(struct virtio_net *dev)
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
-               vq->device = dev;
+               vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
 
-       for (i = 0; i < rte_vhost_get_queue_num(dev->vid) * VIRTIO_QNUM; i++)
-               rte_vhost_enable_guest_notification(dev, i, 0);
+       for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
+               rte_vhost_enable_guest_notification(vid, i, 0);
 
        eth_dev->data->dev_link.link_status = ETH_LINK_UP;
 
@@ -298,7 +293,7 @@ new_device(struct virtio_net *dev)
 }
 
 static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
 {
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
@@ -306,12 +301,7 @@ destroy_device(volatile struct virtio_net *dev)
        char ifname[PATH_MAX];
        unsigned i;
 
-       if (dev == NULL) {
-               RTE_LOG(INFO, PMD, "Invalid argument\n");
-               return;
-       }
-
-       rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+       rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
@@ -343,13 +333,13 @@ destroy_device(volatile struct virtio_net *dev)
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
-               vq->device = NULL;
+               vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
-               vq->device = NULL;
+               vq->vid = -1;
        }
 
        RTE_LOG(INFO, PMD, "Connection closed\n");
@@ -358,19 +348,14 @@ destroy_device(volatile struct virtio_net *dev)
 }
 
 static int
-vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
+vring_state_changed(int vid, uint16_t vring, int enable)
 {
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];
 
-       if (dev == NULL) {
-               RTE_LOG(ERR, PMD, "Invalid argument\n");
-               return -1;
-       }
-
-       rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+       rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index 8c627d2299a76a858ae62fa503b3c144f7330775..ec57869cac57894aaee281a45528bb302512aeef 100644
@@ -566,10 +566,9 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
        struct rte_mbuf **m_table;
        unsigned len, ret = 0;
        const uint16_t lcore_id = rte_lcore_id();
-       struct virtio_net *dev = vdev->dev;
 
        RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
-               dev->vid);
+               vdev->vid);
 
        /* Add packet to the port tx queue */
        tx_q = &lcore_tx_queue[lcore_id];
@@ -578,8 +577,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
        tx_q->m_table[len] = m;
        len++;
        if (enable_stats) {
-               dev_statistics[dev->vid].tx_total++;
-               dev_statistics[dev->vid].tx++;
+               dev_statistics[vdev->vid].tx_total++;
+               dev_statistics[vdev->vid].tx++;
        }
 
        if (unlikely(len == MAX_PKT_BURST)) {
@@ -614,7 +613,6 @@ static int
 switch_worker(__rte_unused void *arg)
 {
        struct rte_mempool *mbuf_pool = arg;
-       struct virtio_net *dev = NULL;
        struct vhost_dev *vdev = NULL;
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        struct virtio_net_data_ll *dev_ll;
@@ -688,7 +686,6 @@ switch_worker(__rte_unused void *arg)
 
                while (dev_ll != NULL) {
                        vdev = dev_ll->vdev;
-                       dev = vdev->dev;
 
                        if (unlikely(vdev->remove)) {
                                dev_ll = dev_ll->next;
@@ -709,22 +706,22 @@ switch_worker(__rte_unused void *arg)
                                        * must be less than virtio queue size
                                        */
                                        if (enable_retry && unlikely(rx_count >
-                                               rte_vhost_avail_entries(dev->vid, VIRTIO_RXQ))) {
+                                               rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
                                                for (retry = 0; retry < burst_rx_retry_num;
                                                        retry++) {
                                                        rte_delay_us(burst_rx_delay_time);
-                                                       if (rx_count <= rte_vhost_avail_entries(dev->vid, VIRTIO_RXQ))
+                                                       if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
                                                                break;
                                                }
                                        }
 
-                                       ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
+                                       ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
                                        if (enable_stats) {
                                                rte_atomic64_add(
-                                               &dev_statistics[dev->vid].rx_total_atomic,
+                                               &dev_statistics[vdev->vid].rx_total_atomic,
                                                rx_count);
                                                rte_atomic64_add(
-                                               &dev_statistics[dev->vid].rx_atomic, ret_count);
+                                               &dev_statistics[vdev->vid].rx_atomic, ret_count);
                                        }
                                        while (likely(rx_count)) {
                                                rx_count--;
@@ -736,7 +733,7 @@ switch_worker(__rte_unused void *arg)
 
                        if (likely(!vdev->remove)) {
                                /* Handle guest TX*/
-                               tx_count = rte_vhost_dequeue_burst(dev,
+                               tx_count = rte_vhost_dequeue_burst(vdev->vid,
                                                VIRTIO_TXQ, mbuf_pool,
                                                pkts_burst, MAX_PKT_BURST);
                                /* If this is the first received packet we need to learn the MAC */
@@ -908,12 +905,10 @@ init_data_ll(void)
 /**
  * Remove a device from the specific data core linked list and
  * from the main linked list. Synchonization occurs through the use
- * of the lcore dev_removal_flag. Device is made volatile here
- * to avoid re-ordering of dev->remove=1 which can cause an infinite
- * loop in the rte_pause loop.
+ * of the lcore dev_removal_flag.
  */
 static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
 {
        struct virtio_net_data_ll *ll_lcore_dev_cur;
        struct virtio_net_data_ll *ll_main_dev_cur;
@@ -922,11 +917,9 @@ destroy_device(volatile struct virtio_net *dev)
        struct vhost_dev *vdev = NULL;
        int lcore;
 
-       dev->flags &= ~VIRTIO_DEV_RUNNING;
-
        ll_main_dev_cur = ll_root_used;
        while (ll_main_dev_cur != NULL) {
-               if (ll_main_dev_cur->vdev->vid == dev->vid) {
+               if (ll_main_dev_cur->vdev->vid == vid) {
                        vdev = ll_main_dev_cur->vdev;
                        break;
                }
@@ -952,8 +945,7 @@ destroy_device(volatile struct virtio_net *dev)
 
        if (ll_lcore_dev_cur == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
-                       "(%d) Failed to find the dev to be destroy.\n",
-                       dev->vid);
+                       "(%d) Failed to find the dev to be destroy.\n", vid);
                return;
        }
 
@@ -1001,7 +993,7 @@ destroy_device(volatile struct virtio_net *dev)
        lcore_info[vdev->coreid].lcore_ll->device_num--;
 
        RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
-               "from data core\n", dev->vid);
+               "from data core\n", vid);
 
        rte_free(vdev);
 
@@ -1012,7 +1004,7 @@ destroy_device(volatile struct virtio_net *dev)
  * to the main linked list and the allocated to a specific data core.
  */
 static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
 {
        struct virtio_net_data_ll *ll_dev;
        int lcore, core_add = 0;
@@ -1022,18 +1014,16 @@ new_device(struct virtio_net *dev)
        vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        if (vdev == NULL) {
                RTE_LOG(INFO, VHOST_DATA,
-                       "(%d) Couldn't allocate memory for vhost dev\n",
-                       dev->vid);
+                       "(%d) Couldn't allocate memory for vhost dev\n", vid);
                return -1;
        }
-       vdev->dev = dev;
-       vdev->vid = dev->vid;
+       vdev->vid = vid;
        /* Add device to main ll */
        ll_dev = get_data_ll_free_entry(&ll_root_free);
        if (ll_dev == NULL) {
                RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
                        " linked list Device limit of %d devices per core"
-                       " has been reached\n", dev->vid, nb_devices);
+                       " has been reached\n", vid, nb_devices);
                if (vdev->regions_hpa)
                        rte_free(vdev->regions_hpa);
                rte_free(vdev);
@@ -1041,7 +1031,7 @@ new_device(struct virtio_net *dev)
        }
        ll_dev->vdev = vdev;
        add_data_ll_entry(&ll_root_used, ll_dev);
-       vdev->rx_q = dev->vid;
+       vdev->rx_q = vid;
 
        /* reset ready flag */
        vdev->ready = DEVICE_MAC_LEARNING;
@@ -1059,9 +1049,9 @@ new_device(struct virtio_net *dev)
        if (ll_dev == NULL) {
                RTE_LOG(INFO, VHOST_DATA,
                        "(%d) Failed to add device to data core\n",
-                       dev->vid);
+                       vid);
                vdev->ready = DEVICE_SAFE_REMOVE;
-               destroy_device(dev);
+               destroy_device(vid);
                rte_free(vdev->regions_hpa);
                rte_free(vdev);
                return -1;
@@ -1073,17 +1063,16 @@ new_device(struct virtio_net *dev)
                        ll_dev);
 
        /* Initialize device stats */
-       memset(&dev_statistics[dev->vid], 0,
+       memset(&dev_statistics[vid], 0,
                sizeof(struct device_statistics));
 
        /* Disable notifications. */
-       rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
-       rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+       rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+       rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
        lcore_info[vdev->coreid].lcore_ll->device_num++;
-       dev->flags |= VIRTIO_DEV_RUNNING;
 
        RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
-               dev->vid, vdev->coreid);
+               vid, vdev->coreid);
 
        return 0;
 }
@@ -1121,7 +1110,7 @@ print_stats(void)
 
                dev_ll = ll_root_used;
                while (dev_ll != NULL) {
-                       vid = dev_ll->vdev->dev->vid;
+                       vid = dev_ll->vdev->vid;
                        tx_total = dev_statistics[vid].tx_total;
                        tx = dev_statistics[vid].tx;
                        tx_dropped = tx_total - tx;
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index f786640a0956e3c3858ec7118aa5c610ca616734..c0ea766764686089dbdb172cfc82b2a7d12cd01f 100644
@@ -72,8 +72,6 @@ struct device_statistics {
  */
 struct vhost_dev {
        int vid;
-       /**< Pointer to device created by vhost lib. */
-       struct virtio_net      *dev;
        /**< Number of memory regions for gpa to hpa translation. */
        uint32_t nregions_hpa;
        /**< Memory region information for gpa to hpa translation. */
@@ -117,6 +115,6 @@ struct virtio_net_data_ll {
 };
 
 uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+virtio_dev_rx(int vid, struct rte_mbuf **pkts, uint32_t count);
 
 #endif /* _MAIN_H_ */
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 52e404c0b59b8b96af0186efc91e301b0a1ccff0..37575c27d85797aad827035e814a8c70df384d02 100644
@@ -244,8 +244,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
        int i, ret;
        struct ether_hdr *pkt_hdr;
-       struct virtio_net *dev = vdev->dev;
-       uint64_t portid = dev->vid;
+       uint64_t portid = vdev->vid;
        struct ipv4_hdr *ip;
 
        struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
@@ -254,7 +253,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
                RTE_LOG(INFO, VHOST_DATA,
                        "(%d) WARNING: Not configuring device,"
                        "as already have %d ports for VXLAN.",
-                       dev->vid, VXLAN_N_PORTS);
+                       vdev->vid, VXLAN_N_PORTS);
                return -1;
        }
 
@@ -264,7 +263,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
                RTE_LOG(INFO, VHOST_DATA,
                        "(%d) WARNING: This device is using an existing"
                        " MAC address and has not been registered.\n",
-                       dev->vid);
+                       vdev->vid);
                return -1;
        }
 
@@ -425,8 +424,7 @@ vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
 
 /* Check for decapsulation and pass packets directly to VIRTIO device */
 int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
-               uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
 {
        uint32_t i = 0;
        uint32_t count = 0;
@@ -436,11 +434,11 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
        for (i = 0; i < rx_count; i++) {
                if (enable_stats) {
                        rte_atomic64_add(
-                               &dev_statistics[dev->vid].rx_bad_ip_csum,
+                               &dev_statistics[vid].rx_bad_ip_csum,
                                (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
                                != 0);
                        rte_atomic64_add(
-                               &dev_statistics[dev->vid].rx_bad_ip_csum,
+                               &dev_statistics[vid].rx_bad_ip_csum,
                                (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
                                != 0);
                }
@@ -452,6 +450,6 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
                        count++;
        }
 
-       ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+       ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
        return ret;
 }
diff --git a/examples/tep_termination/vxlan_setup.h b/examples/tep_termination/vxlan_setup.h
index 1846540fa8999046e8f6a18e95cea569d267f9d5..8d26461952e0b4c749fa949b77db02a216a7e4af 100644
@@ -55,10 +55,10 @@ typedef void (*ol_tunnel_destroy_t)(struct vhost_dev *vdev);
 typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
                              struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
-typedef int (*ol_rx_handle_t)(struct virtio_net *dev, struct rte_mbuf **pkts,
+typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
                              uint32_t count);
 
-typedef int (*ol_param_handle)(struct virtio_net *dev);
+typedef int (*ol_param_handle)(int vid);
 
 struct ol_switch_ops {
        ol_port_configure_t        port_configure;
@@ -82,6 +82,6 @@ int
 vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
                        struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
 
 #endif /* VXLAN_SETUP_H_ */
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 77214a697d64ed8445a3cf9a7e37ea117e319591..b95d789cabb9a1c7a41fa5cb703a9b2a0720b7c8 100644
@@ -795,7 +795,7 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 {
        uint16_t ret;
 
-       ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1);
+       ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
        if (enable_stats) {
                rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
                rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
@@ -1041,7 +1041,6 @@ static inline void __attribute__((always_inline))
 drain_eth_rx(struct vhost_dev *vdev)
 {
        uint16_t rx_count, enqueue_count;
-       struct virtio_net *dev = vdev->dev;
        struct rte_mbuf *pkts[MAX_PKT_BURST];
 
        rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
@@ -1055,19 +1054,19 @@ drain_eth_rx(struct vhost_dev *vdev)
         * to diminish packet loss.
         */
        if (enable_retry &&
-           unlikely(rx_count > rte_vhost_avail_entries(dev->vid,
+           unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
                        VIRTIO_RXQ))) {
                uint32_t retry;
 
                for (retry = 0; retry < burst_rx_retry_num; retry++) {
                        rte_delay_us(burst_rx_delay_time);
-                       if (rx_count <= rte_vhost_avail_entries(dev->vid,
+                       if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                                        VIRTIO_RXQ))
                                break;
                }
        }
 
-       enqueue_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ,
+       enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                                                pkts, rx_count);
        if (enable_stats) {
                rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
@@ -1084,7 +1083,7 @@ drain_virtio_tx(struct vhost_dev *vdev)
        uint16_t count;
        uint16_t i;
 
-       count = rte_vhost_dequeue_burst(vdev->dev, VIRTIO_TXQ, mbuf_pool,
+       count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
                                        pkts, MAX_PKT_BURST);
 
        /* setup VMDq for the first packet */
@@ -1171,13 +1170,13 @@ switch_worker(void *arg __rte_unused)
  * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
  */
 static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
 {
        struct vhost_dev *vdev = NULL;
        int lcore;
 
        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
-               if (vdev->vid == dev->vid)
+               if (vdev->vid == vid)
                        break;
        }
        if (!vdev)
@@ -1221,12 +1220,11 @@ destroy_device (volatile struct virtio_net *dev)
  * and the allocated to a specific data core.
  */
 static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
 {
        int lcore, core_add = 0;
        uint32_t device_num_min = num_devices;
        struct vhost_dev *vdev;
-       int vid = dev->vid;
 
        vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        if (vdev == NULL) {
@@ -1235,7 +1233,6 @@ new_device (struct virtio_net *dev)
                        vid);
                return -1;
        }
-       vdev->dev = dev;
        vdev->vid = vid;
 
        TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1259,8 +1256,8 @@ new_device (struct virtio_net *dev)
        lcore_info[vdev->coreid].device_num++;
 
        /* Disable notifications. */
-       rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
-       rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+       rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+       rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
 
        RTE_LOG(INFO, VHOST_DATA,
                "(%d) device has been added to data core %d\n",
@@ -1316,7 +1313,7 @@ print_stats(void)
                                "RX total:              %" PRIu64 "\n"
                                "RX dropped:            %" PRIu64 "\n"
                                "RX successful:         %" PRIu64 "\n",
-                               vdev->dev->vid,
+                               vdev->vid,
                                tx_total, tx_dropped, tx,
                                rx_total, rx_dropped, rx);
                }
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index e99c4367c6578df2337b8ef8bed6256f88693a4b..6bb42e8990c3849d72600686dcf1cd19fd3caffb 100644
@@ -49,8 +49,6 @@ struct device_statistics {
 };
 
 struct vhost_dev {
-       /**< Pointer to device created by vhost lib. */
-       struct virtio_net      *dev;
        /**< Number of memory regions for gpa to hpa translation. */
        uint32_t nregions_hpa;
        /**< Device MAC address (Obtained on first TX packet). */
diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index e33ff53e006055cec1276701bbc6e7f9652b2e4f..7ef8d34a18f674206d3de980908b4ee5585fc33a 100644
@@ -36,7 +36,7 @@ LIB = librte_vhost.a
 
 EXPORT_MAP := rte_vhost_version.map
 
-LIBABIVER := 2
+LIBABIVER := 3
 
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64
 ifeq ($(CONFIG_RTE_LIBRTE_VHOST_USER),y)
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 042746111d71a47128f836a6e4359385471bc074..370345ebb6851defcc87a2acffa64163f737153f 100644
@@ -178,10 +178,10 @@ struct virtio_memory {
  *
  */
 struct virtio_net_device_ops {
-       int (*new_device)(struct virtio_net *); /**< Add device. */
-       void (*destroy_device)(volatile struct virtio_net *);   /**< Remove device. */
+       int (*new_device)(int vid);             /**< Add device. */
+       void (*destroy_device)(int vid);        /**< Remove device. */
 
-       int (*vring_state_changed)(struct virtio_net *dev, uint16_t queue_id, int enable);      /**< triggered when a vring is enabled or disabled */
+       int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);     /**< triggered when a vring is enabled or disabled */
 };
 
 /**
@@ -220,7 +220,7 @@ int rte_vhost_feature_enable(uint64_t feature_mask);
 /* Returns currently supported vhost features */
 uint64_t rte_vhost_feature_get(void);
 
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);
+int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
 
 /* Register vhost driver. dev_name could be different for multiple instance support. */
 int rte_vhost_driver_register(const char *dev_name);
@@ -291,8 +291,8 @@ uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
  * be received from the physical port or from another virtual device. A packet
  * count is returned to indicate the number of packets that were succesfully
  * added to the RX queue.
- * @param dev
- *  virtio-net device
+ * @param vid
+ *  virtio-net device ID
  * @param queue_id
  *  virtio queue index in mq case
  * @param pkts
@@ -302,14 +302,14 @@ uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
  * @return
  *  num of packets enqueued
  */
-uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t count);
 
 /**
  * This function gets guest buffers from the virtio device TX virtqueue,
  * construct host mbufs, copies guest buffer content to host mbufs and
  * store them in pkts to be processed.
- * @param dev
+ * @param vid
  *  virtio-net device
  * @param queue_id
  *  virtio queue index in mq case
@@ -322,7 +322,7 @@ uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
  * @return
  *  num of packets dequeued
  */
-uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
 
 #endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 8d87508c5a27292fc9e7f7d8c1cf3f53169033bd..08cab08e14f7a551cbe1ca450366117eccf30556 100644
@@ -46,6 +46,7 @@
 #include <rte_arp.h>
 
 #include "vhost-net.h"
+#include "virtio-net.h"
 
 #define MAX_PKT_BURST 32
 #define VHOST_LOG_PAGE 4096
@@ -587,9 +588,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t count)
 {
+       struct virtio_net *dev = get_device(vid);
+
+       if (!dev)
+               return 0;
+
        if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
                return virtio_dev_merge_rx(dev, queue_id, pkts, count);
        else
@@ -815,9 +821,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
 {
+       struct virtio_net *dev;
        struct rte_mbuf *rarp_mbuf = NULL;
        struct vhost_virtqueue *vq;
        uint32_t desc_indexes[MAX_PKT_BURST];
@@ -826,6 +833,10 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
        uint16_t free_entries;
        uint16_t avail_idx;
 
+       dev = get_device(vid);
+       if (!dev)
+               return 0;
+
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c
index ae405e8acc08eed2d81cb4ada24db098cba7aa85..a3b707eeb995dfb1457b572c25c2bb2f6b130bfc 100644
@@ -117,7 +117,7 @@ user_set_mem_table(int vid, struct VhostUserMsg *pmsg)
        /* Remove from the data plane. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
-               notify_ops->destroy_device(dev);
+               notify_ops->destroy_device(vid);
        }
 
        if (dev->mem) {
@@ -279,6 +279,9 @@ user_set_vring_kick(int vid, struct VhostUserMsg *pmsg)
        struct vhost_vring_file file;
        struct virtio_net *dev = get_device(vid);
 
+       if (!dev)
+               return;
+
        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
@@ -289,7 +292,7 @@ user_set_vring_kick(int vid, struct VhostUserMsg *pmsg)
        vhost_set_vring_kick(vid, &file);
 
        if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
-               if (notify_ops->new_device(dev) == 0)
+               if (notify_ops->new_device(vid) == 0)
                        dev->flags |= VIRTIO_DEV_RUNNING;
        }
 }
@@ -306,7 +309,7 @@ user_get_vring_base(int vid, struct vhost_vring_state *state)
                return -1;
        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING)
-               notify_ops->destroy_device(dev);
+               notify_ops->destroy_device(vid);
 
        /* Here we are safe to get the last used index */
        vhost_get_vring_base(vid, state->index, state);
@@ -340,9 +343,8 @@ user_set_vring_enable(int vid, struct vhost_vring_state *state)
                "set queue enable: %d to qp idx: %d\n",
                enable, state->index);
 
-       if (notify_ops->vring_state_changed) {
-               notify_ops->vring_state_changed(dev, state->index, enable);
-       }
+       if (notify_ops->vring_state_changed)
+               notify_ops->vring_state_changed(vid, state->index, enable);
 
        dev->virtqueue[state->index]->enabled = enable;
 
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 115eba4ff085ca48c23d21100af78727aa6e3313..ea216c026f4393f7e08a66f1a91aaef4e2a6c954 100644
@@ -296,7 +296,7 @@ vhost_destroy_device(int vid)
 
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
-               notify_ops->destroy_device(dev);
+               notify_ops->destroy_device(vid);
        }
 
        cleanup_device(dev, 1);
@@ -354,7 +354,7 @@ vhost_reset_owner(int vid)
 
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
-               notify_ops->destroy_device(dev);
+               notify_ops->destroy_device(vid);
        }
 
        cleanup_device(dev, 0);
@@ -718,13 +718,13 @@ vhost_set_backend(int vid, struct vhost_vring_file *file)
        if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                if (dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED &&
                    dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED) {
-                       if (notify_ops->new_device(dev) < 0)
+                       if (notify_ops->new_device(vid) < 0)
                                return -1;
                        dev->flags |= VIRTIO_DEV_RUNNING;
                }
        } else if (file->fd == VIRTIO_DEV_STOPPED) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
-               notify_ops->destroy_device(dev);
+               notify_ops->destroy_device(vid);
        }
 
        return 0;
@@ -800,9 +800,14 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
        return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;
 }
 
-int rte_vhost_enable_guest_notification(struct virtio_net *dev,
-       uint16_t queue_id, int enable)
+int
+rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
+       struct virtio_net *dev = get_device(vid);
+
+       if (dev == NULL)
+               return -1;
+
        if (enable) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "guest notification isn't supported.\n");