functions added to facilitate the creation of mempools using an external
handler. The 16.07 release will contain these changes.
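As a rough sketch of that external-handler flow (assuming the 16.07
``rte_mempool_create_empty``, ``rte_mempool_set_ops_byname`` and
``rte_mempool_populate_default`` entry points; the handler name
"my_handler" is a placeholder)::

    #include <rte_mempool.h>
    #include <rte_lcore.h>

    struct rte_mempool *mp;

    /* Create the pool shell, bind the external handler, then populate;
     * "my_handler" must match a handler registered with the library. */
    mp = rte_mempool_create_empty("pool", 4096, 2048, 256, 0,
                                  rte_socket_id(), 0);
    if (mp != NULL &&
        rte_mempool_set_ops_byname(mp, "my_handler", NULL) == 0)
        rte_mempool_populate_default(mp);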
-* A librte_vhost public structures refactor is planned for DPDK 16.07
- that requires both ABI and API change.
- The proposed refactor would expose DPDK vhost dev to applications as
- a handle, like the way kernel exposes an fd to user for locating a
- specific file, and to keep all major structures internally, so that
- we are likely to be free from ABI violations in future.
-
* The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
  are respectively replaced by PKT_RX_VLAN_STRIPPED and
  PKT_RX_QINQ_STRIPPED, which are better described. The old flags and
  their behavior will be kept until 16.07 and will be removed in 16.11.
* The vhost function ``rte_vring_available_entries`` is renamed to
``rte_vhost_avail_entries``.
+* All existing vhost APIs and callbacks that take a ``virtio_net`` struct
+  pointer as a parameter have been changed due to the ABI refactoring
+  described below: the pointer is replaced by an ``int vid`` handle.
+
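For example, a data path that previously identified the device by its
``virtio_net`` pointer now passes the integer handle received from the
``new_device(int vid)`` callback (a minimal sketch based on the APIs in
this patch)::

    /* before: identified by pointer */
    nb = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool,
                                 pkts, MAX_PKT_BURST);

    /* after: identified by the vid handle */
    nb = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
                                 pkts, MAX_PKT_BURST);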
ABI Changes
-----------
* The ``rte_eth_dev_info`` structure has new fields ``nb_rx_queues`` and ``nb_tx_queues``
  to support the number of queues configured by software.
+* The vhost ABI has been refactored: the ``virtio_net`` structure is no
+  longer exported to applications. Instead, an integer handle, ``vid``,
+  is used to refer to the device, and all major structures are kept
+  internal to the library.
+
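Device attributes that used to be read from ``virtio_net`` fields are
now queried through the handle via accessors used throughout this patch,
for example::

    char ifname[PATH_MAX];
    int newnode;
    uint32_t queue_num;

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); /* device name */
    newnode = rte_vhost_get_numa_node(vid);            /* NUMA socket */
    queue_num = rte_vhost_get_queue_num(vid);          /* queue pairs */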
Shared Library Versions
-----------------------
librte_sched.so.1
librte_table.so.2
librte_timer.so.1
- librte_vhost.so.2
+ + librte_vhost.so.3
Tested Platforms
----------------
};
struct vhost_queue {
+ int vid; /* vid of the vhost device attached to this queue */
rte_atomic32_t allow_queuing;
rte_atomic32_t while_queuing;
- struct virtio_net *device;
struct pmd_internal *internal;
struct rte_mempool *mb_pool;
uint8_t port;
goto out;
/* Dequeue packets from guest TX queue */
- nb_rx = rte_vhost_dequeue_burst(r->device,
+ nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
r->rx_pkts += nb_rx;
goto out;
/* Enqueue packets to guest RX queue */
- nb_tx = rte_vhost_enqueue_burst(r->device,
+ nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
r->tx_pkts += nb_tx;
}
static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct internal_list *list;
int newnode;
#endif
- if (dev == NULL) {
- RTE_LOG(INFO, PMD, "Invalid argument\n");
- return -1;
- }
-
- rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
internal = eth_dev->data->dev_private;
#ifdef RTE_LIBRTE_VHOST_NUMA
- newnode = rte_vhost_get_numa_node(dev->vid);
+ newnode = rte_vhost_get_numa_node(vid);
if (newnode >= 0)
eth_dev->data->numa_node = newnode;
#endif
vq = eth_dev->data->rx_queues[i];
if (vq == NULL)
continue;
- vq->device = dev;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
vq = eth_dev->data->tx_queues[i];
if (vq == NULL)
continue;
- vq->device = dev;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
- for (i = 0; i < rte_vhost_get_queue_num(dev->vid) * VIRTIO_QNUM; i++)
- rte_vhost_enable_guest_notification(dev, i, 0);
+ for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
+ rte_vhost_enable_guest_notification(vid, i, 0);
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
}
static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct vhost_queue *vq;
char ifname[PATH_MAX];
unsigned i;
- if (dev == NULL) {
- RTE_LOG(INFO, PMD, "Invalid argument\n");
- return;
- }
-
- rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
vq = eth_dev->data->rx_queues[i];
if (vq == NULL)
continue;
- vq->device = NULL;
+ vq->vid = -1;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
vq = eth_dev->data->tx_queues[i];
if (vq == NULL)
continue;
- vq->device = NULL;
+ vq->vid = -1;
}
RTE_LOG(INFO, PMD, "Connection closed\n");
}
static int
-vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
+vring_state_changed(int vid, uint16_t vring, int enable)
{
struct rte_vhost_vring_state *state;
struct rte_eth_dev *eth_dev;
struct internal_list *list;
char ifname[PATH_MAX];
- if (dev == NULL) {
- RTE_LOG(ERR, PMD, "Invalid argument\n");
- return -1;
- }
-
- rte_vhost_get_ifname(dev->vid, ifname, sizeof(ifname));
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
struct rte_mbuf **m_table;
unsigned len, ret = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct virtio_net *dev = vdev->dev;
RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
- dev->vid);
+ vdev->vid);
/* Add packet to the port tx queue */
tx_q = &lcore_tx_queue[lcore_id];
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
- dev_statistics[dev->vid].tx_total++;
- dev_statistics[dev->vid].tx++;
+ dev_statistics[vdev->vid].tx_total++;
+ dev_statistics[vdev->vid].tx++;
}
if (unlikely(len == MAX_PKT_BURST)) {
switch_worker(__rte_unused void *arg)
{
struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
struct vhost_dev *vdev = NULL;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct virtio_net_data_ll *dev_ll;
while (dev_ll != NULL) {
vdev = dev_ll->vdev;
- dev = vdev->dev;
if (unlikely(vdev->remove)) {
dev_ll = dev_ll->next;
* must be less than virtio queue size
*/
if (enable_retry && unlikely(rx_count >
- rte_vhost_avail_entries(dev->vid, VIRTIO_RXQ))) {
+ rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
for (retry = 0; retry < burst_rx_retry_num;
retry++) {
rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vhost_avail_entries(dev->vid, VIRTIO_RXQ))
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
break;
}
}
- ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
+ ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->vid].rx_total_atomic,
+ &dev_statistics[vdev->vid].rx_total_atomic,
rx_count);
rte_atomic64_add(
- &dev_statistics[dev->vid].rx_atomic, ret_count);
+ &dev_statistics[vdev->vid].rx_atomic, ret_count);
}
while (likely(rx_count)) {
rx_count--;
if (likely(!vdev->remove)) {
/* Handle guest TX*/
- tx_count = rte_vhost_dequeue_burst(dev,
+ tx_count = rte_vhost_dequeue_burst(vdev->vid,
VIRTIO_TXQ, mbuf_pool,
pkts_burst, MAX_PKT_BURST);
/* If this is the first received packet we need to learn the MAC */
/**
* Remove a device from the specific data core linked list and
* from the main linked list. Synchronization occurs through the use
- * of the lcore dev_removal_flag. Device is made volatile here
- * to avoid re-ordering of dev->remove=1 which can cause an infinite
- * loop in the rte_pause loop.
+ * of the lcore dev_removal_flag.
*/
static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct virtio_net_data_ll *ll_lcore_dev_cur;
struct virtio_net_data_ll *ll_main_dev_cur;
struct vhost_dev *vdev = NULL;
int lcore;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
ll_main_dev_cur = ll_root_used;
while (ll_main_dev_cur != NULL) {
- if (ll_main_dev_cur->vdev->vid == dev->vid) {
+ if (ll_main_dev_cur->vdev->vid == vid) {
vdev = ll_main_dev_cur->vdev;
break;
}
if (ll_lcore_dev_cur == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) Failed to find the dev to be destroy.\n",
- dev->vid);
+ "(%d) Failed to find the dev to be destroy.\n", vid);
return;
}
lcore_info[vdev->coreid].lcore_ll->device_num--;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
- "from data core\n", dev->vid);
+ "from data core\n", vid);
rte_free(vdev);
* to the main linked list and then allocated to a specific data core.
*/
static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
{
struct virtio_net_data_ll *ll_dev;
int lcore, core_add = 0;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
- "(%d) Couldn't allocate memory for vhost dev\n",
- dev->vid);
+ "(%d) Couldn't allocate memory for vhost dev\n", vid);
return -1;
}
- vdev->dev = dev;
- vdev->vid = dev->vid;
+ vdev->vid = vid;
/* Add device to main ll */
ll_dev = get_data_ll_free_entry(&ll_root_free);
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
" linked list Device limit of %d devices per core"
- " has been reached\n", dev->vid, nb_devices);
+ " has been reached\n", vid, nb_devices);
if (vdev->regions_hpa)
rte_free(vdev->regions_hpa);
rte_free(vdev);
}
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->rx_q = dev->vid;
+ vdev->rx_q = vid;
/* reset ready flag */
vdev->ready = DEVICE_MAC_LEARNING;
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) Failed to add device to data core\n",
- dev->vid);
+ vid);
vdev->ready = DEVICE_SAFE_REMOVE;
- destroy_device(dev);
+ destroy_device(vid);
rte_free(vdev->regions_hpa);
rte_free(vdev);
return -1;
ll_dev);
/* Initialize device stats */
- memset(&dev_statistics[dev->vid], 0,
+ memset(&dev_statistics[vid], 0,
sizeof(struct device_statistics));
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
lcore_info[vdev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
- dev->vid, vdev->coreid);
+ vid, vdev->coreid);
return 0;
}
dev_ll = ll_root_used;
while (dev_ll != NULL) {
- vid = dev_ll->vdev->dev->vid;
+ vid = dev_ll->vdev->vid;
tx_total = dev_statistics[vid].tx_total;
tx = dev_statistics[vid].tx;
tx_dropped = tx_total - tx;
*/
struct vhost_dev {
int vid;
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/**< Memory region information for gpa to hpa translation. */
};
uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+virtio_dev_rx(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* _MAIN_H_ */
{
int i, ret;
struct ether_hdr *pkt_hdr;
- struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->vid;
+ uint64_t portid = vdev->vid;
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: Not configuring device,"
"as already have %d ports for VXLAN.",
- dev->vid, VXLAN_N_PORTS);
+ vdev->vid, VXLAN_N_PORTS);
return -1;
}
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->vid);
+ vdev->vid);
return -1;
}
/* Check for decapsulation and pass packets directly to VIRTIO device */
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
- uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
uint32_t i = 0;
uint32_t count = 0;
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->vid].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
- &dev_statistics[dev->vid].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}
count++;
}
- ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+ ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
return ret;
}
typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-typedef int (*ol_rx_handle_t)(struct virtio_net *dev, struct rte_mbuf **pkts,
+typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
uint32_t count);
-typedef int (*ol_param_handle)(struct virtio_net *dev);
+typedef int (*ol_param_handle)(int vid);
struct ol_switch_ops {
ol_port_configure_t port_configure;
vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* VXLAN_SETUP_H_ */
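For reference, a sketch of plugging these handlers into the ops table
(field names are inferred from the typedef pattern above; remaining
fields omitted):

    static struct ol_switch_ops vxlan_switch_ops = {
        .tx_handle = vxlan_tx_pkts,
        .rx_handle = vxlan_rx_pkts, /* now takes int vid */
    };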
{
uint16_t ret;
- ret = rte_vhost_enqueue_burst(dst_vdev->dev, VIRTIO_RXQ, &m, 1);
+ ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
if (enable_stats) {
rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
drain_eth_rx(struct vhost_dev *vdev)
{
uint16_t rx_count, enqueue_count;
- struct virtio_net *dev = vdev->dev;
struct rte_mbuf *pkts[MAX_PKT_BURST];
rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
* to diminish packet loss.
*/
if (enable_retry &&
- unlikely(rx_count > rte_vhost_avail_entries(dev->vid,
+ unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
VIRTIO_RXQ))) {
uint32_t retry;
for (retry = 0; retry < burst_rx_retry_num; retry++) {
rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vhost_avail_entries(dev->vid,
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid,
VIRTIO_RXQ))
break;
}
}
- enqueue_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ,
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
pkts, rx_count);
if (enable_stats) {
rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
uint16_t count;
uint16_t i;
- count = rte_vhost_dequeue_burst(vdev->dev, VIRTIO_TXQ, mbuf_pool,
+ count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
pkts, MAX_PKT_BURST);
/* setup VMDq for the first packet */
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct vhost_dev *vdev = NULL;
int lcore;
TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
- if (vdev->vid == dev->vid)
+ if (vdev->vid == vid)
break;
}
if (!vdev)
* and the allocated to a specific data core.
*/
static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
{
int lcore, core_add = 0;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
- int vid = dev->vid;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
vid);
return -1;
}
- vdev->dev = dev;
vdev->vid = vid;
TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
lcore_info[vdev->coreid].device_num++;
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
RTE_LOG(INFO, VHOST_DATA,
"(%d) device has been added to data core %d\n",
"RX total: %" PRIu64 "\n"
"RX dropped: %" PRIu64 "\n"
"RX successful: %" PRIu64 "\n",
- vdev->dev->vid,
+ vdev->vid,
tx_total, tx_dropped, tx,
rx_total, rx_dropped, rx);
}
};
struct vhost_dev {
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/**< Device MAC address (Obtained on first TX packet). */
EXPORT_MAP := rte_vhost_version.map
-LIBABIVER := 2
+LIBABIVER := 3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64
ifeq ($(CONFIG_RTE_LIBRTE_VHOST_USER),y)
*
*/
struct virtio_net_device_ops {
- int (*new_device)(struct virtio_net *); /**< Add device. */
- void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
+ int (*new_device)(int vid); /**< Add device. */
+ void (*destroy_device)(int vid); /**< Remove device. */
- int (*vring_state_changed)(struct virtio_net *dev, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
+ int (*vring_state_changed)(int vid, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
};
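A minimal registration sketch, assuming the existing
rte_vhost_driver_callback_register() entry point:

    static const struct virtio_net_device_ops vhost_ops = {
        .new_device          = new_device,          /* int  (*)(int vid) */
        .destroy_device      = destroy_device,      /* void (*)(int vid) */
        .vring_state_changed = vring_state_changed, /* optional */
    };

    rte_vhost_driver_callback_register(&vhost_ops);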
/**
/* Returns currently supported vhost features */
uint64_t rte_vhost_feature_get(void);
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);
+int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
/* Register vhost driver. dev_name could be different for multiple instance support. */
int rte_vhost_driver_register(const char *dev_name);
* be received from the physical port or from another virtual device. A packet
* count is returned to indicate the number of packets that were successfully
* added to the RX queue.
- * @param dev
- * virtio-net device
+ * @param vid
+ * virtio-net device ID
* @param queue_id
* virtio queue index in mq case
* @param pkts
* @return
* num of packets enqueued
*/
-uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
/**
* This function gets guest buffers from the virtio device TX virtqueue,
* constructs host mbufs, copies the guest buffer content to them, and
* stores them in pkts for processing.
- * @param dev
+ * @param vid
* virtio-net device ID
* @param queue_id
* virtio queue index in mq case
* @return
* num of packets dequeued
*/
-uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
#endif /* _VIRTIO_NET_H_ */
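Putting the two burst APIs together, an echo-style data path under the
new handle scheme could look like this sketch (vid, mbuf_pool and
MAX_PKT_BURST are assumed to come from the surrounding application):

    /* Drain the guest TX ring and echo the packets back to its RX ring,
     * freeing whatever the enqueue could not place. */
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t n, sent;

    n = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
                                pkts, MAX_PKT_BURST);
    sent = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, n);
    while (sent < n)
        rte_pktmbuf_free(pkts[sent++]);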
#include <rte_arp.h>
#include "vhost-net.h"
+#include "virtio-net.h"
#define MAX_PKT_BURST 32
#define VHOST_LOG_PAGE 4096
}
uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
return virtio_dev_merge_rx(dev, queue_id, pkts, count);
else
}
uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
+ struct virtio_net *dev;
struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
uint32_t desc_indexes[MAX_PKT_BURST];
uint16_t free_entries;
uint16_t avail_idx;
+ dev = get_device(vid);
+ if (!dev)
+ return 0;
+
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
/* Remove from the data plane. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev);
+ notify_ops->destroy_device(vid);
}
if (dev->mem) {
struct vhost_vring_file file;
struct virtio_net *dev = get_device(vid);
+ if (!dev)
+ return;
+
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
file.fd = VIRTIO_INVALID_EVENTFD;
vhost_set_vring_kick(vid, &file);
if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (notify_ops->new_device(dev) == 0)
+ if (notify_ops->new_device(vid) == 0)
dev->flags |= VIRTIO_DEV_RUNNING;
}
}
return -1;
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING)
- notify_ops->destroy_device(dev);
+ notify_ops->destroy_device(vid);
/* Here we are safe to get the last used index */
vhost_get_vring_base(vid, state->index, state);
"set queue enable: %d to qp idx: %d\n",
enable, state->index);
- if (notify_ops->vring_state_changed) {
- notify_ops->vring_state_changed(dev, state->index, enable);
- }
+ if (notify_ops->vring_state_changed)
+ notify_ops->vring_state_changed(vid, state->index, enable);
dev->virtqueue[state->index]->enabled = enable;
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev);
+ notify_ops->destroy_device(vid);
}
cleanup_device(dev, 1);
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev);
+ notify_ops->destroy_device(vid);
}
cleanup_device(dev, 0);
if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
if (dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED &&
dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED) {
- if (notify_ops->new_device(dev) < 0)
+ if (notify_ops->new_device(vid) < 0)
return -1;
dev->flags |= VIRTIO_DEV_RUNNING;
}
} else if (file->fd == VIRTIO_DEV_STOPPED) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev);
+ notify_ops->destroy_device(vid);
}
return 0;
return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;
}
-int rte_vhost_enable_guest_notification(struct virtio_net *dev,
- uint16_t queue_id, int enable)
+int
+rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
if (enable) {
RTE_LOG(ERR, VHOST_CONFIG,
"guest notification isn't supported.\n");