There are two tailq lists, one for logging all vhost devices, another
one for logging vhost devices distributed on a specific core. However,
there is just one tailq entry, named "next", used to chain into both
lists, which is wrong and could result in a corrupted tailq list: the
list might appear to be always non-empty, because the entry is still
there even after you have invoked TAILQ_REMOVE several times.
Fix it by introducing two tailq entries, one for each list.
Fixes: 45657a5c6861 ("examples/vhost: use tailq to link vhost devices")
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
{
struct vhost_dev *vdev;
{
struct vhost_dev *vdev;
- TAILQ_FOREACH(vdev, &vhost_dev_list, next) {
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
if (vdev->ready == DEVICE_RX &&
is_same_ether_addr(mac, &vdev->mac_address))
return vdev;
if (vdev->ready == DEVICE_RX &&
is_same_ether_addr(mac, &vdev->mac_address))
return vdev;
if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
struct vhost_dev *vdev2;
if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
struct vhost_dev *vdev2;
- TAILQ_FOREACH(vdev2, &vhost_dev_list, next) {
+ TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
virtio_xmit(vdev2, vdev, m);
}
goto queue2nic;
virtio_xmit(vdev2, vdev, m);
}
goto queue2nic;
/*
* Process vhost devices
*/
/*
* Process vhost devices
*/
- TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list, next) {
+ TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
+ lcore_vdev_entry) {
if (unlikely(vdev->remove)) {
unlink_vmdq(vdev);
vdev->ready = DEVICE_SAFE_REMOVE;
if (unlikely(vdev->remove)) {
unlink_vmdq(vdev);
vdev->ready = DEVICE_SAFE_REMOVE;
- TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev, next);
- TAILQ_REMOVE(&vhost_dev_list, vdev, next);
+ TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
+ TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
+
/* Set the dev_removal_flag on each lcore. */
RTE_LCORE_FOREACH_SLAVE(lcore)
/* Set the dev_removal_flag on each lcore. */
RTE_LCORE_FOREACH_SLAVE(lcore)
vdev->dev = dev;
dev->priv = vdev;
vdev->dev = dev;
dev->priv = vdev;
- TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, next);
+ TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
vdev->vmdq_rx_q
= dev->device_fh * queues_per_pool + vmdq_queue_base;
vdev->vmdq_rx_q
= dev->device_fh * queues_per_pool + vmdq_queue_base;
}
vdev->coreid = core_add;
}
vdev->coreid = core_add;
- TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev, next);
+ TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
lcore_info[vdev->coreid].device_num++;
/* Disable notifications. */
lcore_info[vdev->coreid].device_num++;
/* Disable notifications. */
printf("%s%s\n", clr, top_left);
printf("Device statistics =================================\n");
printf("%s%s\n", clr, top_left);
printf("Device statistics =================================\n");
- TAILQ_FOREACH(vdev, &vhost_dev_list, next) {
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
tx_total = vdev->stats.tx_total;
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
tx_total = vdev->stats.tx_total;
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
volatile uint8_t remove;
struct device_statistics stats;
volatile uint8_t remove;
struct device_statistics stats;
- TAILQ_ENTRY(vhost_dev) next;
+ TAILQ_ENTRY(vhost_dev) global_vdev_entry;
+ TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
} __rte_cache_aligned;
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
} __rte_cache_aligned;
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);