For a long while I failed to figure out what "fh" means here.
The only guess I could make was "file handle". So, you get the
point: it's not well named.
I then figured out that "fh" is derived from the fuse lib, and
my guess above is right. However, device_fh represents a virtio-net
device ID. Therefore, I rename it here to vid (Virtio-net device
ID, or Vhost device ID; pick whichever you prefer) to make it easier
to understand.
This name (vid) will then be considered the only interface exposed
to applications. That's another reason to do the rename: since it is
our interface, we should make it more understandable.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Rich Lane <rich.lane@bigswitch.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
struct virtio_net *dev = vdev->dev;
RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
- dev->device_fh);
+ dev->vid);
/* Add packet to the port tx queue */
tx_q = &lcore_tx_queue[lcore_id];
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
+ dev_statistics[dev->vid].tx_total++;
+ dev_statistics[dev->vid].tx++;
}
if (unlikely(len == MAX_PKT_BURST)) {
ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_total_atomic,
+ &dev_statistics[dev->vid].rx_total_atomic,
rx_count);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_atomic, ret_count);
+ &dev_statistics[dev->vid].rx_atomic, ret_count);
}
while (likely(rx_count)) {
rx_count--;
if (ll_lcore_dev_cur == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) Failed to find the dev to be destroy.\n",
- dev->device_fh);
+ dev->vid);
return;
}
lcore_info[vdev->coreid].lcore_ll->device_num--;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
- "from data core\n", dev->device_fh);
+ "from data core\n", dev->vid);
rte_free(vdev);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) Couldn't allocate memory for vhost dev\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
vdev->dev = dev;
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
" linked list Device limit of %d devices per core"
- " has been reached\n", dev->device_fh, nb_devices);
+ " has been reached\n", dev->vid, nb_devices);
if (vdev->regions_hpa)
rte_free(vdev->regions_hpa);
rte_free(vdev);
}
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->rx_q = dev->device_fh;
+ vdev->rx_q = dev->vid;
/* reset ready flag */
vdev->ready = DEVICE_MAC_LEARNING;
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) Failed to add device to data core\n",
- dev->device_fh);
+ dev->vid);
vdev->ready = DEVICE_SAFE_REMOVE;
destroy_device(dev);
rte_free(vdev->regions_hpa);
ll_dev);
/* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0,
+ memset(&dev_statistics[dev->vid], 0,
sizeof(struct device_statistics));
/* Disable notifications. */
dev->flags |= VIRTIO_DEV_RUNNING;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
- dev->device_fh, vdev->coreid);
+ dev->vid, vdev->coreid);
return 0;
}
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
- uint32_t device_fh;
+ int vid;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
dev_ll = ll_root_used;
while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
+ vid = dev_ll->vdev->dev->vid;
+ tx_total = dev_statistics[vid].tx_total;
+ tx = dev_statistics[vid].tx;
tx_dropped = tx_total - tx;
rx_total = rte_atomic64_read(
- &dev_statistics[device_fh].rx_total_atomic);
+ &dev_statistics[vid].rx_total_atomic);
rx = rte_atomic64_read(
- &dev_statistics[device_fh].rx_atomic);
+ &dev_statistics[vid].rx_atomic);
rx_dropped = rx_total - rx;
rx_ip_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_ip_csum);
+ &dev_statistics[vid].rx_bad_ip_csum);
rx_l4_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_l4_csum);
+ &dev_statistics[vid].rx_bad_l4_csum);
- printf("\nStatistics for device %"PRIu32" ----------"
+ printf("\nStatistics for device %d ----------"
"\nTX total: %"PRIu64""
"\nTX dropped: %"PRIu64""
"\nTX successful: %"PRIu64""
"\nRX bad L4 csum: %"PRIu64""
"\nRX dropped: %"PRIu64""
"\nRX successful: %"PRIu64"",
- device_fh,
+ vid,
tx_total,
tx_dropped,
tx,
int i, ret;
struct ether_hdr *pkt_hdr;
struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->device_fh;
+ uint64_t portid = dev->vid;
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: Not configuring device,"
"as already have %d ports for VXLAN.",
- dev->device_fh, VXLAN_N_PORTS);
+ dev->vid, VXLAN_N_PORTS);
return -1;
}
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[dev->vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[dev->vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}
if (find_vhost_dev(&pkt_hdr->s_addr)) {
RTE_LOG(ERR, VHOST_DATA,
"(%d) device is using a registered MAC!\n",
- vdev->device_fh);
+ vdev->vid);
return -1;
}
vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
- vdev->vlan_tag = vlan_tags[vdev->device_fh];
+ vdev->vlan_tag = vlan_tags[vdev->vid];
/* Print out VMDQ registration info. */
RTE_LOG(INFO, VHOST_DATA,
"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
- vdev->device_fh,
+ vdev->vid,
vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
- (uint32_t)vdev->device_fh + vmdq_pool_base);
+ (uint32_t)vdev->vid + vmdq_pool_base);
if (ret)
RTE_LOG(ERR, VHOST_DATA,
"(%d) failed to add device MAC address to VMDQ\n",
- vdev->device_fh);
+ vdev->vid);
/* Enable stripping of the vlan tag as we handle routing. */
if (vlan_strip)
if (!dst_vdev)
return -1;
- if (vdev->device_fh == dst_vdev->device_fh) {
+ if (vdev->vid == dst_vdev->vid) {
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
- vdev->device_fh);
+ vdev->vid);
return 0;
}
RTE_LOG(DEBUG, VHOST_DATA,
- "(%d) TX: MAC address is local\n", dst_vdev->device_fh);
+ "(%d) TX: MAC address is local\n", dst_vdev->vid);
if (unlikely(dst_vdev->remove)) {
RTE_LOG(DEBUG, VHOST_DATA,
- "(%d) device is marked for removal\n", dst_vdev->device_fh);
+ "(%d) device is marked for removal\n", dst_vdev->vid);
return 0;
}
if (!dst_vdev)
return 0;
- if (vdev->device_fh == dst_vdev->device_fh) {
+ if (vdev->vid == dst_vdev->vid) {
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
- vdev->device_fh);
+ vdev->vid);
return -1;
}
* the packet length by plus it.
*/
*offset = VLAN_HLEN;
- *vlan_tag = vlan_tags[vdev->device_fh];
+ *vlan_tag = vlan_tags[vdev->vid];
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
- vdev->device_fh, dst_vdev->device_fh, *vlan_tag);
+ vdev->vid, dst_vdev->vid, *vlan_tag);
return 0;
}
}
RTE_LOG(DEBUG, VHOST_DATA,
- "(%d) TX: MAC address is external\n", vdev->device_fh);
+ "(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:
}
for (i = 0; i < count; ++i)
- virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->device_fh]);
+ virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
RTE_LOG(INFO, VHOST_DATA,
"(%d) device has been removed from data core\n",
- vdev->device_fh);
+ vdev->vid);
rte_free(vdev);
}
int lcore, core_add = 0;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
- int device_fh = dev->device_fh;
+ int vid = dev->vid;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) couldn't allocate memory for vhost dev\n",
- device_fh);
+ vid);
return -1;
}
vdev->dev = dev;
dev->priv = vdev;
- vdev->device_fh = device_fh;
+ vdev->vid = vid;
TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
- vdev->vmdq_rx_q = device_fh * queues_per_pool + vmdq_queue_base;
+ vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
/*reset ready flag*/
vdev->ready = DEVICE_MAC_LEARNING;
RTE_LOG(INFO, VHOST_DATA,
"(%d) device has been added to data core %d\n",
- device_fh, vdev->coreid);
+ vid, vdev->coreid);
return 0;
}
"RX total: %" PRIu64 "\n"
"RX dropped: %" PRIu64 "\n"
"RX successful: %" PRIu64 "\n",
- vdev->dev->device_fh,
+ vdev->dev->vid,
tx_total, tx_dropped, tx,
rx_total, rx_dropped, rx);
}
/**< Device is marked for removal from the data core. */
volatile uint8_t remove;
- int device_fh;
+ int vid;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
struct virtio_memory *mem; /**< QEMU memory and memory region information. */
uint64_t features; /**< Negotiated feature set. */
uint64_t protocol_features; /**< Negotiated protocol feature set. */
- int device_fh; /**< device identifier. */
+ int vid; /**< device identifier. */
uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ]; /**< Name of the tap device or socket path. */
char packet[VHOST_MAX_PRINT_BUFF]; \
\
if ((header)) \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->device_fh), (size)); \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
else \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->device_fh), (size)); \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
for (index = 0; index < (size); index++) { \
snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
"%02hhx ", pkt_addr[index]); \
*/
struct vhost_device_ctx {
pid_t pid; /* PID of process calling the IOCTL. */
- int fh; /* Populated with fi->fh to track the device index. */
+ int vid; /* Virtio-net device ID */
};
int vhost_new_device(struct vhost_device_ctx);
struct fuse_ctx const *const req_ctx = fuse_req_ctx(req);
ctx.pid = req_ctx->pid;
- ctx.fh = fi->fh;
+ ctx.vid = (int)fi->fh;
return ctx;
}
fi->fh = err;
RTE_LOG(INFO, VHOST_CONFIG,
- "(%d) device configuration started\n", fi->fh);
+ "(%d) device configuration started\n", err);
fuse_reply_open(req, fi);
}
struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
vhost_destroy_device(ctx);
- RTE_LOG(INFO, VHOST_CONFIG, "(%d) device released\n", ctx.fh);
+ RTE_LOG(INFO, VHOST_CONFIG, "(%d) device released\n", ctx.vid);
fuse_reply_err(req, err);
}
switch (cmd) {
case VHOST_NET_SET_BACKEND:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ "(%d) IOCTL: VHOST_NET_SET_BACKEND\n", ctx.vid);
if (!in_buf) {
VHOST_IOCTL_RETRY(sizeof(file), 0);
break;
case VHOST_GET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ "(%d) IOCTL: VHOST_GET_FEATURES\n", ctx.vid);
VHOST_IOCTL_W(uint64_t, features, vhost_get_features);
break;
case VHOST_SET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_FEATURES\n", ctx.vid);
VHOST_IOCTL_R(uint64_t, features, vhost_set_features);
break;
case VHOST_RESET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ "(%d) IOCTL: VHOST_RESET_OWNER\n", ctx.vid);
VHOST_IOCTL(vhost_reset_owner);
break;
case VHOST_SET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_OWNER\n", ctx.vid);
VHOST_IOCTL(vhost_set_owner);
break;
case VHOST_SET_MEM_TABLE:
/*TODO fix race condition.*/
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_MEM_TABLE\n", ctx.vid);
static struct vhost_memory mem_temp;
switch (in_bufsz) {
case VHOST_SET_VRING_NUM:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_NUM\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_num);
break;
case VHOST_SET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_BASE\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_base);
break;
case VHOST_GET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_GET_VRING_BASE\n", ctx.vid);
VHOST_IOCTL_RW(uint32_t, index,
struct vhost_vring_state, state, vhost_get_vring_base);
break;
case VHOST_SET_VRING_ADDR:
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_ADDR\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_addr, addr,
vhost_set_vring_addr);
break;
if (cmd == VHOST_SET_VRING_KICK)
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_KICK\n",
- ctx.fh);
+ ctx.vid);
else
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_CALL\n",
- ctx.fh);
+ ctx.vid);
if (!in_buf)
VHOST_IOCTL_RETRY(sizeof(struct vhost_vring_file), 0);
else {
default:
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) IOCTL: DOESN NOT EXIST\n", ctx.fh);
+ "(%d) IOCTL: DOESN NOT EXIST\n", ctx.vid);
result = -1;
fuse_reply_ioctl(req, result, NULL, 0);
}
if (result < 0)
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: FAIL\n", ctx.fh);
+ "(%d) IOCTL: FAIL\n", ctx.vid);
else
LOG_DEBUG(VHOST_CONFIG,
- "(%d) IOCTL: SUCCESS\n", ctx.fh);
+ "(%d) IOCTL: SUCCESS\n", ctx.vid);
}
/*
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
if (close(fd_tap) < 0)
RTE_LOG(ERR, VHOST_CONFIG, "(%d) fd close failed\n",
- dev->device_fh);
+ dev->vid);
if (ret >= 0) {
ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
} else
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) TUNGETIFF ioctl failed\n",
- dev->device_fh);
+ dev->vid);
return 0;
}
uint16_t desc_indexes[MAX_PKT_BURST];
uint32_t i;
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->device_fh, __func__, queue_id);
+ dev->vid, __func__, queue_id);
return 0;
}
return 0;
LOG_DEBUG(VHOST_DATA, "(%d) res_start_idx %d | res_end_idx Index %d\n",
- dev->device_fh, res_start_idx, res_end_idx);
+ dev->vid, res_start_idx, res_end_idx);
/* Retrieve all of the desc indexes first to avoid caching issues. */
rte_prefetch0(&vq->avail->ring[res_start_idx & (vq->size - 1)]);
return 0;
LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
- dev->device_fh, cur_idx, res_end_idx);
+ dev->vid, cur_idx, res_end_idx);
if (vq->buf_vec[vec_idx].buf_len < vq->vhost_hlen)
return -1;
virtio_hdr.num_buffers = res_end_idx - res_start_idx;
LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
- dev->device_fh, virtio_hdr.num_buffers);
+ dev->vid, virtio_hdr.num_buffers);
virtio_enqueue_offload(m, &virtio_hdr.hdr);
copy_virtio_net_hdr(vq, desc_addr, virtio_hdr);
uint32_t pkt_idx = 0, nr_used = 0;
uint16_t start, end;
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->device_fh, __func__, queue_id);
+ dev->vid, __func__, queue_id);
return 0;
}
&start, &end) < 0)) {
LOG_DEBUG(VHOST_DATA,
"(%d) failed to get enough desc from vring\n",
- dev->device_fh);
+ dev->vid);
break;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->device_fh, __func__, queue_id);
+ dev->vid, __func__, queue_id);
return 0;
}
if (free_entries == 0)
goto out;
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
/* Prefetch available ring to retrieve head indexes. */
used_idx = vq->last_used_idx & (vq->size - 1);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
- dev->device_fh, count);
+ dev->vid, count);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < count; i++) {
struct connfd_ctx {
struct vhost_server *vserver;
- int fh;
+ int vid;
};
#define MAX_VHOST_SERVER 1024
struct vhost_server *vserver = (struct vhost_server *)dat;
int conn_fd;
struct connfd_ctx *ctx;
- int fh;
+ int vid;
struct vhost_device_ctx vdev_ctx = { (pid_t)0, 0 };
unsigned int size;
return;
}
- fh = vhost_new_device(vdev_ctx);
- if (fh == -1) {
+ vid = vhost_new_device(vdev_ctx);
+ if (vid == -1) {
free(ctx);
close(conn_fd);
return;
}
- vdev_ctx.fh = fh;
+ vdev_ctx.vid = vid;
size = strnlen(vserver->path, PATH_MAX);
vhost_set_ifname(vdev_ctx, vserver->path,
size);
- RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", fh);
+ RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
ctx->vserver = vserver;
- ctx->fh = fh;
+ ctx->vid = vid;
fdset_add(&g_vhost_server.fdset,
conn_fd, vserver_message_handler, NULL, ctx);
}
uint64_t features;
int ret;
- ctx.fh = cfd_ctx->fh;
+ ctx.vid = cfd_ctx->vid;
ret = read_vhost_message(connfd, &msg);
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
if (ret < 0)
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
dev->mem->nregions = memory.nregions;
struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
- struct virtio_net *dev = vhost_devices[ctx.fh];
+ struct virtio_net *dev = vhost_devices[ctx.vid];
if (unlikely(!dev)) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) device not found.\n", ctx.fh);
+ "(%d) device not found.\n", ctx.vid);
}
return dev;
/*
* Reset some variables in device structure, while keeping few
- * others untouched, such as device_fh, ifname, virt_qp_nb: they
+ * others untouched, such as vid, ifname, virt_qp_nb: they
* should be same unless the device is removed.
*/
static void
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) failed to allocate memory for dev.\n", ctx.fh);
+ "(%d) failed to allocate memory for dev.\n", ctx.vid);
return -1;
}
}
vhost_devices[i] = dev;
- dev->device_fh = i;
+ dev->vid = i;
return i;
}
cleanup_device(dev, 1);
free_device(dev);
- vhost_devices[ctx.fh] = NULL;
+ vhost_devices[ctx.vid] = NULL;
}
void
}
LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
- dev->device_fh,
+ dev->vid,
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
out:
dev->virtqueue[index] = vq;
dev->virtqueue[index + 1] = vq + 1;
- vhost_devices[dev->device_fh] = dev;
+ vhost_devices[dev->vid] = dev;
return dev;
}
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
- dev->device_fh);
+ dev->vid);
return -1;
}
vq->log_guest_addr = addr->log_guest_addr;
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
- dev->device_fh, vq->desc);
+ dev->vid, vq->desc);
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
- dev->device_fh, vq->avail);
+ dev->vid, vq->avail);
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
- dev->device_fh, vq->used);
+ dev->vid, vq->used);
LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
- dev->device_fh, vq->log_guest_addr);
+ dev->vid, vq->log_guest_addr);
return 0;
}