Fix serious coding style issues reported by checkpatch.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
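
For reference, the warnings addressed below come from the kernel's
checkpatch.pl run against the vhost sources. A typical invocation
(script location and file path assumed for illustration):

    checkpatch.pl --no-tree --file lib/librte_vhost/rte_virtio_net.h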
#define BUF_VECTOR_MAX 256
-/*
+/**
* Structure contains buffer address, length and descriptor index
* from vring to do scatter RX.
-*/
+ */
struct buf_vector {
-uint64_t buf_addr;
-uint32_t buf_len;
-uint32_t desc_idx;
+ uint64_t buf_addr;
+ uint32_t buf_len;
+ uint32_t desc_idx;
};
-/*
- * Structure contains variables relevant to TX/RX virtqueues.
+/**
+ * Structure contains variables relevant to RX/TX virtqueues.
*/
-struct vhost_virtqueue
-{
- struct vring_desc *desc; /* Virtqueue descriptor ring. */
- struct vring_avail *avail; /* Virtqueue available ring. */
- struct vring_used *used; /* Virtqueue used ring. */
- uint32_t size; /* Size of descriptor ring. */
- uint32_t backend; /* Backend value to determine if device should started/stopped. */
- uint16_t vhost_hlen; /* Vhost header length (varies depending on RX merge buffers. */
- volatile uint16_t last_used_idx; /* Last index used on the available ring */
- volatile uint16_t last_used_idx_res; /* Used for multiple devices reserving buffers. */
- eventfd_t callfd; /* Currently unused as polling mode is enabled. */
- eventfd_t kickfd; /* Used to notify the guest (trigger interrupt). */
- /* Used for scatter RX. */
- struct buf_vector buf_vec[BUF_VECTOR_MAX];
+struct vhost_virtqueue {
+ struct vring_desc *desc; /**< Virtqueue descriptor ring. */
+ struct vring_avail *avail; /**< Virtqueue available ring. */
+ struct vring_used *used; /**< Virtqueue used ring. */
+ uint32_t size; /**< Size of descriptor ring. */
+ uint32_t backend; /**< Backend value to determine if device should be started/stopped. */
+ uint16_t vhost_hlen; /**< Vhost header length (varies depending on RX merge buffers). */
+ volatile uint16_t last_used_idx; /**< Last index used on the available ring. */
+ volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
+ eventfd_t callfd; /**< Currently unused as polling mode is enabled. */
+ eventfd_t kickfd; /**< Used to notify the guest (trigger interrupt). */
+ struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
} __rte_cache_aligned;
-/*
+/**
* Device structure contains all configuration information relating to the device.
*/
struct virtio_net
{
- struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /* Contains all virtqueue information. */
- struct virtio_memory *mem; /* QEMU memory and memory region information. */
- uint64_t features; /* Negotiated feature set. */
- uint64_t device_fh; /* device identifier. */
- uint32_t flags; /* Device flags. Only used to check if device is running on data core. */
- void *priv; /**< private context */
+ struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
+ struct virtio_memory *mem; /**< QEMU memory and memory region information. */
+ uint64_t features; /**< Negotiated feature set. */
+ uint64_t device_fh; /**< device identifier. */
+ uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
+ void *priv; /**< private context */
} __rte_cache_aligned;
-/*
+/**
* Information relating to memory regions including offsets to addresses in QEMU's memory file.
*/
struct virtio_memory_regions {
- uint64_t guest_phys_address; /* Base guest physical address of region. */
- uint64_t guest_phys_address_end; /* End guest physical address of region. */
- uint64_t memory_size; /* Size of region. */
- uint64_t userspace_address; /* Base userspace address of region. */
- uint64_t address_offset; /* Offset of region for address translation. */
+ uint64_t guest_phys_address; /**< Base guest physical address of region. */
+ uint64_t guest_phys_address_end; /**< End guest physical address of region. */
+ uint64_t memory_size; /**< Size of region. */
+ uint64_t userspace_address; /**< Base userspace address of region. */
+ uint64_t address_offset; /**< Offset of region for address translation. */
};
-/*
+/**
* Memory structure includes region and mapping information.
*/
struct virtio_memory {
- uint64_t base_address; /* Base QEMU userspace address of the memory file. */
- uint64_t mapped_address; /* Mapped address of memory file base in our applications memory space. */
- uint64_t mapped_size; /* Total size of memory file. */
- uint32_t nregions; /* Number of memory regions. */
- /* Memory region information. */
- struct virtio_memory_regions regions[0];
+ uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
+ uint64_t mapped_address; /**< Mapped address of memory file base in our application's memory space. */
+ uint64_t mapped_size; /**< Total size of memory file. */
+ uint32_t nregions; /**< Number of memory regions. */
+ struct virtio_memory_regions regions[0]; /**< Memory region information. */
};
-/*
+/**
* Device operations to add/remove device.
*/
struct virtio_net_device_ops {
- int (* new_device) (struct virtio_net *); /* Add device. */
- void (* destroy_device) (volatile struct virtio_net *); /* Remove device. */
+ int (*new_device)(struct virtio_net *); /**< Add device. */
+ void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
};
static inline uint16_t __attribute__((always_inline))
#include "vhost-net-cdev.h"
-#define FUSE_OPT_DUMMY "\0\0"
-#define FUSE_OPT_FORE "-f\0\0"
-#define FUSE_OPT_NOMULTI "-s\0\0"
+#define FUSE_OPT_DUMMY "\0\0"
+#define FUSE_OPT_FORE "-f\0\0"
+#define FUSE_OPT_NOMULTI "-s\0\0"
static const uint32_t default_major = 231;
static const uint32_t default_minor = 1;
-static const char cuse_device_name[] = "/dev/cuse";
-static const char default_cdev[] = "vhost-net";
+static const char cuse_device_name[] = "/dev/cuse";
+static const char default_cdev[] = "vhost-net";
static struct fuse_session *session;
static struct vhost_net_device_ops const *ops;
* Boilerplate code for CUSE IOCTL
* Implicit arguments: ctx, req, result.
*/
-#define VHOST_IOCTL(func) do { \
- result = (func)(ctx); \
- fuse_reply_ioctl(req, result, NULL, 0); \
-} while(0) \
+#define VHOST_IOCTL(func) do { \
+ result = (func)(ctx); \
+ fuse_reply_ioctl(req, result, NULL, 0); \
+} while (0)
/*
* Boilerplate IOCTL RETRY
* Implicit arguments: req.
*/
-#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
- struct iovec iov_r = { arg, (size_r) }; \
- struct iovec iov_w = { arg, (size_w) }; \
+#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
+ struct iovec iov_r = { arg, (size_r) }; \
+ struct iovec iov_w = { arg, (size_w) }; \
fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0); \
-} while(0) \
+} while (0)
/*
* Boilerplate code for CUSE Read IOCTL
* Implicit arguments: ctx, req, result, in_bufsz, in_buf.
*/
-#define VHOST_IOCTL_R(type, var, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type), 0); \
- } else { \
- (var) = *(const type * ) in_buf; \
- result = func(ctx, &(var)); \
- fuse_reply_ioctl(req, result, NULL, 0); \
- } \
-} while(0) \
+#define VHOST_IOCTL_R(type, var, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type), 0); \
+ } else { \
+ (var) = *(const type *)in_buf; \
+ result = func(ctx, &(var)); \
+ fuse_reply_ioctl(req, result, NULL, 0); \
+ } \
+} while (0)
/*
* Boilerplate code for CUSE Write IOCTL
* Implicit arguments: ctx, req, result, out_bufsz.
*/
-#define VHOST_IOCTL_W(type, var, func) do { \
- if (!out_bufsz) { \
- VHOST_IOCTL_RETRY(0, sizeof(type)); \
- } else { \
- result = (func)(ctx, &(var)); \
+#define VHOST_IOCTL_W(type, var, func) do { \
+ if (!out_bufsz) { \
+ VHOST_IOCTL_RETRY(0, sizeof(type)); \
+ } else { \
+ result = (func)(ctx, &(var)); \
fuse_reply_ioctl(req, result, &(var), sizeof(type)); \
- } \
-} while(0) \
+ } \
+} while (0)
/*
* Boilerplate code for CUSE Read/Write IOCTL
* Implicit arguments: ctx, req, result, in_bufsz, in_buf.
*/
-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
- } else { \
- (var1) = *(const type1* ) (in_buf); \
- result = (func)(ctx, (var1), &(var2)); \
- fuse_reply_ioctl(req, result, &(var2), sizeof(type2)); \
- } \
-} while(0) \
+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
+ } else { \
+ (var1) = *(const type1 *)(in_buf); \
+ result = (func)(ctx, (var1), &(var2)); \
+ fuse_reply_ioctl(req, result, &(var2), sizeof(type2)); \
+ } \
+} while (0)
/*
* The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
uint32_t index;
int result = 0;
- switch(cmd)
- {
- case VHOST_NET_SET_BACKEND:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+ switch (cmd) {
+ case VHOST_NET_SET_BACKEND:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+ break;
+
+ case VHOST_GET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_W(uint64_t, features, ops->get_features);
+ break;
+
+ case VHOST_SET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_R(uint64_t, features, ops->set_features);
+ break;
+
+ case VHOST_RESET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->reset_owner);
+ break;
+
+ case VHOST_SET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->set_owner);
+ break;
+
+ case VHOST_SET_MEM_TABLE:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ static struct vhost_memory mem_temp;
+
+ switch (in_bufsz) {
+ case 0:
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
break;
- case VHOST_GET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_W(uint64_t, features, ops->get_features);
- break;
-
- case VHOST_SET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_R(uint64_t, features, ops->set_features);
- break;
-
- case VHOST_RESET_OWNER:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->reset_owner);
- break;
-
- case VHOST_SET_OWNER:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->set_owner);
- break;
-
- case VHOST_SET_MEM_TABLE:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
- static struct vhost_memory mem_temp;
-
- switch(in_bufsz){
- case 0:
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
- break;
-
- case sizeof(struct vhost_memory):
- mem_temp = *(const struct vhost_memory *) in_buf;
-
- if (mem_temp.nregions > 0) {
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
- } else {
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
- }
- break;
-
- default:
- result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
- if (result)
- fuse_reply_err(req, EINVAL);
- else
- fuse_reply_ioctl(req, result, NULL, 0);
+ case sizeof(struct vhost_memory):
+ mem_temp = *(const struct vhost_memory *) in_buf;
+ if (mem_temp.nregions > 0) {
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
+ } else {
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
}
-
- break;
-
- case VHOST_SET_VRING_NUM:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
- break;
-
- case VHOST_SET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
- break;
-
- case VHOST_GET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
- break;
-
- case VHOST_SET_VRING_ADDR:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
- break;
-
- case VHOST_SET_VRING_KICK:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
- break;
-
- case VHOST_SET_VRING_CALL:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
break;
default:
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
+ result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
+ if (result)
+ fuse_reply_err(req, EINVAL);
+ else
+ fuse_reply_ioctl(req, result, NULL, 0);
+ }
+
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
+ break;
+
+ case VHOST_GET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
+ break;
+
+ case VHOST_SET_VRING_ADDR:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
+ break;
+
+ case VHOST_SET_VRING_KICK:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
+ break;
+
+ default:
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOES NOT EXIST\n", ctx.fh);
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
}
- if (result < 0) {
+ if (result < 0)
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
- } else {
+ else
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
- }
}
/*
return 0;
}
-/*
+/**
* The CUSE session is launched allowing the application to receive open, release and ioctl calls.
*/
int
/*
* Structure used to identify device context.
*/
-struct vhost_device_ctx
-{
+struct vhost_device_ctx {
pid_t pid; /* PID of process calling the IOCTL. */
- uint64_t fh; /* Populated with fi->fh to track the device index. */
+ uint64_t fh; /* Populated with fi->fh to track the device index. */
};
/*
* functions are called in CUSE context and are used to configure devices.
*/
struct vhost_net_device_ops {
- int (* new_device) (struct vhost_device_ctx);
- void (* destroy_device) (struct vhost_device_ctx);
+ int (*new_device)(struct vhost_device_ctx);
+ void (*destroy_device)(struct vhost_device_ctx);
- int (* get_features) (struct vhost_device_ctx, uint64_t *);
- int (* set_features) (struct vhost_device_ctx, uint64_t *);
+ int (*get_features)(struct vhost_device_ctx, uint64_t *);
+ int (*set_features)(struct vhost_device_ctx, uint64_t *);
- int (* set_mem_table) (struct vhost_device_ctx, const void *, uint32_t);
+ int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
- int (* set_vring_num) (struct vhost_device_ctx, struct vhost_vring_state *);
- int (* set_vring_addr) (struct vhost_device_ctx, struct vhost_vring_addr *);
- int (* set_vring_base) (struct vhost_device_ctx, struct vhost_vring_state *);
- int (* get_vring_base) (struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
+ int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
+ int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
- int (* set_vring_kick) (struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_vring_call) (struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_backend) (struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_owner) (struct vhost_device_ctx);
- int (* reset_owner) (struct vhost_device_ctx);
+ int (*set_owner)(struct vhost_device_ctx);
+ int (*reset_owner)(struct vhost_device_ctx);
};
#include "vhost-net-cdev.h"
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
-/*
+/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
* count is returned to indicate the number of packets that were successfully
* added to the RX queue. This function works when mergeable is disabled.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
struct vring_desc *desc;
struct rte_mbuf *buff;
/* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint64_t buff_addr = 0;
uint64_t buff_hdr_addr = 0;
uint32_t head[MAX_PKT_BURST], packet_len = 0;
vq = dev->virtqueue[VIRTIO_RXQ];
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
- /* As many data cores may want access to available buffers, they need to be reserved. */
+ /*
+ * Multiple data cores may want access to the available
+ * buffers, so they need to be reserved.
+ */
do {
res_base_idx = vq->last_used_idx_res;
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
res_end_idx = res_base_idx + count;
/* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
- res_end_idx);
+ success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+ res_base_idx, res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
+ head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+ (vq->size - 1)];
/*Prefetch descriptor index. */
rte_prefetch0(&vq->desc[head[packet_success]]);
/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
buff_addr = gpa_to_vva(dev, desc->addr);
/* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
+ rte_prefetch0((void *)(uintptr_t)buff_addr);
/* Copy virtio_hdr to packet and increment buffer address */
buff_hdr_addr = buff_addr;
}
static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev,
- uint16_t res_base_idx, uint16_t res_end_idx,
- struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
+ uint16_t res_end_idx, struct rte_mbuf *pkt)
{
uint32_t vec_idx = 0;
uint32_t entry_success = 0;
* added to the RX queue. This function works for mergeable RX.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
- uint32_t count)
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
uint32_t pkt_idx = 0, entry_success = 0;
}
uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint16_t count)
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
{
if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
return virtio_dev_merge_rx(dev, queue_id, pkts, count);
}
uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
struct rte_mbuf *m, *prev;
struct vhost_virtqueue *vq;
free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
- dev->device_fh, free_entries);
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
* Device linked list structure for configuration.
*/
struct virtio_net_config_ll {
- struct virtio_net dev; /* Virtio device. */
- struct virtio_net_config_ll *next; /* Next entry on linked list. */
+ struct virtio_net dev; /* Virtio device.*/
+ struct virtio_net_config_ll *next; /* Next entry on linked list.*/
};
const char eventfd_cdev[] = "/dev/eventfd-link";
/* device ops to add/remove device to data core. */
-static struct virtio_net_device_ops const * notify_ops;
+static struct virtio_net_device_ops const *notify_ops;
/* Root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll *ll_root = NULL;
+static struct virtio_net_config_ll *ll_root;
/* Features supported by this application. RX merge buffers are enabled by default. */
#define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
#define PROCMAP_SZ 8
/* Structure containing information gathered from maps file. */
-struct procmap
-{
- uint64_t va_start; /* Start virtual address in file. */
- uint64_t len; /* Size of file. */
- uint64_t pgoff; /* Not used. */
- uint32_t maj; /* Not used. */
- uint32_t min; /* Not used. */
- uint32_t ino; /* Not used. */
- char prot[PROT_SZ]; /* Not used. */
- char fname[PATH_MAX]; /* File name. */
+struct procmap {
+ uint64_t va_start; /* Start virtual address in file. */
+ uint64_t len; /* Size of file. */
+ uint64_t pgoff; /* Not used. */
+ uint32_t maj; /* Not used. */
+ uint32_t min; /* Not used. */
+ uint32_t ino; /* Not used. */
+ char prot[PROT_SZ]; /* Not used. */
+ char fname[PATH_MAX]; /* File name. */
};
/*
region = &dev->mem->regions[regionidx];
if ((qemu_va >= region->userspace_address) &&
(qemu_va <= region->userspace_address +
- region->memory_size)) {
- vhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;
+ region->memory_size)) {
+ vhost_va = dev->mem->mapped_address + qemu_va -
+ dev->mem->base_address;
break;
}
}
* Locate the file containing QEMU's memory space and map it to our address space.
*/
static int
-host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
+ pid_t pid, uint64_t addr)
{
struct dirent *dptr = NULL;
struct procmap procmap;
char resolved_path[PATH_MAX];
FILE *fmap;
void *map;
- uint8_t found = 0;
- char line[BUFSIZE];
+ uint8_t found = 0;
+ char line[BUFSIZE];
char dlm[] = "- : ";
char *str, *sp, *in[PROCMAP_SZ];
char *end = NULL;
/* Path where mem files are located. */
- snprintf (procdir, PATH_MAX, "/proc/%u/fd/", pid);
+ snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
/* Maps file used to locate mem file. */
- snprintf (mapfile, PATH_MAX, "/proc/%u/maps", pid);
+ snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
fmap = fopen(mapfile, "r");
if (fmap == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to open maps file for pid %d\n",
+ dev->device_fh, pid);
return -1;
}
errno = 0;
/* Split line in to fields. */
for (i = 0; i < PROCMAP_SZ; i++) {
- if (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {
+ in[i] = strtok_r(str, &dlm[i], &sp);
+ if ((in[i] == NULL) || (errno != 0)) {
fclose(fmap);
return -1;
}
/* Find the guest memory file among the process fds. */
dp = opendir(procdir);
if (dp == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
return -1;
}
/* Read the fd directory contents. */
while (NULL != (dptr = readdir(dp))) {
- snprintf (memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
+ snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
+ pid, dptr->d_name);
		if (realpath(memfile, resolved_path) == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
return -1;
}
- map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);
- close (fd);
+ map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
+ MAP_POPULATE|MAP_SHARED, fd, 0);
+ close(fd);
if (map == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
ll_dev = get_config_ll_entry(ctx);
/* If a matching entry is found in the linked list, return the device in that entry. */
- if (ll_dev) {
+ if (ll_dev)
return &ll_dev->dev;
- }
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
return NULL;
{
/* Unmap QEMU memory file if mapped. */
if (dev->mem) {
- munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
free(dev->mem);
}
* Remove an entry from the device configuration linked list.
*/
static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
+rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
+ struct virtio_net_config_ll *ll_dev_last)
{
/* First remove the device and then clean it up. */
if (ll_dev == ll_root) {
} else {
cleanup_device(&ll_dev->dev);
free_device(ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from config_ll failed\n");
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Remove entry from config_ll failed\n");
return NULL;
}
}
vq_offset = offsetof(struct virtio_net, mem);
/* Set everything to 0. */
- memset((void*)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+ memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
(sizeof(struct virtio_net) - (size_t)vq_offset));
memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
/* Setup device and virtqueues. */
new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
if (new_ll_dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for dev.\n",
+ ctx.fh);
return -1;
}
virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
if (virtqueue_rx == NULL) {
free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for rxq.\n",
+ ctx.fh);
return -1;
}
if (virtqueue_tx == NULL) {
free(virtqueue_rx);
free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for txq.\n",
+ ctx.fh);
return -1;
}
/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
}
return 0;
}
* storing offsets used to translate buffer addresses.
*/
static int
-set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)
+set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
+ uint32_t nregions)
{
struct virtio_net *dev;
struct vhost_memory_region *mem_regions;
return -1;
if (dev->mem) {
- munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
free(dev->mem);
}
/* Malloc the memory structure depending on the number of regions. */
- mem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));
+ mem = calloc(1, sizeof(struct virtio_memory) +
+ (sizeof(struct virtio_memory_regions) * nregions));
if (mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
return -1;
mem->nregions = nregions;
- mem_regions = (void*)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);
+ mem_regions = (void *)(uintptr_t)
+ ((uint64_t)(uintptr_t)mem_regions_addr + size);
for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
/* Populate the region structure for each region. */
- mem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;
- mem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +
+ mem->regions[regionidx].guest_phys_address =
+ mem_regions[regionidx].guest_phys_addr;
+ mem->regions[regionidx].guest_phys_address_end =
+ mem->regions[regionidx].guest_phys_address +
mem_regions[regionidx].memory_size;
- mem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;
- mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;
+ mem->regions[regionidx].memory_size =
+ mem_regions[regionidx].memory_size;
+ mem->regions[regionidx].userspace_address =
+ mem_regions[regionidx].userspace_addr;
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
- regionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,
- (void*)(uintptr_t)mem->regions[regionidx].userspace_address,
+ regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+ (void *)(uintptr_t)mem->regions[regionidx].userspace_address,
mem->regions[regionidx].memory_size);
/*set the base address mapping*/
vq = dev->virtqueue[addr->index];
/* The addresses are converted from QEMU virtual to Vhost virtual. */
- vq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
+ vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
return -1;
}
- vq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
+ vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
return -1;
}
- vq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
+ vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
return -1;
* We send the virtio device our available ring last used index.
*/
static int
-get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)
+get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
+ struct vhost_vring_state *state)
{
struct virtio_net *dev;
eventfd_call.target_pid = ctx.pid;
if (eventfd_copy(dev, &eventfd_call))
- return -1;
+ return -1;
return 0;
}
struct virtio_net *dev;
dev = get_device(ctx);
- if (dev == NULL) {
+ if (dev == NULL)
return -1;
- }
/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
dev->virtqueue[file->index]->backend = file->fd;
return notify_ops->new_device(dev);
/* Otherwise we remove it. */
} else
- if (file->fd == VIRTIO_DEV_STOPPED) {
+ if (file->fd == VIRTIO_DEV_STOPPED)
notify_ops->destroy_device(dev);
- }
return 0;
}
* Function pointers are set for the device operations to allow CUSE to call functions
* when an IOCTL, device_add or device_release is received.
*/
-static const struct vhost_net_device_ops vhost_device_ops =
-{
+static const struct vhost_net_device_ops vhost_device_ops = {
.new_device = new_device,
.destroy_device = destroy_device,
return &vhost_device_ops;
}
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable)
+int rte_vhost_enable_guest_notification(struct virtio_net *dev,
+ uint16_t queue_id, int enable)
{
if (enable) {
RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");