#include <sys/types.h>
#include <sys/stat.h>
+#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include "vhost.h"
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t features;
int ret;
* replaced when we get proper support from the
* memory subsystem in the future.
*/
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
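+ /* Use the EAL helper to take the shared memory configuration read
+  * lock instead of dereferencing rte_mem_config directly.
+  */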
+ rte_mcfg_mem_read_lock();
pthread_mutex_lock(&dev->mutex);
if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
dev->started = true;
pthread_mutex_unlock(&dev->mutex);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return 0;
error:
pthread_mutex_unlock(&dev->mutex);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/* TODO: free resource here or caller to check */
return -1;
}
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
- int i, r;
- uint32_t tmp[ETHER_ADDR_LEN];
+ struct rte_ether_addr tmp;
if (!mac)
return;
- r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
- &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
- if (r == ETHER_ADDR_LEN) {
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
- dev->mac_addr[i] = (uint8_t)tmp[i];
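+ /* rte_ether_unformat_addr() parses the usual colon-separated MAC
+  * string formats and returns 0 on success.
+  */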
+ if (rte_ether_unformat_addr(mac, &tmp) == 0) {
+ memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
dev->mac_specified = 1;
} else {
/* ignore the wrong mac, use random mac */
int server, int mrg_rxbuf, int in_order, int packed_vq)
{
pthread_mutex_init(&dev->mutex, NULL);
- snprintf(dev->path, PATH_MAX, "%s", path);
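+ /* strlcpy() (from rte_string_fns.h) copies at most PATH_MAX - 1
+  * bytes and always NUL-terminates, without format-string processing.
+  */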
+ strlcpy(dev->path, path, PATH_MAX);
dev->started = 0;
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = desc->flags;
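+ /* A load-acquire on flags pairs with the driver's store-release, so
+  * subsequent reads of the descriptor contents are not reordered
+  * before this availability check.
+  */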
+ uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
uint16_t n_descs, flags;
+ /* Perform a load-acquire barrier in desc_is_avail to
+  * enforce the ordering between desc flags and desc
+  * content.
+  */
while (desc_is_avail(&vring->desc[vq->used_idx],
vq->used_wrap_counter)) {
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- rte_smp_wmb();
- vring->desc[vq->used_idx].flags = flags;
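+ /* The store-release makes the descriptor updates above visible to
+  * the driver before the flags hand the descriptor back, replacing
+  * the explicit rte_smp_wmb().
+  */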
+ __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+ __ATOMIC_RELEASE);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {