net/virtio: improve perf via one-way barriers on used flag

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index b004bc1..1c575d0 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -13,6 +13,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 
+#include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
 
 #include "vhost.h"
@@ -124,7 +125,6 @@ is_vhost_user_by_type(const char *path)
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        uint64_t features;
        int ret;
 
@@ -141,7 +141,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
         * replaced when we get proper supports from the
         * memory subsystem in the future.
         */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_lock();
        pthread_mutex_lock(&dev->mutex);
 
        if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
@@ -179,12 +179,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 
        dev->started = true;
        pthread_mutex_unlock(&dev->mutex);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
 
        return 0;
 error:
        pthread_mutex_unlock(&dev->mutex);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_mcfg_mem_read_unlock();
        /* TODO: free resource here or caller to check */
        return -1;
 }
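
For reference, rte_mcfg_mem_read_lock()/rte_mcfg_mem_read_unlock() (rte_eal_memconfig.h) wrap the same memory_hotplug_lock that was previously reached through rte_eal_get_configuration()->mem_config. A minimal sketch of the pattern; run_with_mem_read_lock() is a hypothetical helper, not driver code:

#include <rte_eal_memconfig.h>

/* Hypothetical helper: run cb() with memory hotplug blocked, the same
 * pattern virtio_user_start_device() now follows via the rte_mcfg_mem_*
 * wrappers instead of dereferencing mem_config directly.
 */
static int
run_with_mem_read_lock(int (*cb)(void *), void *arg)
{
	int ret;

	rte_mcfg_mem_read_lock();	/* no segments added/removed while held */
	ret = cb(arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}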
@@ -224,17 +224,13 @@ out:
 static inline void
 parse_mac(struct virtio_user_dev *dev, const char *mac)
 {
-       int i, r;
-       uint32_t tmp[ETHER_ADDR_LEN];
+       struct rte_ether_addr tmp;
 
        if (!mac)
                return;
 
-       r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
-                       &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
-       if (r == ETHER_ADDR_LEN) {
-               for (i = 0; i < ETHER_ADDR_LEN; ++i)
-                       dev->mac_addr[i] = (uint8_t)tmp[i];
+       if (rte_ether_unformat_addr(mac, &tmp) == 0) {
+               memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
                dev->mac_specified = 1;
        } else {
                /* ignore the wrong mac, use random mac */
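
rte_ether_unformat_addr() (rte_ether.h) parses an "XX:XX:XX:XX:XX:XX" string into a struct rte_ether_addr and returns 0 on success, which is what lets the hand-rolled sscanf() loop above be dropped. A small usage sketch; copy_mac() is an illustrative helper, not part of the driver:

#include <string.h>
#include <rte_ether.h>

static int
copy_mac(const char *str, uint8_t mac_out[RTE_ETHER_ADDR_LEN])
{
	struct rte_ether_addr tmp;

	/* Returns a negative value if str is not a valid MAC string. */
	if (rte_ether_unformat_addr(str, &tmp) != 0)
		return -1;

	memcpy(mac_out, tmp.addr_bytes, RTE_ETHER_ADDR_LEN);
	return 0;
}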
@@ -429,7 +425,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int server, int mrg_rxbuf, int in_order, int packed_vq)
 {
        pthread_mutex_init(&dev->mutex, NULL);
-       snprintf(dev->path, PATH_MAX, "%s", path);
+       strlcpy(dev->path, path, PATH_MAX);
        dev->started = 0;
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
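
strlcpy() (supplied through rte_string_fns.h where libc lacks it) always NUL-terminates, does no format parsing, and returns the length of the source string, so truncation is easy to detect. A short sketch under those assumptions, with copy_path() as a hypothetical helper:

#include <string.h>
#include <rte_string_fns.h>

static int
copy_path(char *dst, size_t dst_size, const char *src)
{
	/* strlcpy() returns strlen(src); a value >= dst_size means the
	 * copy was truncated (the destination is still NUL-terminated).
	 */
	if (strlcpy(dst, src, dst_size) >= dst_size)
		return -1;
	return 0;
}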
@@ -628,7 +624,7 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 static inline int
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = desc->flags;
+       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
        return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -688,6 +684,10 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
        struct vring_packed *vring = &dev->packed_vrings[queue_idx];
        uint16_t n_descs, flags;
 
+       /* Perform a load-acquire barrier in desc_is_avail to
+        * enforce the ordering between desc flags and desc
+        * content.
+        */
        while (desc_is_avail(&vring->desc[vq->used_idx],
                             vq->used_wrap_counter)) {
 
@@ -698,8 +698,8 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
                if (vq->used_wrap_counter)
                        flags |= VRING_PACKED_DESC_F_AVAIL_USED;
 
-               rte_smp_wmb();
-               vring->desc[vq->used_idx].flags = flags;
+               __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+                                __ATOMIC_RELEASE);
 
                vq->used_idx += n_descs;
                if (vq->used_idx >= dev->queue_size) {
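
The one-way barriers named in the title work as an acquire/release pair: the producer side writes the descriptor contents and then store-releases the AVAIL/USED bits in flags, while the consumer side (desc_is_avail() above) load-acquires flags before reading the descriptor, replacing the rte_smp_wmb() that preceded the plain flags store. A self-contained sketch of the same pairing; demo_desc and DEMO_F_AVAIL are simplified stand-ins, not the virtio ring structures:

#include <stdint.h>
#include <stdbool.h>

/* Simplified stand-in for a packed ring descriptor (not the DPDK layout). */
struct demo_desc {
	uint64_t addr;   /* descriptor payload */
	uint16_t flags;  /* availability bits live here */
};

#define DEMO_F_AVAIL (1 << 7)

/* Producer: fill the payload, then release-store the flags so the
 * payload write cannot be reordered after the flags update.
 */
static void
demo_publish(struct demo_desc *d, uint64_t addr)
{
	d->addr = addr;
	__atomic_store_n(&d->flags, DEMO_F_AVAIL, __ATOMIC_RELEASE);
}

/* Consumer: acquire-load the flags first; if the descriptor is marked
 * available, the payload stored before the release is guaranteed visible.
 */
static bool
demo_poll(struct demo_desc *d, uint64_t *addr)
{
	uint16_t flags = __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);

	if (!(flags & DEMO_F_AVAIL))
		return false;

	*addr = d->addr;
	return true;
}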