ethdev: add device flag to bypass auto-filled queue xstats
[dpdk.git] / drivers / net / memif / rte_eth_memif.c
index c1c7e9f..f7ae55f 100644 (file)
@@ -36,6 +36,7 @@
 #define ETH_MEMIF_PKT_BUFFER_SIZE_ARG  "bsize"
 #define ETH_MEMIF_RING_SIZE_ARG                "rsize"
 #define ETH_MEMIF_SOCKET_ARG           "socket"
+#define ETH_MEMIF_SOCKET_ABSTRACT_ARG  "socket-abstract"
 #define ETH_MEMIF_MAC_ARG              "mac"
 #define ETH_MEMIF_ZC_ARG               "zero-copy"
 #define ETH_MEMIF_SECRET_ARG           "secret"
@@ -46,6 +47,7 @@ static const char * const valid_arguments[] = {
        ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
        ETH_MEMIF_RING_SIZE_ARG,
        ETH_MEMIF_SOCKET_ARG,
+       ETH_MEMIF_SOCKET_ABSTRACT_ARG,
        ETH_MEMIF_MAC_ARG,
        ETH_MEMIF_ZC_ARG,
        ETH_MEMIF_SECRET_ARG,
@@ -249,11 +251,17 @@ memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d)
 static void
 memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq)
 {
+       uint16_t cur_tail;
        uint16_t mask = (1 << mq->log2_ring_size) - 1;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
 
        /* FIXME: improve performance */
-       while (mq->last_tail != ring->tail) {
+       /* The ring->tail acts as a guard variable between Tx and Rx
+        * threads, so using load-acquire pairs with store-release
+        * in function eth_memif_rx for S2M queues.
+        */
+       cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+       while (mq->last_tail != cur_tail) {
                RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
                /* Decrement refcnt and free mbuf. (current segment) */
                rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
@@ -405,7 +413,11 @@ no_free_bufs:
 
 refill:
        if (type == MEMIF_RING_M2S) {
-               head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+               /* ring->head is updated by the receiver and this function
+                * is called in the context of receiver thread. The loads in
+                * the receiver do not need to synchronize with its own stores.
+                */
+               head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
                n_slots = ring_size - head + mq->last_tail;
 
                while (n_slots--) {
@@ -455,7 +467,11 @@ eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        mask = ring_size - 1;
 
        cur_slot = mq->last_tail;
-       last_slot = ring->tail;
+       /* The ring->tail acts as a guard variable between Tx and Rx
+        * threads, so using load-acquire pairs with store-release
+        * to synchronize it between threads.
+        */
+       last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
        if (cur_slot == last_slot)
                goto refill;
        n_slots = last_slot - cur_slot;
@@ -501,7 +517,11 @@ next_slot:
 
 /* Supply master with new buffers */
 refill:
-       head = ring->head;
+       /* ring->head is updated by the receiver and this function
+        * is called in the context of receiver thread. The loads in
+        * the receiver do not need to synchronize with its own stores.
+        */
+       head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
        n_slots = ring_size - head + mq->last_tail;
 
        if (n_slots < 32)
@@ -526,8 +546,11 @@ refill:
                        (uint8_t *)proc_private->regions[d0->region]->addr;
        }
 no_free_mbufs:
-       rte_mb();
-       ring->head = head;
+       /* The ring->head acts as a guard variable between Tx and Rx
+        * threads, so using store-release pairs with load-acquire
+        * in function eth_memif_tx.
+        */
+       __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
 
        mq->n_pkts += n_rx_pkts;
 
@@ -568,14 +591,24 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        ring_size = 1 << mq->log2_ring_size;
        mask = ring_size - 1;
 
-       n_free = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE) - mq->last_tail;
-       mq->last_tail += n_free;
-
        if (type == MEMIF_RING_S2M) {
-               slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
-               n_free = ring_size - slot + mq->last_tail;
+               /* For S2M queues ring->head is updated by the sender and
+                * this function is called in the context of sending thread.
+                * The loads in the sender do not need to synchronize with
+                * its own stores. Hence, the following load can be a
+                * relaxed load.
+                */
+               slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+               n_free = ring_size - slot +
+                               __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
        } else {
-               slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+               /* For M2S queues ring->tail is updated by the sender and
+                * this function is called in the context of sending thread.
+                * The loads in the sender do not need to synchronize with
+                * its own stores. Hence, the following load can be a
+                * relaxed load.
+                */
+               slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
                n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
        }
 
@@ -705,7 +738,6 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rte_eth_devices[mq->in_port].process_private;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
        uint16_t slot, n_free, ring_size, mask, n_tx_pkts = 0;
-       memif_ring_type_t type = mq->type;
        struct rte_eth_link link;
 
        if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
@@ -723,8 +755,14 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        memif_free_stored_mbufs(proc_private, mq);
 
        /* ring type always MEMIF_RING_S2M */
-       slot = ring->head;
-       n_free = ring_size - ring->head + mq->last_tail;
+       /* For S2M queues ring->head is updated by the sender and
+        * this function is called in the context of sending thread.
+        * The loads in the sender do not need to synchronize with
+        * its own stores. Hence, the following load can be a
+        * relaxed load.
+        */
+       slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+       n_free = ring_size - slot + mq->last_tail;
 
        int used_slots;
 
@@ -778,12 +816,12 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        }
 
 no_free_slots:
-       rte_mb();
-       /* update ring pointers */
-       if (type == MEMIF_RING_S2M)
-               ring->head = slot;
-       else
-               ring->tail = slot;
+       /* ring type always MEMIF_RING_S2M */
+       /* The ring->head acts as a guard variable between Tx and Rx
+        * threads, so using store-release pairs with load-acquire
+        * in function eth_memif_rx for S2M rings.
+        */
+       __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
 
        /* Send interrupt, if enabled. */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1203,7 +1241,7 @@ memif_dev_start(struct rte_eth_dev *dev)
        return ret;
 }
 
-static void
+static int
 memif_dev_close(struct rte_eth_dev *dev)
 {
        struct pmd_internals *pmd = dev->data->dev_private;
@@ -1224,6 +1262,8 @@ memif_dev_close(struct rte_eth_dev *dev)
        }
 
        rte_free(dev->process_private);
+
+       return 0;
 }
 
 static int
@@ -1499,6 +1539,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        data->dev_link = pmd_link;
        data->mac_addrs = ether_addr;
        data->promiscuous = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        eth_dev->device = &vdev->device;
@@ -1510,9 +1551,6 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
                eth_dev->tx_pkt_burst = eth_memif_tx;
        }
 
-
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
        rte_eth_dev_probing_finish(eth_dev);
 
        return 0;
@@ -1640,7 +1678,23 @@ memif_set_socket_filename(const char *key __rte_unused, const char *value,
        const char **socket_filename = (const char **)extra_args;
 
        *socket_filename = value;
-       return memif_check_socket_filename(*socket_filename);
+       return 0;
+}
+
+static int
+memif_set_is_socket_abstract(const char *key __rte_unused, const char *value, void *extra_args) /* kvargs handler for the "socket-abstract" devarg */
+{
+	uint32_t *flags = (uint32_t *)extra_args; /* caller passes the pmd flags word (pre-seeded with the default) */
+
+	if (strstr(value, "yes") != NULL) { /* NOTE(review): substring match — any value containing "yes" enables; presumably mirrors the other memif yes|no devargs, confirm */
+		*flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT; /* request abstract-namespace socket addressing */
+	} else if (strstr(value, "no") != NULL) {
+		*flags &= ~ETH_MEMIF_FLAG_SOCKET_ABSTRACT; /* use a regular filesystem socket path instead */
+	} else {
+		MIF_LOG(ERR, "Failed to parse socket-abstract param: %s.", value);
+		return -EINVAL; /* propagated by rte_kvargs_process(), aborts the probe */
+	}
+	return 0;
+}
 
 static int
@@ -1726,6 +1780,9 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                MIF_LOG(WARNING, "Failed to register mp action callback: %s",
                        strerror(rte_errno));
 
+       /* use abstract address by default */
+       flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+
        kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);
 
        /* parse parameters */
@@ -1751,6 +1808,10 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                                         (void *)(&socket_filename));
                if (ret < 0)
                        goto exit;
+               ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
+                                        &memif_set_is_socket_abstract, &flags);
+               if (ret < 0)
+                       goto exit;
                ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
                                         &memif_set_mac, ether_addr);
                if (ret < 0)
@@ -1765,6 +1826,12 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                        goto exit;
        }
 
+       if (!(flags & ETH_MEMIF_FLAG_SOCKET_ABSTRACT)) {
+               ret = memif_check_socket_filename(socket_filename);
+               if (ret < 0)
+                       goto exit;
+       }
+
        /* create interface */
        ret = memif_create(vdev, role, id, flags, socket_filename,
                           log2_ring_size, pkt_buffer_size, secret, ether_addr);
@@ -1784,9 +1851,7 @@ rte_pmd_memif_remove(struct rte_vdev_device *vdev)
        if (eth_dev == NULL)
                return 0;
 
-       rte_eth_dev_close(eth_dev->data->port_id);
-
-       return 0;
+       return rte_eth_dev_close(eth_dev->data->port_id);
 }
 
 static struct rte_vdev_driver pmd_memif_drv = {
@@ -1802,6 +1867,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_memif,
                              ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
                              ETH_MEMIF_RING_SIZE_ARG "=<int>"
                              ETH_MEMIF_SOCKET_ARG "=<string>"
+                              ETH_MEMIF_SOCKET_ABSTRACT_ARG "=yes|no"
                              ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
                              ETH_MEMIF_ZC_ARG "=yes|no"
                              ETH_MEMIF_SECRET_ARG "=<string>");