drivers: remove direct access to interrupt handle
[dpdk.git] / drivers / net / memif / rte_eth_memif.c
index b16c73f..8cec493 100644 (file)
@@ -17,8 +17,8 @@
 #include <rte_version.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include <rte_malloc.h>
 #include <rte_kvargs.h>
 #include <rte_bus_vdev.h>
@@ -36,6 +36,7 @@
 #define ETH_MEMIF_PKT_BUFFER_SIZE_ARG  "bsize"
 #define ETH_MEMIF_RING_SIZE_ARG                "rsize"
 #define ETH_MEMIF_SOCKET_ARG           "socket"
+#define ETH_MEMIF_SOCKET_ABSTRACT_ARG  "socket-abstract"
 #define ETH_MEMIF_MAC_ARG              "mac"
 #define ETH_MEMIF_ZC_ARG               "zero-copy"
 #define ETH_MEMIF_SECRET_ARG           "secret"
@@ -46,6 +47,7 @@ static const char * const valid_arguments[] = {
        ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
        ETH_MEMIF_RING_SIZE_ARG,
        ETH_MEMIF_SOCKET_ARG,
+       ETH_MEMIF_SOCKET_ABSTRACT_ARG,
        ETH_MEMIF_MAC_ARG,
        ETH_MEMIF_ZC_ARG,
        ETH_MEMIF_SECRET_ARG,
@@ -53,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-       .link_speed = ETH_SPEED_NUM_10G,
-       .link_duplex = ETH_LINK_FULL_DUPLEX,
-       .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_AUTONEG
+       .link_speed = RTE_ETH_SPEED_NUM_10G,
+       .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+       .link_status = RTE_ETH_LINK_DOWN,
+       .link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION           "memif_mp_send_region"
@@ -132,7 +134,7 @@ memif_mp_request_regions(struct rte_eth_dev *dev)
        struct memif_region *r;
        struct pmd_process_private *proc_private = dev->process_private;
        struct pmd_internals *pmd = dev->data->dev_private;
-       /* in case of zero-copy slave, only request region 0 */
+       /* in case of zero-copy client, only request region 0 */
        uint16_t max_region_num = (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) ?
                                   1 : ETH_MEMIF_MAX_REGION_NUM;
 
@@ -197,6 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
        dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
        dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
        dev_info->min_rx_bufsize = 0;
+       dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        return 0;
 }
@@ -210,7 +213,7 @@ memif_get_ring(struct pmd_internals *pmd, struct pmd_process_private *proc_priva
        int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) *
            (1 << pmd->run.log2_ring_size);
 
-       p = (uint8_t *)p + (ring_num + type * pmd->run.num_s2m_rings) * ring_size;
+       p = (uint8_t *)p + (ring_num + type * pmd->run.num_c2s_rings) * ring_size;
 
        return (memif_ring_t *)p;
 }
@@ -245,20 +248,21 @@ memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d)
        return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset);
 }
 
-/* Free mbufs received by master */
+/* Free mbufs received by server */
 static void
 memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq)
 {
+       uint16_t cur_tail;
        uint16_t mask = (1 << mq->log2_ring_size) - 1;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
 
        /* FIXME: improve performance */
        /* The ring->tail acts as a guard variable between Tx and Rx
         * threads, so using load-acquire pairs with store-release
-        * to synchronize it between threads.
+        * in function eth_memif_rx for C2S queues.
         */
-       while (mq->last_tail != __atomic_load_n(&ring->tail,
-                                               __ATOMIC_ACQUIRE)) {
+       cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+       while (mq->last_tail != cur_tail) {
                RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
                /* Decrement refcnt and free mbuf. (current segment) */
                rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
@@ -322,12 +326,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        /* consume interrupt */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
-               size = read(mq->intr_handle.fd, &b, sizeof(b));
+               size = read(rte_intr_fd_get(mq->intr_handle), &b,
+                           sizeof(b));
 
        ring_size = 1 << mq->log2_ring_size;
        mask = ring_size - 1;
 
-       if (type == MEMIF_RING_S2M) {
+       if (type == MEMIF_RING_C2S) {
                cur_slot = mq->last_head;
                last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
        } else {
@@ -380,9 +385,10 @@ next_slot:
                        if (mbuf != mbuf_head)
                                rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
 
-                       memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off),
-                              (uint8_t *)memif_get_buffer(proc_private, d0) + src_off,
-                              cp_len);
+                       rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
+                                                          dst_off),
+                               (uint8_t *)memif_get_buffer(proc_private, d0) +
+                               src_off, cp_len);
 
                        src_off += cp_len;
                        dst_off += cp_len;
@@ -401,7 +407,7 @@ next_slot:
        }
 
 no_free_bufs:
-       if (type == MEMIF_RING_S2M) {
+       if (type == MEMIF_RING_C2S) {
                __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
                mq->last_head = cur_slot;
        } else {
@@ -409,7 +415,7 @@ no_free_bufs:
        }
 
 refill:
-       if (type == MEMIF_RING_M2S) {
+       if (type == MEMIF_RING_S2C) {
                /* ring->head is updated by the receiver and this function
                 * is called in the context of receiver thread. The loads in
                 * the receiver do not need to synchronize with its own stores.
@@ -457,7 +463,8 @@ eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                uint64_t b;
                ssize_t size __rte_unused;
-               size = read(mq->intr_handle.fd, &b, sizeof(b));
+               size = read(rte_intr_fd_get(mq->intr_handle), &b,
+                           sizeof(b));
        }
 
        ring_size = 1 << mq->log2_ring_size;
@@ -512,13 +519,13 @@ next_slot:
 
        mq->last_tail = cur_slot;
 
-/* Supply master with new buffers */
+/* Supply server with new buffers */
 refill:
-       /* The ring->head acts as a guard variable between Tx and Rx
-        * threads, so using load-acquire pairs with store-release
-        * to synchronize it between threads.
+       /* ring->head is updated by the receiver and this function
+        * is called in the context of receiver thread. The loads in
+        * the receiver do not need to synchronize with its own stores.
         */
-       head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+       head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
        n_slots = ring_size - head + mq->last_tail;
 
        if (n_slots < 32)
@@ -543,6 +550,10 @@ refill:
                        (uint8_t *)proc_private->regions[d0->region]->addr;
        }
 no_free_mbufs:
+       /* The ring->head acts as a guard variable between Tx and Rx
+        * threads, so using store-release pairs with load-acquire
+        * in function eth_memif_tx.
+        */
        __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
 
        mq->n_pkts += n_rx_pkts;
@@ -559,7 +570,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rte_eth_devices[mq->in_port].process_private;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
        uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
-       uint16_t src_len, src_off, dst_len, dst_off, cp_len;
+       uint16_t src_len, src_off, dst_len, dst_off, cp_len, nb_segs;
        memif_ring_type_t type = mq->type;
        memif_desc_t *d0;
        struct rte_mbuf *mbuf;
@@ -584,8 +595,8 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        ring_size = 1 << mq->log2_ring_size;
        mask = ring_size - 1;
 
-       if (type == MEMIF_RING_S2M) {
-               /* For S2M queues ring->head is updated by the sender and
+       if (type == MEMIF_RING_C2S) {
+               /* For C2S queues ring->head is updated by the sender and
                 * this function is called in the context of sending thread.
                 * The loads in the sender do not need to synchronize with
                 * its own stores. Hence, the following load can be a
@@ -595,7 +606,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                n_free = ring_size - slot +
                                __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
        } else {
-               /* For M2S queues ring->tail is updated by the sender and
+               /* For S2C queues ring->tail is updated by the sender and
                 * this function is called in the context of sending thread.
                 * The loads in the sender do not need to synchronize with
                 * its own stores. Hence, the following load can be a
@@ -607,12 +618,13 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        while (n_tx_pkts < nb_pkts && n_free) {
                mbuf_head = *bufs++;
+               nb_segs = mbuf_head->nb_segs;
                mbuf = mbuf_head;
 
                saved_slot = slot;
                d0 = &ring->desc[slot & mask];
                dst_off = 0;
-               dst_len = (type == MEMIF_RING_S2M) ?
+               dst_len = (type == MEMIF_RING_C2S) ?
                        pmd->run.pkt_buffer_size : d0->length;
 
 next_in_chain:
@@ -627,7 +639,7 @@ next_in_chain:
                                        d0->flags |= MEMIF_DESC_FLAG_NEXT;
                                        d0 = &ring->desc[slot & mask];
                                        dst_off = 0;
-                                       dst_len = (type == MEMIF_RING_S2M) ?
+                                       dst_len = (type == MEMIF_RING_C2S) ?
                                            pmd->run.pkt_buffer_size : d0->length;
                                        d0->flags = 0;
                                } else {
@@ -637,9 +649,10 @@ next_in_chain:
                        }
                        cp_len = RTE_MIN(dst_len, src_len);
 
-                       memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off,
-                              rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
-                              cp_len);
+                       rte_memcpy((uint8_t *)memif_get_buffer(proc_private,
+                                                              d0) + dst_off,
+                               rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
+                               cp_len);
 
                        mq->n_bytes += cp_len;
                        src_off += cp_len;
@@ -650,7 +663,7 @@ next_in_chain:
                        d0->length = dst_off;
                }
 
-               if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+               if (--nb_segs > 0) {
                        mbuf = mbuf->next;
                        goto next_in_chain;
                }
@@ -662,14 +675,15 @@ next_in_chain:
        }
 
 no_free_slots:
-       if (type == MEMIF_RING_S2M)
+       if (type == MEMIF_RING_C2S)
                __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
        else
                __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
 
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                a = 1;
-               size = write(mq->intr_handle.fd, &a, sizeof(a));
+               size = write(rte_intr_fd_get(mq->intr_handle), &a,
+                            sizeof(a));
                if (unlikely(size < 0)) {
                        MIF_LOG(WARNING,
                                "Failed to send interrupt. %s", strerror(errno));
@@ -687,18 +701,20 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq
                uint16_t slot, uint16_t n_free)
 {
        memif_desc_t *d0;
+       uint16_t nb_segs = mbuf->nb_segs;
        int used_slots = 1;
 
 next_in_chain:
        /* store pointer to mbuf to free it later */
        mq->buffers[slot & mask] = mbuf;
-       /* Increment refcnt to make sure the buffer is not freed before master
+       /* Increment refcnt to make sure the buffer is not freed before server
         * receives it. (current segment)
         */
        rte_mbuf_refcnt_update(mbuf, 1);
        /* populate descriptor */
        d0 = &ring->desc[slot & mask];
        d0->length = rte_pktmbuf_data_len(mbuf);
+       mq->n_bytes += rte_pktmbuf_data_len(mbuf);
        /* FIXME: get region index */
        d0->region = 1;
        d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) -
@@ -706,7 +722,7 @@ next_in_chain:
        d0->flags = 0;
 
        /* check if buffer is chained */
-       if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+       if (--nb_segs > 0) {
                if (n_free < 2)
                        return 0;
                /* mark buffer as chained */
@@ -731,7 +747,6 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rte_eth_devices[mq->in_port].process_private;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
        uint16_t slot, n_free, ring_size, mask, n_tx_pkts = 0;
-       memif_ring_type_t type = mq->type;
        struct rte_eth_link link;
 
        if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
@@ -745,15 +760,17 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        ring_size = 1 << mq->log2_ring_size;
        mask = ring_size - 1;
 
-       /* free mbufs received by master */
+       /* free mbufs received by server */
        memif_free_stored_mbufs(proc_private, mq);
 
-       /* ring type always MEMIF_RING_S2M */
-       /* The ring->head acts as a guard variable between Tx and Rx
-        * threads, so using load-acquire pairs with store-release
-        * to synchronize it between threads.
+       /* ring type always MEMIF_RING_C2S */
+       /* For C2S queues ring->head is updated by the sender and
+        * this function is called in the context of sending thread.
+        * The loads in the sender do not need to synchronize with
+        * its own stores. Hence, the following load can be a
+        * relaxed load.
         */
-       slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+       slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
        n_free = ring_size - slot + mq->last_tail;
 
        int used_slots;
@@ -808,16 +825,18 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        }
 
 no_free_slots:
-       /* update ring pointers */
-       if (type == MEMIF_RING_S2M)
-               __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
-       else
-               __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+       /* ring type always MEMIF_RING_C2S */
+       /* The ring->head acts as a guard variable between Tx and Rx
+        * threads, so using store-release pairs with load-acquire
+        * in function eth_memif_rx for C2S rings.
+        */
+       __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
 
        /* Send interrupt, if enabled. */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                uint64_t a = 1;
-               ssize_t size = write(mq->intr_handle.fd, &a, sizeof(a));
+               ssize_t size = write(rte_intr_fd_get(mq->intr_handle),
+                                    &a, sizeof(a));
                if (unlikely(size < 0)) {
                        MIF_LOG(WARNING,
                                "Failed to send interrupt. %s", strerror(errno));
@@ -924,7 +943,7 @@ memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers)
        }
 
        /* calculate buffer offset */
-       r->pkt_buffer_offset = (pmd->run.num_s2m_rings + pmd->run.num_m2s_rings) *
+       r->pkt_buffer_offset = (pmd->run.num_c2s_rings + pmd->run.num_s2c_rings) *
            (sizeof(memif_ring_t) + sizeof(memif_desc_t) *
            (1 << pmd->run.log2_ring_size));
 
@@ -933,8 +952,8 @@ memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers)
        if (has_buffers == 1)
                r->region_size += (uint32_t)(pmd->run.pkt_buffer_size *
                        (1 << pmd->run.log2_ring_size) *
-                       (pmd->run.num_s2m_rings +
-                        pmd->run.num_m2s_rings));
+                       (pmd->run.num_c2s_rings +
+                        pmd->run.num_s2c_rings));
 
        memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE);
        snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d",
@@ -1019,8 +1038,8 @@ memif_init_rings(struct rte_eth_dev *dev)
        int i, j;
        uint16_t slot;
 
-       for (i = 0; i < pmd->run.num_s2m_rings; i++) {
-               ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2M, i);
+       for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+               ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
                __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
                __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
                ring->cookie = MEMIF_COOKIE;
@@ -1039,8 +1058,8 @@ memif_init_rings(struct rte_eth_dev *dev)
                }
        }
 
-       for (i = 0; i < pmd->run.num_m2s_rings; i++) {
-               ring = memif_get_ring(pmd, proc_private, MEMIF_RING_M2S, i);
+       for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+               ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
                __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
                __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
                ring->cookie = MEMIF_COOKIE;
@@ -1050,7 +1069,7 @@ memif_init_rings(struct rte_eth_dev *dev)
                        continue;
 
                for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
-                       slot = (i + pmd->run.num_s2m_rings) *
+                       slot = (i + pmd->run.num_c2s_rings) *
                            (1 << pmd->run.log2_ring_size) + j;
                        ring->desc[j].region = 0;
                        ring->desc[j].offset =
@@ -1061,7 +1080,7 @@ memif_init_rings(struct rte_eth_dev *dev)
        }
 }
 
-/* called only by slave */
+/* called only by client */
 static int
 memif_init_queues(struct rte_eth_dev *dev)
 {
@@ -1069,16 +1088,18 @@ memif_init_queues(struct rte_eth_dev *dev)
        struct memif_queue *mq;
        int i;
 
-       for (i = 0; i < pmd->run.num_s2m_rings; i++) {
+       for (i = 0; i < pmd->run.num_c2s_rings; i++) {
                mq = dev->data->tx_queues[i];
                mq->log2_ring_size = pmd->run.log2_ring_size;
                /* queues located only in region 0 */
                mq->region = 0;
-               mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2M, i);
+               mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_C2S, i);
                mq->last_head = 0;
                mq->last_tail = 0;
-               mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
-               if (mq->intr_handle.fd < 0) {
+               if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+                       return -rte_errno;
+
+               if (rte_intr_fd_get(mq->intr_handle) < 0) {
                        MIF_LOG(WARNING,
                                "Failed to create eventfd for tx queue %d: %s.", i,
                                strerror(errno));
@@ -1092,16 +1113,17 @@ memif_init_queues(struct rte_eth_dev *dev)
                }
        }
 
-       for (i = 0; i < pmd->run.num_m2s_rings; i++) {
+       for (i = 0; i < pmd->run.num_s2c_rings; i++) {
                mq = dev->data->rx_queues[i];
                mq->log2_ring_size = pmd->run.log2_ring_size;
                /* queues located only in region 0 */
                mq->region = 0;
-               mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_M2S, i);
+               mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2C, i);
                mq->last_head = 0;
                mq->last_tail = 0;
-               mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
-               if (mq->intr_handle.fd < 0) {
+               if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+                       return -rte_errno;
+               if (rte_intr_fd_get(mq->intr_handle) < 0) {
                        MIF_LOG(WARNING,
                                "Failed to create eventfd for rx queue %d: %s.", i,
                                strerror(errno));
@@ -1169,8 +1191,8 @@ memif_connect(struct rte_eth_dev *dev)
        }
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               for (i = 0; i < pmd->run.num_s2m_rings; i++) {
-                       mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
+               for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+                       mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
                            dev->data->tx_queues[i] : dev->data->rx_queues[i];
                        ring = memif_get_ring_from_queue(proc_private, mq);
                        if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
@@ -1182,11 +1204,11 @@ memif_connect(struct rte_eth_dev *dev)
                        mq->last_head = 0;
                        mq->last_tail = 0;
                        /* enable polling mode */
-                       if (pmd->role == MEMIF_ROLE_MASTER)
+                       if (pmd->role == MEMIF_ROLE_SERVER)
                                ring->flags = MEMIF_RING_FLAG_MASK_INT;
                }
-               for (i = 0; i < pmd->run.num_m2s_rings; i++) {
-                       mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
+               for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+                       mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
                            dev->data->rx_queues[i] : dev->data->tx_queues[i];
                        ring = memif_get_ring_from_queue(proc_private, mq);
                        if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
@@ -1198,13 +1220,13 @@ memif_connect(struct rte_eth_dev *dev)
                        mq->last_head = 0;
                        mq->last_tail = 0;
                        /* enable polling mode */
-                       if (pmd->role == MEMIF_ROLE_SLAVE)
+                       if (pmd->role == MEMIF_ROLE_CLIENT)
                                ring->flags = MEMIF_RING_FLAG_MASK_INT;
                }
 
                pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
                pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-               dev->data->dev_link.link_status = ETH_LINK_UP;
+               dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
        }
        MIF_LOG(INFO, "Connected.");
        return 0;
@@ -1217,11 +1239,11 @@ memif_dev_start(struct rte_eth_dev *dev)
        int ret = 0;
 
        switch (pmd->role) {
-       case MEMIF_ROLE_SLAVE:
-               ret = memif_connect_slave(dev);
+       case MEMIF_ROLE_CLIENT:
+               ret = memif_connect_client(dev);
                break;
-       case MEMIF_ROLE_MASTER:
-               ret = memif_connect_master(dev);
+       case MEMIF_ROLE_SERVER:
+               ret = memif_connect_server(dev);
                break;
        default:
                MIF_LOG(ERR, "Unknown role: %d.", pmd->role);
@@ -1243,9 +1265,9 @@ memif_dev_close(struct rte_eth_dev *dev)
                memif_disconnect(dev);
 
                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                       (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
+                       (*dev->dev_ops->rx_queue_release)(dev, i);
                for (i = 0; i < dev->data->nb_tx_queues; i++)
-                       (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
+                       (*dev->dev_ops->tx_queue_release)(dev, i);
 
                memif_socket_remove_device(dev);
        } else {
@@ -1263,17 +1285,17 @@ memif_dev_configure(struct rte_eth_dev *dev)
        struct pmd_internals *pmd = dev->data->dev_private;
 
        /*
-        * SLAVE - TXQ
-        * MASTER - RXQ
+        * CLIENT - TXQ
+        * SERVER - RXQ
         */
-       pmd->cfg.num_s2m_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
+       pmd->cfg.num_c2s_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
                                  dev->data->nb_tx_queues : dev->data->nb_rx_queues;
 
        /*
-        * SLAVE - RXQ
-        * MASTER - TXQ
+        * CLIENT - RXQ
+        * SERVER - TXQ
         */
-       pmd->cfg.num_m2s_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
+       pmd->cfg.num_s2c_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
                                  dev->data->nb_rx_queues : dev->data->nb_tx_queues;
 
        return 0;
@@ -1295,12 +1317,24 @@ memif_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       /* Allocate interrupt instance */
+       mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (mq->intr_handle == NULL) {
+               MIF_LOG(ERR, "Failed to allocate intr handle");
+               return -ENOMEM;
+       }
+
        mq->type =
-           (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_S2M : MEMIF_RING_M2S;
+           (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_C2S : MEMIF_RING_S2C;
        mq->n_pkts = 0;
        mq->n_bytes = 0;
-       mq->intr_handle.fd = -1;
-       mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+       if (rte_intr_fd_set(mq->intr_handle, -1))
+               return -rte_errno;
+
+       if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+               return -rte_errno;
+
        mq->in_port = dev->data->port_id;
        dev->data->tx_queues[qid] = mq;
 
@@ -1324,11 +1358,23 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
-       mq->type = (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_M2S : MEMIF_RING_S2M;
+       /* Allocate interrupt instance */
+       mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (mq->intr_handle == NULL) {
+               MIF_LOG(ERR, "Failed to allocate intr handle");
+               return -ENOMEM;
+       }
+
+       mq->type = (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_S2C : MEMIF_RING_C2S;
        mq->n_pkts = 0;
        mq->n_bytes = 0;
-       mq->intr_handle.fd = -1;
-       mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+       if (rte_intr_fd_set(mq->intr_handle, -1))
+               return -rte_errno;
+
+       if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+               return -rte_errno;
+
        mq->mempool = mb_pool;
        mq->in_port = dev->data->port_id;
        dev->data->rx_queues[qid] = mq;
@@ -1337,9 +1383,21 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-memif_queue_release(void *queue)
+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       struct memif_queue *mq = dev->data->rx_queues[qid];
+
+       if (!mq)
+               return;
+
+       rte_intr_instance_free(mq->intr_handle);
+       rte_free(mq);
+}
+
+static void
+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct memif_queue *mq = (struct memif_queue *)queue;
+       struct memif_queue *mq = dev->data->tx_queues[qid];
 
        if (!mq)
                return;
@@ -1355,10 +1413,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                proc_private = dev->process_private;
-               if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+               if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
                                proc_private->regions_num == 0) {
                        memif_mp_request_regions(dev);
-               } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+               } else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
                                proc_private->regions_num > 0) {
                        memif_free_regions(dev);
                }
@@ -1379,8 +1437,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->opackets = 0;
        stats->obytes = 0;
 
-       tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_s2m_rings :
-           pmd->run.num_m2s_rings;
+       tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings :
+           pmd->run.num_s2c_rings;
        nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
            RTE_ETHDEV_QUEUE_STAT_CNTRS;
 
@@ -1393,8 +1451,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                stats->ibytes += mq->n_bytes;
        }
 
-       tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_m2s_rings :
-           pmd->run.num_s2m_rings;
+       tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings :
+           pmd->run.num_c2s_rings;
        nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
            RTE_ETHDEV_QUEUE_STAT_CNTRS;
 
@@ -1416,14 +1474,14 @@ memif_stats_reset(struct rte_eth_dev *dev)
        int i;
        struct memif_queue *mq;
 
-       for (i = 0; i < pmd->run.num_s2m_rings; i++) {
-               mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->tx_queues[i] :
+       for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+               mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->tx_queues[i] :
                    dev->data->rx_queues[i];
                mq->n_pkts = 0;
                mq->n_bytes = 0;
        }
-       for (i = 0; i < pmd->run.num_m2s_rings; i++) {
-               mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->rx_queues[i] :
+       for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+               mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->rx_queues[i] :
                    dev->data->tx_queues[i];
                mq->n_pkts = 0;
                mq->n_bytes = 0;
@@ -1456,8 +1514,8 @@ static const struct eth_dev_ops ops = {
        .dev_configure = memif_dev_configure,
        .tx_queue_setup = memif_tx_queue_setup,
        .rx_queue_setup = memif_rx_queue_setup,
-       .rx_queue_release = memif_queue_release,
-       .tx_queue_release = memif_queue_release,
+       .rx_queue_release = memif_rx_queue_release,
+       .tx_queue_release = memif_tx_queue_release,
        .rx_queue_intr_enable = memif_rx_queue_intr_enable,
        .rx_queue_intr_disable = memif_rx_queue_intr_disable,
        .link_update = memif_link_update,
@@ -1504,8 +1562,8 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        pmd->flags = flags;
        pmd->flags |= ETH_MEMIF_FLAG_DISABLED;
        pmd->role = role;
-       /* Zero-copy flag irelevant to master. */
-       if (pmd->role == MEMIF_ROLE_MASTER)
+       /* Zero-copy flag irrelevant to server. */
+       if (pmd->role == MEMIF_ROLE_SERVER)
                pmd->flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;
 
        ret = memif_socket_init(eth_dev, socket_filename);
@@ -1518,8 +1576,8 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
 
        pmd->cfg.log2_ring_size = log2_ring_size;
        /* set in .dev_configure() */
-       pmd->cfg.num_s2m_rings = 0;
-       pmd->cfg.num_m2s_rings = 0;
+       pmd->cfg.num_c2s_rings = 0;
+       pmd->cfg.num_s2c_rings = 0;
 
        pmd->cfg.pkt_buffer_size = pkt_buffer_size;
        rte_spinlock_init(&pmd->cc_lock);
@@ -1530,6 +1588,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        data->dev_link = pmd_link;
        data->mac_addrs = ether_addr;
        data->promiscuous = 1;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        eth_dev->dev_ops = &ops;
        eth_dev->device = &vdev->device;
@@ -1552,10 +1611,16 @@ memif_set_role(const char *key __rte_unused, const char *value,
 {
        enum memif_role_t *role = (enum memif_role_t *)extra_args;
 
-       if (strstr(value, "master") != NULL) {
-               *role = MEMIF_ROLE_MASTER;
+       if (strstr(value, "server") != NULL) {
+               *role = MEMIF_ROLE_SERVER;
+       } else if (strstr(value, "client") != NULL) {
+               *role = MEMIF_ROLE_CLIENT;
+       } else if (strstr(value, "master") != NULL) {
+               MIF_LOG(NOTICE, "Role argument \"master\" is deprecated, use \"server\"");
+               *role = MEMIF_ROLE_SERVER;
        } else if (strstr(value, "slave") != NULL) {
-               *role = MEMIF_ROLE_SLAVE;
+               MIF_LOG(NOTICE, "Role argument \"slave\" is deprecated, use \"client\"");
+               *role = MEMIF_ROLE_CLIENT;
        } else {
                MIF_LOG(ERR, "Unknown role: %s.", value);
                return -EINVAL;
@@ -1668,7 +1733,23 @@ memif_set_socket_filename(const char *key __rte_unused, const char *value,
        const char **socket_filename = (const char **)extra_args;
 
        *socket_filename = value;
-       return memif_check_socket_filename(*socket_filename);
+       return 0;
+}
+
+static int
+memif_set_is_socket_abstract(const char *key __rte_unused, const char *value, void *extra_args)
+{
+       uint32_t *flags = (uint32_t *)extra_args;
+
+       if (strstr(value, "yes") != NULL) {
+               *flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+       } else if (strstr(value, "no") != NULL) {
+               *flags &= ~ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+       } else {
+               MIF_LOG(ERR, "Failed to parse socket-abstract param: %s.", value);
+               return -EINVAL;
+       }
+       return 0;
 }
 
 static int
@@ -1698,7 +1779,7 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
        int ret = 0;
        struct rte_kvargs *kvlist;
        const char *name = rte_vdev_device_name(vdev);
-       enum memif_role_t role = MEMIF_ROLE_SLAVE;
+       enum memif_role_t role = MEMIF_ROLE_CLIENT;
        memif_interface_id_t id = 0;
        uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE;
        memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE;
@@ -1754,6 +1835,9 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                MIF_LOG(WARNING, "Failed to register mp action callback: %s",
                        strerror(rte_errno));
 
+       /* use abstract address by default */
+       flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+
        kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);
 
        /* parse parameters */
@@ -1779,6 +1863,10 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                                         (void *)(&socket_filename));
                if (ret < 0)
                        goto exit;
+               ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
+                                        &memif_set_is_socket_abstract, &flags);
+               if (ret < 0)
+                       goto exit;
                ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
                                         &memif_set_mac, ether_addr);
                if (ret < 0)
@@ -1793,6 +1881,12 @@ rte_pmd_memif_probe(struct rte_vdev_device *vdev)
                        goto exit;
        }
 
+       if (!(flags & ETH_MEMIF_FLAG_SOCKET_ABSTRACT)) {
+               ret = memif_check_socket_filename(socket_filename);
+               if (ret < 0)
+                       goto exit;
+       }
+
        /* create interface */
        ret = memif_create(vdev, role, id, flags, socket_filename,
                           log2_ring_size, pkt_buffer_size, secret, ether_addr);
@@ -1812,9 +1906,7 @@ rte_pmd_memif_remove(struct rte_vdev_device *vdev)
        if (eth_dev == NULL)
                return 0;
 
-       rte_eth_dev_close(eth_dev->data->port_id);
-
-       return 0;
+       return rte_eth_dev_close(eth_dev->data->port_id);
 }
 
 static struct rte_vdev_driver pmd_memif_drv = {
@@ -1826,12 +1918,13 @@ RTE_PMD_REGISTER_VDEV(net_memif, pmd_memif_drv);
 
 RTE_PMD_REGISTER_PARAM_STRING(net_memif,
                              ETH_MEMIF_ID_ARG "=<int>"
-                             ETH_MEMIF_ROLE_ARG "=master|slave"
+                             ETH_MEMIF_ROLE_ARG "=server|client"
                              ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
                              ETH_MEMIF_RING_SIZE_ARG "=<int>"
                              ETH_MEMIF_SOCKET_ARG "=<string>"
+                                 ETH_MEMIF_SOCKET_ABSTRACT_ARG "=yes|no"
                              ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
                              ETH_MEMIF_ZC_ARG "=yes|no"
                              ETH_MEMIF_SECRET_ARG "=<string>");
 
-RTE_LOG_REGISTER(memif_logtype, pmd.net.memif, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(memif_logtype, NOTICE);