#include <rte_version.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#define ETH_MEMIF_PKT_BUFFER_SIZE_ARG "bsize"
#define ETH_MEMIF_RING_SIZE_ARG "rsize"
#define ETH_MEMIF_SOCKET_ARG "socket"
+#define ETH_MEMIF_SOCKET_ABSTRACT_ARG "socket-abstract"
#define ETH_MEMIF_MAC_ARG "mac"
#define ETH_MEMIF_ZC_ARG "zero-copy"
#define ETH_MEMIF_SECRET_ARG "secret"
ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
ETH_MEMIF_RING_SIZE_ARG,
ETH_MEMIF_SOCKET_ARG,
+ ETH_MEMIF_SOCKET_ABSTRACT_ARG,
ETH_MEMIF_MAC_ARG,
ETH_MEMIF_ZC_ARG,
ETH_MEMIF_SECRET_ARG,
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG
};
#define MEMIF_MP_SEND_REGION "memif_mp_send_region"
struct memif_region *r;
struct pmd_process_private *proc_private = dev->process_private;
struct pmd_internals *pmd = dev->data->dev_private;
- /* in case of zero-copy slave, only request region 0 */
+ /* in case of zero-copy client, only request region 0 */
uint16_t max_region_num = (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) ?
1 : ETH_MEMIF_MAX_REGION_NUM;
memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info)
{
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
+ dev_info->max_rx_pktlen = RTE_ETHER_MAX_LEN;
dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->min_rx_bufsize = 0;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
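
The added tx_offload_capa line is what lets applications discover that memif can now transmit chained mbufs. A minimal application-side check, assuming a valid port_id (illustrative, not part of this patch):

```c
#include <rte_ethdev.h>

/* Returns non-zero when the port advertises multi-segment Tx, i.e.
 * chained mbufs may be passed to rte_eth_tx_burst(). */
static int
tx_supports_multi_seg(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	return (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0;
}
```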
int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) *
(1 << pmd->run.log2_ring_size);
- p = (uint8_t *)p + (ring_num + type * pmd->run.num_s2m_rings) * ring_size;
+ p = (uint8_t *)p + (ring_num + type * pmd->run.num_c2s_rings) * ring_size;
return (memif_ring_t *)p;
}
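
For reference, the pointer arithmetic above indexes a region that lays out all C2S rings first, then all S2C rings. A standalone sketch of the same layout math, with sizes passed in as parameters (illustrative types, not driver code):

```c
#include <stddef.h>
#include <stdint.h>

/* Byte offset of ring `ring_num` of the given direction inside region 0:
 * C2S rings (is_s2c == 0) come first, S2C rings follow them. */
static size_t
ring_offset(size_t ring_hdr_sz, size_t desc_sz, unsigned int log2_ring_size,
	    uint16_t num_c2s_rings, int is_s2c, uint16_t ring_num)
{
	size_t ring_sz = ring_hdr_sz + desc_sz * ((size_t)1 << log2_ring_size);
	size_t index = (size_t)ring_num + (is_s2c ? num_c2s_rings : 0);

	return index * ring_sz;
}
```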
return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset);
}
-/* Free mbufs received by master */
+/* Free mbufs received by server */
static void
memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq)
{
+ uint16_t cur_tail;
uint16_t mask = (1 << mq->log2_ring_size) - 1;
memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
/* FIXME: improve performance */
/* The ring->tail acts as a guard variable between Tx and Rx
* threads, so using load-acquire pairs with store-release
- * to synchronize it between threads.
+ * in function eth_memif_rx for C2S queues.
*/
- while (mq->last_tail != __atomic_load_n(&ring->tail,
- __ATOMIC_ACQUIRE)) {
+ cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
/* Decrement refcnt and free mbuf. (current segment) */
rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
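
Hoisting the load out of the loop condition is safe because only this thread advances last_tail; the single load-acquire still pairs with the receiver's store-release. A minimal sketch of that pairing in C11 atomics (standalone, not the driver's types):

```c
#include <stdatomic.h>
#include <stdint.h>

struct ring_hdr { _Atomic uint16_t tail; };

/* Consumer side: slots in [last_tail, cur_tail) may be reclaimed. The
 * acquire load makes everything the peer wrote before its release
 * store to tail visible here. Wraps correctly modulo 2^16. */
static uint16_t
reclaimable_slots(struct ring_hdr *r, uint16_t last_tail)
{
	uint16_t cur_tail =
		atomic_load_explicit(&r->tail, memory_order_acquire);

	return (uint16_t)(cur_tail - last_tail);
}
```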
/* consume interrupt */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
- size = read(mq->intr_handle.fd, &b, sizeof(b));
+ size = read(rte_intr_fd_get(mq->intr_handle), &b,
+ sizeof(b));
ring_size = 1 << mq->log2_ring_size;
mask = ring_size - 1;
- if (type == MEMIF_RING_S2M) {
+ if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
} else {
if (mbuf != mbuf_head)
rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
- memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off),
- (uint8_t *)memif_get_buffer(proc_private, d0) + src_off,
- cp_len);
+ rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
+ dst_off),
+ (uint8_t *)memif_get_buffer(proc_private, d0) +
+ src_off, cp_len);
src_off += cp_len;
dst_off += cp_len;
}
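
The copy above drains a chain of shared-memory descriptors into a chain of mbuf segments with two independent cursors. The same two-cursor pattern in a self-contained form (illustrative segment type):

```c
#include <stddef.h>
#include <string.h>

struct seg { unsigned char *data; size_t len; };

/* Copy from a source segment list to a destination segment list,
 * advancing whichever cursor is exhausted first. */
static void
copy_chain(const struct seg *src, size_t n_src, struct seg *dst, size_t n_dst)
{
	size_t si = 0, di = 0, src_off = 0, dst_off = 0;

	while (si < n_src && di < n_dst) {
		size_t s_left = src[si].len - src_off;
		size_t d_left = dst[di].len - dst_off;
		size_t cp_len = s_left < d_left ? s_left : d_left;

		memcpy(dst[di].data + dst_off, src[si].data + src_off, cp_len);
		src_off += cp_len;
		dst_off += cp_len;
		if (src_off == src[si].len) { si++; src_off = 0; }
		if (dst_off == dst[di].len) { di++; dst_off = 0; }
	}
}
```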
no_free_bufs:
- if (type == MEMIF_RING_S2M) {
+ if (type == MEMIF_RING_C2S) {
__atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
mq->last_head = cur_slot;
} else {
}
refill:
- if (type == MEMIF_RING_M2S) {
+ if (type == MEMIF_RING_S2C) {
/* ring->head is updated by the receiver and this function
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
uint64_t b;
ssize_t size __rte_unused;
- size = read(mq->intr_handle.fd, &b, sizeof(b));
+ size = read(rte_intr_fd_get(mq->intr_handle), &b,
+ sizeof(b));
}
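
Both read() sites above drain an eventfd that the peer writes to signal ring activity. The handshake in isolation, assuming the fd was created with eventfd(0, EFD_NONBLOCK) as in memif_init_queues():

```c
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Receiver: reading resets the 64-bit counter; with EFD_NONBLOCK the
 * call returns -1/EAGAIN when no event is pending, which is harmless. */
static void
intr_consume(int efd)
{
	uint64_t b;

	(void)read(efd, &b, sizeof(b));
}

/* Sender: any non-zero 64-bit write wakes a peer polling the fd. */
static int
intr_notify(int efd)
{
	uint64_t a = 1;

	return write(efd, &a, sizeof(a)) == sizeof(a) ? 0 : -1;
}
```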
ring_size = 1 << mq->log2_ring_size;
mq->last_tail = cur_slot;
-/* Supply master with new buffers */
+/* Supply server with new buffers */
refill:
/* ring->head is updated by the receiver and this function
* is called in the context of receiver thread. The loads in
rte_eth_devices[mq->in_port].process_private;
memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
- uint16_t src_len, src_off, dst_len, dst_off, cp_len;
+ uint16_t src_len, src_off, dst_len, dst_off, cp_len, nb_segs;
memif_ring_type_t type = mq->type;
memif_desc_t *d0;
struct rte_mbuf *mbuf;
ring_size = 1 << mq->log2_ring_size;
mask = ring_size - 1;
- if (type == MEMIF_RING_S2M) {
- /* For S2M queues ring->head is updated by the sender and
+ if (type == MEMIF_RING_C2S) {
+ /* For C2S queues ring->head is updated by the sender and
* this function is called in the context of sending thread.
* The loads in the sender do not need to synchronize with
* its own stores. Hence, the following load can be a
n_free = ring_size - slot +
__atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
} else {
- /* For M2S queues ring->tail is updated by the sender and
+ /* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
* The loads in the sender do not need to synchronize with
* its own stores. Hence, the following load can be a
while (n_tx_pkts < nb_pkts && n_free) {
mbuf_head = *bufs++;
+ nb_segs = mbuf_head->nb_segs;
mbuf = mbuf_head;
saved_slot = slot;
d0 = &ring->desc[slot & mask];
dst_off = 0;
- dst_len = (type == MEMIF_RING_S2M) ?
+ dst_len = (type == MEMIF_RING_C2S) ?
pmd->run.pkt_buffer_size : d0->length;
next_in_chain:
d0->flags |= MEMIF_DESC_FLAG_NEXT;
d0 = &ring->desc[slot & mask];
dst_off = 0;
- dst_len = (type == MEMIF_RING_S2M) ?
+ dst_len = (type == MEMIF_RING_C2S) ?
pmd->run.pkt_buffer_size : d0->length;
d0->flags = 0;
} else {
}
cp_len = RTE_MIN(dst_len, src_len);
- memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off,
- rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
- cp_len);
+ rte_memcpy((uint8_t *)memif_get_buffer(proc_private,
+ d0) + dst_off,
+ rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
+ cp_len);
mq->n_bytes += cp_len;
src_off += cp_len;
d0->length = dst_off;
}
- if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+ if (--nb_segs > 0) {
mbuf = mbuf->next;
goto next_in_chain;
}
}
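
The switch from rte_pktmbuf_is_contiguous() to a cached nb_segs matters because nb_segs is only maintained in the first segment of a chain; testing it on an interior segment can terminate the walk early. The resulting walk pattern, reduced to a sketch:

```c
#include <stdint.h>

struct node { struct node *next; };

/* Walk exactly nb_segs segments, trusting the count snapshotted from
 * the head rather than any per-segment state. */
static unsigned int
walk_chain(struct node *head, uint16_t nb_segs)
{
	unsigned int visited = 0;

	while (nb_segs-- > 0 && head != NULL) {
		visited++;		/* process this segment */
		head = head->next;
	}
	return visited;
}
```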
no_free_slots:
- if (type == MEMIF_RING_S2M)
+ if (type == MEMIF_RING_C2S)
__atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
else
__atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
a = 1;
- size = write(mq->intr_handle.fd, &a, sizeof(a));
+ size = write(rte_intr_fd_get(mq->intr_handle), &a,
+ sizeof(a));
if (unlikely(size < 0)) {
MIF_LOG(WARNING,
"Failed to send interrupt. %s", strerror(errno));
uint16_t slot, uint16_t n_free)
{
memif_desc_t *d0;
+ uint16_t nb_segs = mbuf->nb_segs;
int used_slots = 1;
next_in_chain:
/* store pointer to mbuf to free it later */
mq->buffers[slot & mask] = mbuf;
- /* Increment refcnt to make sure the buffer is not freed before master
+ /* Increment refcnt to make sure the buffer is not freed before the server
* receives it. (current segment)
*/
rte_mbuf_refcnt_update(mbuf, 1);
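
In zero-copy Tx the mbuf's memory is handed to the peer in place, so this extra reference keeps it alive until memif_free_stored_mbufs() sees the advanced tail and drops it. The lifetime protocol as a standalone refcount sketch (illustrative type, not rte_mbuf):

```c
#include <stdatomic.h>

struct zc_buf { _Atomic int refcnt; };

/* Take a reference before publishing the buffer's descriptor. */
static void
zc_buf_expose(struct zc_buf *b)
{
	atomic_fetch_add_explicit(&b->refcnt, 1, memory_order_relaxed);
}

/* Drop the reference once the peer's tail shows the slot consumed;
 * returns 1 when this was the last reference and b may be freed. */
static int
zc_buf_reclaim(struct zc_buf *b)
{
	return atomic_fetch_sub_explicit(&b->refcnt, 1,
					 memory_order_acq_rel) == 1;
}
```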
/* populate descriptor */
d0 = &ring->desc[slot & mask];
d0->length = rte_pktmbuf_data_len(mbuf);
+ mq->n_bytes += rte_pktmbuf_data_len(mbuf);
/* FIXME: get region index */
d0->region = 1;
d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) -
d0->flags = 0;
/* check if buffer is chained */
- if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+ if (--nb_segs > 0) {
if (n_free < 2)
return 0;
/* mark buffer as chained */
ring_size = 1 << mq->log2_ring_size;
mask = ring_size - 1;
- /* free mbufs received by master */
+ /* free mbufs received by server */
memif_free_stored_mbufs(proc_private, mq);
- /* ring type always MEMIF_RING_S2M */
- /* For S2M queues ring->head is updated by the sender and
+ /* ring type always MEMIF_RING_C2S */
+ /* For C2S queues ring->head is updated by the sender and
* this function is called in the context of sending thread.
* The loads in the sender do not need to synchronize with
* its own stores. Hence, the following load can be a
}
no_free_slots:
- /* ring type always MEMIF_RING_S2M */
+ /* ring type always MEMIF_RING_C2S */
/* The ring->head acts as a guard variable between Tx and Rx
* threads, so using store-release pairs with load-acquire
- * in function eth_memif_rx for S2M rings.
+ * in function eth_memif_rx for C2S rings.
*/
__atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
uint64_t a = 1;
- ssize_t size = write(mq->intr_handle.fd, &a, sizeof(a));
+ ssize_t size = write(rte_intr_fd_get(mq->intr_handle),
+ &a, sizeof(a));
if (unlikely(size < 0)) {
MIF_LOG(WARNING,
"Failed to send interrupt. %s", strerror(errno));
}
/* calculate buffer offset */
- r->pkt_buffer_offset = (pmd->run.num_s2m_rings + pmd->run.num_m2s_rings) *
+ r->pkt_buffer_offset = (pmd->run.num_c2s_rings + pmd->run.num_s2c_rings) *
(sizeof(memif_ring_t) + sizeof(memif_desc_t) *
(1 << pmd->run.log2_ring_size));
if (has_buffers == 1)
r->region_size += (uint32_t)(pmd->run.pkt_buffer_size *
(1 << pmd->run.log2_ring_size) *
- (pmd->run.num_s2m_rings +
- pmd->run.num_m2s_rings));
+ (pmd->run.num_c2s_rings +
+ pmd->run.num_s2c_rings));
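
The two renamed expressions encode the region layout: ring headers plus descriptor tables for every queue in both directions, followed (when has_buffers is set) by the packet buffers themselves. The same sizing written out as a standalone function, with the sizes as parameters:

```c
#include <stddef.h>
#include <stdint.h>

/* Total bytes for a memif-style region: per-ring metadata for all
 * C2S and S2C rings, then optional per-slot packet buffers. */
static size_t
region_bytes(size_t ring_hdr_sz, size_t desc_sz, unsigned int log2_ring_size,
	     uint16_t n_c2s, uint16_t n_s2c, uint16_t pkt_buffer_size,
	     int has_buffers)
{
	size_t rings = (size_t)n_c2s + n_s2c;
	size_t slots = (size_t)1 << log2_ring_size;
	size_t sz = rings * (ring_hdr_sz + desc_sz * slots);

	if (has_buffers)
		sz += rings * slots * (size_t)pkt_buffer_size;
	return sz;
}
```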
memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE);
snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d",
int i, j;
uint16_t slot;
- for (i = 0; i < pmd->run.num_s2m_rings; i++) {
- ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2M, i);
+ for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+ ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
__atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
__atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
ring->cookie = MEMIF_COOKIE;
}
}
- for (i = 0; i < pmd->run.num_m2s_rings; i++) {
- ring = memif_get_ring(pmd, proc_private, MEMIF_RING_M2S, i);
+ for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+ ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
__atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
__atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
ring->cookie = MEMIF_COOKIE;
continue;
for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
- slot = (i + pmd->run.num_s2m_rings) *
+ slot = (i + pmd->run.num_c2s_rings) *
(1 << pmd->run.log2_ring_size) + j;
ring->desc[j].region = 0;
ring->desc[j].offset =
}
}
-/* called only by slave */
+/* called only by client */
static int
memif_init_queues(struct rte_eth_dev *dev)
{
struct memif_queue *mq;
int i;
- for (i = 0; i < pmd->run.num_s2m_rings; i++) {
+ for (i = 0; i < pmd->run.num_c2s_rings; i++) {
mq = dev->data->tx_queues[i];
mq->log2_ring_size = pmd->run.log2_ring_size;
/* queues located only in region 0 */
mq->region = 0;
- mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2M, i);
+ mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_C2S, i);
mq->last_head = 0;
mq->last_tail = 0;
- mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
- if (mq->intr_handle.fd < 0) {
+ if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+ return -rte_errno;
+
+ if (rte_intr_fd_get(mq->intr_handle) < 0) {
MIF_LOG(WARNING,
"Failed to create eventfd for tx queue %d: %s.", i,
strerror(errno));
}
}
- for (i = 0; i < pmd->run.num_m2s_rings; i++) {
+ for (i = 0; i < pmd->run.num_s2c_rings; i++) {
mq = dev->data->rx_queues[i];
mq->log2_ring_size = pmd->run.log2_ring_size;
/* queues located only in region 0 */
mq->region = 0;
- mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_M2S, i);
+ mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2C, i);
mq->last_head = 0;
mq->last_tail = 0;
- mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
- if (mq->intr_handle.fd < 0) {
+ if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+ return -rte_errno;
+ if (rte_intr_fd_get(mq->intr_handle) < 0) {
MIF_LOG(WARNING,
"Failed to create eventfd for rx queue %d: %s.", i,
strerror(errno));
}
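
The repeated alloc/fd_set/fd_get sequence reflects the DPDK 21.11 move from an embedded struct rte_intr_handle to an opaque, accessor-based one. A condensed sketch of the full create path, using only calls that appear in this patch plus close():

```c
#include <unistd.h>
#include <sys/eventfd.h>
#include <rte_interrupts.h>

/* Allocate an interrupt handle backed by a fresh eventfd; returns NULL
 * (and leaks nothing) on failure. */
static struct rte_intr_handle *
queue_intr_create(void)
{
	struct rte_intr_handle *h;
	int efd = eventfd(0, EFD_NONBLOCK);

	if (efd < 0)
		return NULL;
	h = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (h == NULL) {
		close(efd);
		return NULL;
	}
	if (rte_intr_type_set(h, RTE_INTR_HANDLE_EXT) != 0 ||
	    rte_intr_fd_set(h, efd) != 0) {
		rte_intr_instance_free(h);
		close(efd);
		return NULL;
	}
	return h;
}
```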
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- for (i = 0; i < pmd->run.num_s2m_rings; i++) {
- mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
+ for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+ mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
dev->data->tx_queues[i] : dev->data->rx_queues[i];
ring = memif_get_ring_from_queue(proc_private, mq);
if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
- if (pmd->role == MEMIF_ROLE_MASTER)
+ if (pmd->role == MEMIF_ROLE_SERVER)
ring->flags = MEMIF_RING_FLAG_MASK_INT;
}
- for (i = 0; i < pmd->run.num_m2s_rings; i++) {
- mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
+ for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+ mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
dev->data->rx_queues[i] : dev->data->tx_queues[i];
ring = memif_get_ring_from_queue(proc_private, mq);
if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
- if (pmd->role == MEMIF_ROLE_SLAVE)
+ if (pmd->role == MEMIF_ROLE_CLIENT)
ring->flags = MEMIF_RING_FLAG_MASK_INT;
}
pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
}
MIF_LOG(INFO, "Connected.");
return 0;
int ret = 0;
switch (pmd->role) {
- case MEMIF_ROLE_SLAVE:
- ret = memif_connect_slave(dev);
+ case MEMIF_ROLE_CLIENT:
+ ret = memif_connect_client(dev);
break;
- case MEMIF_ROLE_MASTER:
- ret = memif_connect_master(dev);
+ case MEMIF_ROLE_SERVER:
+ ret = memif_connect_server(dev);
break;
default:
MIF_LOG(ERR, "Unknown role: %d.", pmd->role);
memif_disconnect(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++)
- (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
+ (*dev->dev_ops->rx_queue_release)(dev, i);
for (i = 0; i < dev->data->nb_tx_queues; i++)
- (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
+ (*dev->dev_ops->tx_queue_release)(dev, i);
memif_socket_remove_device(dev);
} else {
struct pmd_internals *pmd = dev->data->dev_private;
/*
- * SLAVE - TXQ
- * MASTER - RXQ
+ * CLIENT - TXQ
+ * SERVER - RXQ
*/
- pmd->cfg.num_s2m_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
+ pmd->cfg.num_c2s_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
dev->data->nb_tx_queues : dev->data->nb_rx_queues;
/*
- * SLAVE - RXQ
- * MASTER - TXQ
+ * CLIENT - RXQ
+ * SERVER - TXQ
*/
- pmd->cfg.num_m2s_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
+ pmd->cfg.num_s2c_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
dev->data->nb_rx_queues : dev->data->nb_tx_queues;
return 0;
return -ENOMEM;
}
+ /* Allocate interrupt instance */
+ mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ if (mq->intr_handle == NULL) {
+ MIF_LOG(ERR, "Failed to allocate intr handle");
+ return -ENOMEM;
+ }
+
mq->type =
- (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_S2M : MEMIF_RING_M2S;
+ (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_C2S : MEMIF_RING_S2C;
mq->n_pkts = 0;
mq->n_bytes = 0;
- mq->intr_handle.fd = -1;
- mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+ if (rte_intr_fd_set(mq->intr_handle, -1))
+ return -rte_errno;
+
+ if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
mq->in_port = dev->data->port_id;
dev->data->tx_queues[qid] = mq;
return -ENOMEM;
}
- mq->type = (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_M2S : MEMIF_RING_S2M;
+ /* Allocate interrupt instance */
+ mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ if (mq->intr_handle == NULL) {
+ MIF_LOG(ERR, "Failed to allocate intr handle");
+ return -ENOMEM;
+ }
+
+ mq->type = (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_S2C : MEMIF_RING_C2S;
mq->n_pkts = 0;
mq->n_bytes = 0;
- mq->intr_handle.fd = -1;
- mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+ if (rte_intr_fd_set(mq->intr_handle, -1))
+ return -rte_errno;
+
+ if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
mq->mempool = mb_pool;
mq->in_port = dev->data->port_id;
dev->data->rx_queues[qid] = mq;
}
static void
-memif_queue_release(void *queue)
+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct memif_queue *mq = dev->data->rx_queues[qid];
+
+ if (!mq)
+ return;
+
+ rte_intr_instance_free(mq->intr_handle);
+ rte_free(mq);
+}
+
+static void
+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct memif_queue *mq = (struct memif_queue *)queue;
+ struct memif_queue *mq = dev->data->tx_queues[qid];
if (!mq)
return;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
proc_private = dev->process_private;
- if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+ if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
proc_private->regions_num == 0) {
memif_mp_request_regions(dev);
- } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+ } else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
proc_private->regions_num > 0) {
memif_free_regions(dev);
}
stats->opackets = 0;
stats->obytes = 0;
- tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_s2m_rings :
- pmd->run.num_m2s_rings;
+ tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings :
+ pmd->run.num_s2c_rings;
nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
RTE_ETHDEV_QUEUE_STAT_CNTRS;
stats->ibytes += mq->n_bytes;
}
- tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_m2s_rings :
- pmd->run.num_s2m_rings;
+ tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings :
+ pmd->run.num_c2s_rings;
nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
RTE_ETHDEV_QUEUE_STAT_CNTRS;
int i;
struct memif_queue *mq;
- for (i = 0; i < pmd->run.num_s2m_rings; i++) {
- mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->tx_queues[i] :
+ for (i = 0; i < pmd->run.num_c2s_rings; i++) {
+ mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->tx_queues[i] :
dev->data->rx_queues[i];
mq->n_pkts = 0;
mq->n_bytes = 0;
}
- for (i = 0; i < pmd->run.num_m2s_rings; i++) {
- mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->rx_queues[i] :
+ for (i = 0; i < pmd->run.num_s2c_rings; i++) {
+ mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->rx_queues[i] :
dev->data->tx_queues[i];
mq->n_pkts = 0;
mq->n_bytes = 0;
.dev_configure = memif_dev_configure,
.tx_queue_setup = memif_tx_queue_setup,
.rx_queue_setup = memif_rx_queue_setup,
- .rx_queue_release = memif_queue_release,
- .tx_queue_release = memif_queue_release,
+ .rx_queue_release = memif_rx_queue_release,
+ .tx_queue_release = memif_tx_queue_release,
.rx_queue_intr_enable = memif_rx_queue_intr_enable,
.rx_queue_intr_disable = memif_rx_queue_intr_disable,
.link_update = memif_link_update,
pmd->flags = flags;
pmd->flags |= ETH_MEMIF_FLAG_DISABLED;
pmd->role = role;
- /* Zero-copy flag irelevant to master. */
- if (pmd->role == MEMIF_ROLE_MASTER)
+ /* Zero-copy flag irrelevant to server. */
+ if (pmd->role == MEMIF_ROLE_SERVER)
pmd->flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;
ret = memif_socket_init(eth_dev, socket_filename);
pmd->cfg.log2_ring_size = log2_ring_size;
/* set in .dev_configure() */
- pmd->cfg.num_s2m_rings = 0;
- pmd->cfg.num_m2s_rings = 0;
+ pmd->cfg.num_c2s_rings = 0;
+ pmd->cfg.num_s2c_rings = 0;
pmd->cfg.pkt_buffer_size = pkt_buffer_size;
rte_spinlock_init(&pmd->cc_lock);
data->dev_link = pmd_link;
data->mac_addrs = ether_addr;
data->promiscuous = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
eth_dev->device = &vdev->device;
{
enum memif_role_t *role = (enum memif_role_t *)extra_args;
- if (strstr(value, "master") != NULL) {
- *role = MEMIF_ROLE_MASTER;
+ if (strstr(value, "server") != NULL) {
+ *role = MEMIF_ROLE_SERVER;
+ } else if (strstr(value, "client") != NULL) {
+ *role = MEMIF_ROLE_CLIENT;
+ } else if (strstr(value, "master") != NULL) {
+ MIF_LOG(NOTICE, "Role argument \"master\" is deprecated, use \"server\"");
+ *role = MEMIF_ROLE_SERVER;
} else if (strstr(value, "slave") != NULL) {
- *role = MEMIF_ROLE_SLAVE;
+ MIF_LOG(NOTICE, "Role argument \"slave\" is deprecated, use \"client\"");
+ *role = MEMIF_ROLE_CLIENT;
} else {
MIF_LOG(ERR, "Unknown role: %s.", value);
return -EINVAL;
const char **socket_filename = (const char **)extra_args;
*socket_filename = value;
- return memif_check_socket_filename(*socket_filename);
+ return 0;
+}
+
+static int
+memif_set_is_socket_abstract(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ uint32_t *flags = (uint32_t *)extra_args;
+
+ if (strstr(value, "yes") != NULL) {
+ *flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+ } else if (strstr(value, "no") != NULL) {
+ *flags &= ~ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+ } else {
+ MIF_LOG(ERR, "Failed to parse socket-abstract param: %s.", value);
+ return -EINVAL;
+ }
+ return 0;
}
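
For context, the handler above plugs into the kvargs machinery the same way the existing parameters do. A minimal, hypothetical standalone use (the devargs string is illustrative):

```c
#include <stdint.h>
#include <rte_kvargs.h>

static int
parse_socket_abstract_example(uint32_t *flags)
{
	static const char * const valid[] =
		{ ETH_MEMIF_SOCKET_ABSTRACT_ARG, NULL };
	struct rte_kvargs *kvlist =
		rte_kvargs_parse("socket-abstract=no", valid);
	int ret;

	if (kvlist == NULL)
		return -1;
	ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
				 &memif_set_is_socket_abstract, flags);
	rte_kvargs_free(kvlist);
	return ret;
}
```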
static int
int ret = 0;
struct rte_kvargs *kvlist;
const char *name = rte_vdev_device_name(vdev);
- enum memif_role_t role = MEMIF_ROLE_SLAVE;
+ enum memif_role_t role = MEMIF_ROLE_CLIENT;
memif_interface_id_t id = 0;
uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE;
memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE;
MIF_LOG(WARNING, "Failed to register mp action callback: %s",
strerror(rte_errno));
+ /* use abstract address by default */
+ flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+
kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);
/* parse parameters */
(void *)(&socket_filename));
if (ret < 0)
goto exit;
+ ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
+ &memif_set_is_socket_abstract, &flags);
+ if (ret < 0)
+ goto exit;
ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
&memif_set_mac, ether_addr);
if (ret < 0)
goto exit;
}
+ if (!(flags & ETH_MEMIF_FLAG_SOCKET_ABSTRACT)) {
+ ret = memif_check_socket_filename(socket_filename);
+ if (ret < 0)
+ goto exit;
+ }
+
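
The filename check is skipped for abstract sockets because an abstract AF_UNIX address is not a filesystem path at all. What "abstract" means at the socket layer, as a standalone sketch (the caller must ensure the name fits):

```c
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Build a Linux abstract-namespace address: a leading NUL byte in
 * sun_path means no file is created and no path permissions apply. */
static socklen_t
make_abstract_addr(struct sockaddr_un *un, const char *name)
{
	size_t len = strlen(name);	/* assume < sizeof(un->sun_path) - 1 */

	memset(un, 0, sizeof(*un));
	un->sun_family = AF_UNIX;
	memcpy(un->sun_path + 1, name, len);
	return (socklen_t)(offsetof(struct sockaddr_un, sun_path) + 1 + len);
}
```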
/* create interface */
ret = memif_create(vdev, role, id, flags, socket_filename,
log2_ring_size, pkt_buffer_size, secret, ether_addr);
if (eth_dev == NULL)
return 0;
- rte_eth_dev_close(eth_dev->data->port_id);
-
- return 0;
+ return rte_eth_dev_close(eth_dev->data->port_id);
}
static struct rte_vdev_driver pmd_memif_drv = {
RTE_PMD_REGISTER_PARAM_STRING(net_memif,
ETH_MEMIF_ID_ARG "=<int>"
- ETH_MEMIF_ROLE_ARG "=master|slave"
+ ETH_MEMIF_ROLE_ARG "=server|client"
ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
ETH_MEMIF_RING_SIZE_ARG "=<int>"
ETH_MEMIF_SOCKET_ARG "=<string>"
+ ETH_MEMIF_SOCKET_ABSTRACT_ARG "=yes|no"
ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
ETH_MEMIF_ZC_ARG "=yes|no"
ETH_MEMIF_SECRET_ARG "=<string>");
-RTE_LOG_REGISTER(memif_logtype, pmd.net.memif, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(memif_logtype, NOTICE);