#define ETH_MEMIF_PKT_BUFFER_SIZE_ARG "bsize"
#define ETH_MEMIF_RING_SIZE_ARG "rsize"
#define ETH_MEMIF_SOCKET_ARG "socket"
+#define ETH_MEMIF_SOCKET_ABSTRACT_ARG "socket-abstract"
#define ETH_MEMIF_MAC_ARG "mac"
#define ETH_MEMIF_ZC_ARG "zero-copy"
#define ETH_MEMIF_SECRET_ARG "secret"
ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
ETH_MEMIF_RING_SIZE_ARG,
ETH_MEMIF_SOCKET_ARG,
+ ETH_MEMIF_SOCKET_ABSTRACT_ARG,
ETH_MEMIF_MAC_ARG,
ETH_MEMIF_ZC_ARG,
ETH_MEMIF_SECRET_ARG,
static void
memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq)
{
+ uint16_t cur_tail;
uint16_t mask = (1 << mq->log2_ring_size) - 1;
memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
/* FIXME: improve performance */
/* The ring->tail acts as a guard variable between Tx and Rx
* threads, so using load-acquire pairs with store-release
- * to synchronize it between threads.
+ * in function eth_memif_rx for S2M queues.
*/
- while (mq->last_tail != __atomic_load_n(&ring->tail,
- __ATOMIC_ACQUIRE)) {
+ cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
/* Decrement refcnt and free mbuf. (current segment) */
rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
refill:
if (type == MEMIF_RING_M2S) {
- head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ /* ring->head is updated by the receiver and this function
+ * is called in the context of the receiver thread. The loads in
+ * the receiver do not need to synchronize with its own stores.
+ */
+ head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
/* Supply master with new buffers */
refill:
- /* The ring->head acts as a guard variable between Tx and Rx
- * threads, so using load-acquire pairs with store-release
- * to synchronize it between threads.
+ /* ring->head is updated by the receiver and this function
+ * is called in the context of the receiver thread. The loads in
+ * the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
(uint8_t *)proc_private->regions[d0->region]->addr;
}
no_free_mbufs:
+ /* The ring->head acts as a guard variable between Tx and Rx
+ * threads, so using store-release pairs with load-acquire
+ * in function eth_memif_tx.
+ */
__atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
mq->n_pkts += n_rx_pkts;
ring_size = 1 << mq->log2_ring_size;
mask = ring_size - 1;
- n_free = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE) - mq->last_tail;
- mq->last_tail += n_free;
-
if (type == MEMIF_RING_S2M) {
- slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
- n_free = ring_size - slot + mq->last_tail;
+ /* For S2M queues ring->head is updated by the sender and
+ * this function is called in the context of the sending thread.
+ * The loads in the sender do not need to synchronize with
+ * its own stores. Hence, the following load can be a
+ * relaxed load.
+ */
+ slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ n_free = ring_size - slot +
+ __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
} else {
- slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ /* For M2S queues ring->tail is updated by the sender and
+ * this function is called in the context of the sending thread.
+ * The loads in the sender do not need to synchronize with
+ * its own stores. Hence, the following load can be a
+ * relaxed load.
+ */
+ slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
}
rte_eth_devices[mq->in_port].process_private;
memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
uint16_t slot, n_free, ring_size, mask, n_tx_pkts = 0;
- memif_ring_type_t type = mq->type;
struct rte_eth_link link;
if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
memif_free_stored_mbufs(proc_private, mq);
/* ring type always MEMIF_RING_S2M */
- /* The ring->head acts as a guard variable between Tx and Rx
- * threads, so using load-acquire pairs with store-release
- * to synchronize it between threads.
+ /* For S2M queues ring->head is updated by the sender and
+ * this function is called in the context of the sending thread.
+ * The loads in the sender do not need to synchronize with
+ * its own stores. Hence, the following load can be a
+ * relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
}
no_free_slots:
- /* update ring pointers */
- if (type == MEMIF_RING_S2M)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
- else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ /* ring type always MEMIF_RING_S2M */
+ /* The ring->head acts as a guard variable between Tx and Rx
+ * threads, so using store-release pairs with load-acquire
+ * in function eth_memif_rx for S2M rings.
+ */
+ __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
return ret;
}
-static void
+static int
memif_dev_close(struct rte_eth_dev *dev)
{
struct pmd_internals *pmd = dev->data->dev_private;
}
rte_free(dev->process_private);
+
+ return 0;
}
static int
data->dev_link = pmd_link;
data->mac_addrs = ether_addr;
data->promiscuous = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
eth_dev->device = &vdev->device;
eth_dev->tx_pkt_burst = eth_memif_tx;
}
-
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
rte_eth_dev_probing_finish(eth_dev);
return 0;
const char **socket_filename = (const char **)extra_args;
*socket_filename = value;
- return memif_check_socket_filename(*socket_filename);
+ return 0;
+}
+
+static int
+memif_set_is_socket_abstract(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ uint32_t *flags = (uint32_t *)extra_args;
+
+ if (strstr(value, "yes") != NULL) {
+ *flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+ } else if (strstr(value, "no") != NULL) {
+ *flags &= ~ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+ } else {
+ MIF_LOG(ERR, "Failed to parse socket-abstract param: %s.", value);
+ return -EINVAL;
+ }
+ return 0;
}
static int
MIF_LOG(WARNING, "Failed to register mp action callback: %s",
strerror(rte_errno));
+ /* use abstract address by default */
+ flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
+
kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);
/* parse parameters */
(void *)(&socket_filename));
if (ret < 0)
goto exit;
+ ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
+ &memif_set_is_socket_abstract, &flags);
+ if (ret < 0)
+ goto exit;
ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
&memif_set_mac, ether_addr);
if (ret < 0)
goto exit;
}
+ if (!(flags & ETH_MEMIF_FLAG_SOCKET_ABSTRACT)) {
+ ret = memif_check_socket_filename(socket_filename);
+ if (ret < 0)
+ goto exit;
+ }
+
/* create interface */
ret = memif_create(vdev, role, id, flags, socket_filename,
log2_ring_size, pkt_buffer_size, secret, ether_addr);
if (eth_dev == NULL)
return 0;
- rte_eth_dev_close(eth_dev->data->port_id);
-
- return 0;
+ return rte_eth_dev_close(eth_dev->data->port_id);
}
static struct rte_vdev_driver pmd_memif_drv = {
ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
ETH_MEMIF_RING_SIZE_ARG "=<int>"
ETH_MEMIF_SOCKET_ARG "=<string>"
+ ETH_MEMIF_SOCKET_ABSTRACT_ARG "=yes|no"
ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
ETH_MEMIF_ZC_ARG "=yes|no"
ETH_MEMIF_SECRET_ARG "=<string>");