/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved.
 */

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <sys/eventfd.h>

#include <rte_version.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal_memconfig.h>

#include "rte_eth_memif.h"
#include "memif_socket.h"

#define ETH_MEMIF_ID_ARG "id"
#define ETH_MEMIF_ROLE_ARG "role"
#define ETH_MEMIF_PKT_BUFFER_SIZE_ARG "bsize"
#define ETH_MEMIF_RING_SIZE_ARG "rsize"
#define ETH_MEMIF_SOCKET_ARG "socket"
#define ETH_MEMIF_SOCKET_ABSTRACT_ARG "socket-abstract"
#define ETH_MEMIF_MAC_ARG "mac"
#define ETH_MEMIF_ZC_ARG "zero-copy"
#define ETH_MEMIF_SECRET_ARG "secret"
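/*
 * Example devargs (illustrative only; the socket path below is made up):
 * a server instance could be created with
 *   --vdev=net_memif0,role=server,id=0,bsize=2048,rsize=10,socket=/run/memif.sock
 * See the RTE_PMD_REGISTER_PARAM_STRING() entry at the end of this file
 * for the complete list of accepted keys.
 */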
static const char * const valid_arguments[] = {
    ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
    ETH_MEMIF_RING_SIZE_ARG,
    ETH_MEMIF_SOCKET_ABSTRACT_ARG,

static const struct rte_eth_link pmd_link = {
    .link_speed = RTE_ETH_SPEED_NUM_10G,
    .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
    .link_status = RTE_ETH_LINK_DOWN,
    .link_autoneg = RTE_ETH_LINK_AUTONEG

#define MEMIF_MP_SEND_REGION "memif_mp_send_region"

static int memif_region_init_zc(const struct rte_memseg_list *msl,
        const struct rte_memseg *ms, void *arg);

    return ("memif-" RTE_STR(MEMIF_VERSION_MAJOR) "." RTE_STR(MEMIF_VERSION_MINOR));

/* Message header to synchronize regions */
struct mp_region_msg {
    char port_name[RTE_DEV_NAME_MAX_LEN];
    memif_region_index_t idx;
    memif_region_size_t size;

memif_mp_send_region(const struct rte_mp_msg *msg, const void *peer)

    struct rte_eth_dev *dev;
    struct pmd_process_private *proc_private;
    const struct mp_region_msg *msg_param = (const struct mp_region_msg *)msg->param;
    struct rte_mp_msg reply;
    struct mp_region_msg *reply_param = (struct mp_region_msg *)reply.param;

    /* Get requested port */
    ret = rte_eth_dev_get_port_by_name(msg_param->port_name, &port_id);

        MIF_LOG(ERR, "Failed to get port id for %s",
            msg_param->port_name);

    dev = &rte_eth_devices[port_id];
    proc_private = dev->process_private;

    memset(&reply, 0, sizeof(reply));
    strlcpy(reply.name, msg->name, sizeof(reply.name));
    reply_param->idx = msg_param->idx;
    if (proc_private->regions[msg_param->idx] != NULL) {
        reply_param->size = proc_private->regions[msg_param->idx]->region_size;
        reply.fds[0] = proc_private->regions[msg_param->idx]->fd;

    reply.len_param = sizeof(*reply_param);
    if (rte_mp_reply(&reply, peer) < 0) {
        MIF_LOG(ERR, "Failed to reply to an add region request");
/*
 * Called by the secondary process when the port's link status goes up.
 */
memif_mp_request_regions(struct rte_eth_dev *dev)

    struct timespec timeout = {.tv_sec = 5, .tv_nsec = 0};
    struct rte_mp_msg msg, *reply;
    struct rte_mp_reply replies;
    struct mp_region_msg *msg_param = (struct mp_region_msg *)msg.param;
    struct mp_region_msg *reply_param;
    struct memif_region *r;
    struct pmd_process_private *proc_private = dev->process_private;
    struct pmd_internals *pmd = dev->data->dev_private;
    /* in the case of a zero-copy client, only request region 0 */
    uint16_t max_region_num = (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) ?
            1 : ETH_MEMIF_MAX_REGION_NUM;

    MIF_LOG(DEBUG, "Requesting memory regions");

    for (i = 0; i < max_region_num; i++) {
        /* Prepare the message */
        memset(&msg, 0, sizeof(msg));
        strlcpy(msg.name, MEMIF_MP_SEND_REGION, sizeof(msg.name));
        strlcpy(msg_param->port_name, dev->data->name,
            sizeof(msg_param->port_name));

        msg.len_param = sizeof(*msg_param);

        ret = rte_mp_request_sync(&msg, &replies, &timeout);
        if (ret < 0 || replies.nb_received != 1) {
            MIF_LOG(ERR, "Failed to send mp msg: %d",

        reply = &replies.msgs[0];
        reply_param = (struct mp_region_msg *)reply->param;

        if (reply_param->size > 0) {
            r = rte_zmalloc("region", sizeof(struct memif_region), 0);

                MIF_LOG(ERR, "Failed to alloc memif region.");

            r->region_size = reply_param->size;
            if (reply->num_fds < 1) {
                MIF_LOG(ERR, "Missing file descriptor.");

            r->fd = reply->fds[0];

            proc_private->regions[reply_param->idx] = r;
            proc_private->regions_num++;

    if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
        ret = rte_memseg_walk(memif_region_init_zc, (void *)proc_private);

    return memif_connect(dev);
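    /*
     * Summary of the flow above (descriptive comment): for each region
     * index the secondary process sends a request carrying the port name;
     * the primary's memif_mp_send_region() handler answers with the region
     * size and passes the region file descriptor over the IPC channel.
     * Only the fd and size are stored here; the actual mmap() happens
     * later in memif_connect(). A zero-copy client requests region 0 only,
     * because its packet buffers live in DPDK memsegs walked locally by
     * memif_region_init_zc().
     */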
memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info)

    dev_info->max_mac_addrs = 1;
    dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
    dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
    dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
    dev_info->min_rx_bufsize = 0;
    dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

static memif_ring_t *
memif_get_ring(struct pmd_internals *pmd, struct pmd_process_private *proc_private,
        memif_ring_type_t type, uint16_t ring_num)

    /* rings only in region 0 */
    void *p = proc_private->regions[0]->addr;
    int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) *
            (1 << pmd->run.log2_ring_size);

    p = (uint8_t *)p + (ring_num + type * pmd->run.num_c2s_rings) * ring_size;

    return (memif_ring_t *)p;
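    /*
     * Layout of region 0 implied by the arithmetic above (sketch):
     *
     *   offset 0:            C2S ring 0 .. C2S ring (num_c2s_rings - 1)
     *   then:                S2C ring 0 .. S2C ring (num_s2c_rings - 1)
     *   pkt_buffer_offset:   packet buffers (when the region carries them)
     *
     * Each ring occupies sizeof(memif_ring_t) plus one memif_desc_t per
     * slot, which is exactly the 'ring_size' computed above.
     */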
static memif_region_offset_t
memif_get_ring_offset(struct rte_eth_dev *dev, struct memif_queue *mq,
        memif_ring_type_t type, uint16_t num)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct pmd_process_private *proc_private = dev->process_private;

    return ((uint8_t *)memif_get_ring(pmd, proc_private, type, num) -
        (uint8_t *)proc_private->regions[mq->region]->addr);

static memif_ring_t *
memif_get_ring_from_queue(struct pmd_process_private *proc_private,
        struct memif_queue *mq)

    struct memif_region *r;

    r = proc_private->regions[mq->region];

    return (memif_ring_t *)((uint8_t *)r->addr + mq->ring_offset);

memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d)

    return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset);

/* Free mbufs received by the server */
memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq)

    uint16_t mask = (1 << mq->log2_ring_size) - 1;
    memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);

    /* FIXME: improve performance */
    /* The ring->tail acts as a guard variable between Tx and Rx
     * threads: the load-acquire here pairs with the store-release
     * in eth_memif_rx() for C2S queues.
     */
    cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
    while (mq->last_tail != cur_tail) {
        RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
        /* Decrement refcnt and free mbuf. (current segment) */
        rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);
        rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);

memif_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *cur_tail,
        struct rte_mbuf *tail)

    /* Check for number-of-segments overflow */
    if (unlikely(head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS))

    /* Chain 'tail' onto the old tail */
    cur_tail->next = tail;

    /* accumulate number of segments and total length. */
    head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);

    tail->pkt_len = tail->data_len;
    head->pkt_len += tail->pkt_len;
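    /*
     * Bookkeeping example (illustrative numbers): appending a fresh
     * one-segment mbuf to a three-segment chain yields head->nb_segs == 4,
     * and head->pkt_len grows by the new segment's data_len; resetting
     * tail->pkt_len first keeps the total equal to the sum of all
     * segments' data_len.
     */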
eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)

    struct memif_queue *mq = queue;
    struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
    struct pmd_process_private *proc_private =
        rte_eth_devices[mq->in_port].process_private;
    memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
    uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0;
    uint16_t n_rx_pkts = 0;
    uint16_t mbuf_size = rte_pktmbuf_data_room_size(mq->mempool) -
        RTE_PKTMBUF_HEADROOM;
    uint16_t src_len, src_off, dst_len, dst_off, cp_len;
    memif_ring_type_t type = mq->type;
    struct rte_mbuf *mbuf, *mbuf_head, *mbuf_tail;
    ssize_t size __rte_unused;
    struct rte_eth_link link;

    if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))

    if (unlikely(ring == NULL)) {
        /* Secondary process will attempt to request regions. */
        ret = rte_eth_link_get(mq->in_port, &link);

            MIF_LOG(ERR, "Failed to get port %u link info: %s",
                mq->in_port, rte_strerror(-ret));

    /* consume interrupt */
    if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
        size = read(rte_intr_fd_get(mq->intr_handle), &b,

    ring_size = 1 << mq->log2_ring_size;
    mask = ring_size - 1;

    if (type == MEMIF_RING_C2S) {
        cur_slot = mq->last_head;
        last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);

        cur_slot = mq->last_tail;
        last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);

    if (cur_slot == last_slot)

    n_slots = last_slot - cur_slot;

    while (n_slots && n_rx_pkts < nb_pkts) {
        mbuf_head = rte_pktmbuf_alloc(mq->mempool);
        if (unlikely(mbuf_head == NULL))

        mbuf->port = mq->in_port;

        s0 = cur_slot & mask;
        d0 = &ring->desc[s0];

        src_len = d0->length;

        dst_len = mbuf_size - dst_off;

            /* store pointer to tail */

            mbuf = rte_pktmbuf_alloc(mq->mempool);
            if (unlikely(mbuf == NULL))

            mbuf->port = mq->in_port;
            ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
            if (unlikely(ret < 0)) {
                MIF_LOG(ERR, "number-of-segments-overflow");
                rte_pktmbuf_free(mbuf);

            cp_len = RTE_MIN(dst_len, src_len);

            rte_pktmbuf_data_len(mbuf) += cp_len;
            rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
            if (mbuf != mbuf_head)
                rte_pktmbuf_pkt_len(mbuf_head) += cp_len;

            rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
                (uint8_t *)memif_get_buffer(proc_private, d0) +

        if (d0->flags & MEMIF_DESC_FLAG_NEXT)

        mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);

    if (type == MEMIF_RING_C2S) {
        __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
        mq->last_head = cur_slot;

        mq->last_tail = cur_slot;

    if (type == MEMIF_RING_S2C) {
        /* ring->head is updated by the receiver and this function
         * is called in the context of the receiver thread. The loads
         * in the receiver do not need to synchronize with its own
         * stores.
         */
        head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
        n_slots = ring_size - head + mq->last_tail;

            d0 = &ring->desc[s0];
            d0->length = pmd->run.pkt_buffer_size;

        __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);

    mq->n_pkts += n_rx_pkts;
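    /*
     * Synchronization recap for this receive path (descriptive comment):
     * the producer publishes new descriptors with a store-release on its
     * index (head for C2S, tail for S2C) and the consumer observes them
     * with the load-acquire above; completed slots are returned with a
     * store-release on the opposite index. Relaxed accesses are reserved
     * for indices that only the current thread updates.
     */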
eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)

    struct memif_queue *mq = queue;
    struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
    struct pmd_process_private *proc_private =
        rte_eth_devices[mq->in_port].process_private;
    memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
    uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0, head;
    uint16_t n_rx_pkts = 0;
    struct rte_mbuf *mbuf, *mbuf_tail;
    struct rte_mbuf *mbuf_head = NULL;
    struct rte_eth_link link;

    if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))

    if (unlikely(ring == NULL)) {
        /* Secondary process will attempt to request regions. */
        rte_eth_link_get(mq->in_port, &link);

    /* consume interrupt */
    if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
        ssize_t size __rte_unused;
        size = read(rte_intr_fd_get(mq->intr_handle), &b,

    ring_size = 1 << mq->log2_ring_size;
    mask = ring_size - 1;

    cur_slot = mq->last_tail;
    /* The ring->tail acts as a guard variable between Tx and Rx
     * threads: the load-acquire here pairs with the peer's
     * store-release to synchronize it between threads.
     */
    last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
    if (cur_slot == last_slot)

    n_slots = last_slot - cur_slot;

    while (n_slots && n_rx_pkts < nb_pkts) {
        s0 = cur_slot & mask;

        d0 = &ring->desc[s0];
        mbuf_head = mq->buffers[s0];

        /* prefetch next descriptor */
        if (n_rx_pkts + 1 < nb_pkts)
            rte_prefetch0(&ring->desc[(cur_slot + 1) & mask]);

        mbuf->port = mq->in_port;
        rte_pktmbuf_data_len(mbuf) = d0->length;
        rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

        mq->n_bytes += rte_pktmbuf_data_len(mbuf);

        if (d0->flags & MEMIF_DESC_FLAG_NEXT) {
            s0 = cur_slot & mask;
            d0 = &ring->desc[s0];

            mbuf = mq->buffers[s0];
            ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
            if (unlikely(ret < 0)) {
                MIF_LOG(ERR, "number-of-segments-overflow");

    mq->last_tail = cur_slot;

    /* Supply the server with new buffers */

    /* ring->head is updated by the receiver and this function
     * is called in the context of the receiver thread. The loads
     * in the receiver do not need to synchronize with its own
     * stores.
     */
    head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
    n_slots = ring_size - head + mq->last_tail;

    ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots);
    if (unlikely(ret < 0))

        rte_prefetch0(mq->buffers[head & mask]);
        d0 = &ring->desc[s0];
        /* store buffer header */
        mbuf = mq->buffers[s0];
        /* populate descriptor */
        d0->length = rte_pktmbuf_data_room_size(mq->mempool) -
            RTE_PKTMBUF_HEADROOM;

        d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) -
            (uint8_t *)proc_private->regions[d0->region]->addr;

    /* The ring->head acts as a guard variable between Tx and Rx
     * threads: the store-release here pairs with the load-acquire
     * in eth_memif_tx().
     */
    __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);

    mq->n_pkts += n_rx_pkts;
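    /*
     * Ownership note for the zero-copy receive path (descriptive comment):
     * the mbufs handed to the application are the very buffers the ring
     * descriptors pointed at, so the refill loop above must install fresh
     * mbufs before the new head is released; otherwise the peer could
     * write into buffers the application still owns.
     */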
eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)

    struct memif_queue *mq = queue;
    struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
    struct pmd_process_private *proc_private =
        rte_eth_devices[mq->in_port].process_private;
    memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
    uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
    uint16_t src_len, src_off, dst_len, dst_off, cp_len, nb_segs;
    memif_ring_type_t type = mq->type;
    struct rte_mbuf *mbuf;
    struct rte_mbuf *mbuf_head;
    struct rte_eth_link link;

    if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))

    if (unlikely(ring == NULL)) {

        /* Secondary process will attempt to request regions. */
        ret = rte_eth_link_get(mq->in_port, &link);

            MIF_LOG(ERR, "Failed to get port %u link info: %s",
                mq->in_port, rte_strerror(-ret));

    ring_size = 1 << mq->log2_ring_size;
    mask = ring_size - 1;

    if (type == MEMIF_RING_C2S) {
        /* For C2S queues ring->head is updated by the sender and
         * this function is called in the context of the sending
         * thread. The loads in the sender do not need to synchronize
         * with its own stores. Hence, the following load can be a
         * relaxed load.
         */
        slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
        n_free = ring_size - slot +
            __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);

        /* For S2C queues ring->tail is updated by the sender and
         * this function is called in the context of the sending
         * thread. The loads in the sender do not need to synchronize
         * with its own stores. Hence, the following load can be a
         * relaxed load.
         */
        slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
        n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;

    while (n_tx_pkts < nb_pkts && n_free) {

        nb_segs = mbuf_head->nb_segs;

        d0 = &ring->desc[slot & mask];

        dst_len = (type == MEMIF_RING_C2S) ?
            pmd->run.pkt_buffer_size : d0->length;

        src_len = rte_pktmbuf_data_len(mbuf);

            d0->flags |= MEMIF_DESC_FLAG_NEXT;
            d0 = &ring->desc[slot & mask];

            dst_len = (type == MEMIF_RING_C2S) ?
                pmd->run.pkt_buffer_size : d0->length;

        cp_len = RTE_MIN(dst_len, src_len);

        rte_memcpy((uint8_t *)memif_get_buffer(proc_private,
            rte_pktmbuf_mtod_offset(mbuf, void *, src_off),

        mq->n_bytes += cp_len;

        d0->length = dst_off;

        rte_pktmbuf_free(mbuf_head);

    if (type == MEMIF_RING_C2S)
        __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);

        __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);

    if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {

        size = write(rte_intr_fd_get(mq->intr_handle), &a,

        if (unlikely(size < 0)) {
            MIF_LOG(ERR,
                "Failed to send interrupt. %s", strerror(errno));

    mq->n_pkts += n_tx_pkts;

memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq,
        memif_ring_t *ring, struct rte_mbuf *mbuf, const uint16_t mask,
        uint16_t slot, uint16_t n_free)

    uint16_t nb_segs = mbuf->nb_segs;

    /* store pointer to mbuf to free it later */
    mq->buffers[slot & mask] = mbuf;
    /* Increment refcnt to make sure the buffer is not freed before the
     * server receives it. (current segment)
     */
    rte_mbuf_refcnt_update(mbuf, 1);
    /* populate descriptor */
    d0 = &ring->desc[slot & mask];
    d0->length = rte_pktmbuf_data_len(mbuf);
    mq->n_bytes += rte_pktmbuf_data_len(mbuf);
    /* FIXME: get region index */

    d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) -
        (uint8_t *)proc_private->regions[d0->region]->addr;

    /* check if buffer is chained */

        /* mark buffer as chained */
        d0->flags |= MEMIF_DESC_FLAG_NEXT;

    /* update counters */

eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)

    struct memif_queue *mq = queue;
    struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
    struct pmd_process_private *proc_private =
        rte_eth_devices[mq->in_port].process_private;
    memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
    uint16_t slot, n_free, ring_size, mask, n_tx_pkts = 0;
    struct rte_eth_link link;

    if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))

    if (unlikely(ring == NULL)) {
        /* Secondary process will attempt to request regions. */
        rte_eth_link_get(mq->in_port, &link);

    ring_size = 1 << mq->log2_ring_size;
    mask = ring_size - 1;

    /* free mbufs received by the server */
    memif_free_stored_mbufs(proc_private, mq);

    /* ring type always MEMIF_RING_C2S */
    /* For C2S queues ring->head is updated by the sender and
     * this function is called in the context of the sending
     * thread. The loads in the sender do not need to synchronize
     * with its own stores. Hence, the following load can be a
     * relaxed load.
     */
    slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
    n_free = ring_size - slot + mq->last_tail;

    while (n_free && (n_tx_pkts < nb_pkts)) {
        while ((n_free > 4) && ((nb_pkts - n_tx_pkts) > 4)) {
            if ((nb_pkts - n_tx_pkts) > 8) {
                rte_prefetch0(*bufs + 4);
                rte_prefetch0(*bufs + 5);
                rte_prefetch0(*bufs + 6);
                rte_prefetch0(*bufs + 7);

            used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++,

            if (unlikely(used_slots < 1))

            n_free -= used_slots;

            used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++,

            if (unlikely(used_slots < 1))

            n_free -= used_slots;

            used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++,

            if (unlikely(used_slots < 1))

            n_free -= used_slots;

            used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++,

            if (unlikely(used_slots < 1))

            n_free -= used_slots;

        used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++,

        if (unlikely(used_slots < 1))

        n_free -= used_slots;

    /* ring type always MEMIF_RING_C2S */
    /* The ring->head acts as a guard variable between Tx and Rx
     * threads: the store-release here pairs with the load-acquire
     * in eth_memif_rx() for C2S rings.
     */
    __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);

    /* Send interrupt, if enabled. */
    if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {

        ssize_t size = write(rte_intr_fd_get(mq->intr_handle),

        if (unlikely(size < 0)) {
            MIF_LOG(ERR,
                "Failed to send interrupt. %s", strerror(errno));

    /* increment queue counters */
    mq->n_pkts += n_tx_pkts;
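    /*
     * The loop above is a plain throughput optimization: it is manually
     * unrolled four packets at a time, prefetching the next four mbufs
     * when at least eight packets remain, and falls back to one packet
     * per iteration for the tail. The ring protocol itself is unchanged.
     */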
memif_free_regions(struct rte_eth_dev *dev)

    struct pmd_process_private *proc_private = dev->process_private;
    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_region *r;

    /* regions are allocated contiguously, so it's
     * enough to loop until 'proc_private->regions_num'
     */
    for (i = 0; i < proc_private->regions_num; i++) {
        r = proc_private->regions[i];

        /* This is a memzone */
        if (i > 0 && (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)) {

        if (r->addr != NULL) {
            munmap(r->addr, r->region_size);

        proc_private->regions[i] = NULL;

    proc_private->regions_num = 0;

memif_region_init_zc(const struct rte_memseg_list *msl, const struct rte_memseg *ms,

    struct pmd_process_private *proc_private = (struct pmd_process_private *)arg;
    struct memif_region *r;

    if (proc_private->regions_num < 1) {
        MIF_LOG(ERR, "Missing descriptor region");

    r = proc_private->regions[proc_private->regions_num - 1];

    if (r->addr != msl->base_va)
        r = proc_private->regions[++proc_private->regions_num - 1];

        r = rte_zmalloc("region", sizeof(struct memif_region), 0);

            MIF_LOG(ERR, "Failed to alloc memif region.");

        r->addr = msl->base_va;
        r->region_size = ms->len;
        r->fd = rte_memseg_get_fd(ms);

        r->pkt_buffer_offset = 0;

        proc_private->regions[proc_private->regions_num - 1] = r;

        r->region_size += ms->len;

memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct pmd_process_private *proc_private = dev->process_private;
    char shm_name[ETH_MEMIF_SHM_NAME_SIZE];
    struct memif_region *r;

    if (proc_private->regions_num >= ETH_MEMIF_MAX_REGION_NUM) {
        MIF_LOG(ERR, "Too many regions.");

    r = rte_zmalloc("region", sizeof(struct memif_region), 0);

        MIF_LOG(ERR, "Failed to alloc memif region.");

    /* calculate buffer offset */
    r->pkt_buffer_offset = (pmd->run.num_c2s_rings + pmd->run.num_s2c_rings) *
        (sizeof(memif_ring_t) + sizeof(memif_desc_t) *
        (1 << pmd->run.log2_ring_size));

    r->region_size = r->pkt_buffer_offset;
    /* if the region has buffers, add the buffer size to region_size */
    if (has_buffers == 1)
        r->region_size += (uint32_t)(pmd->run.pkt_buffer_size *
            (1 << pmd->run.log2_ring_size) *
            (pmd->run.num_c2s_rings +
             pmd->run.num_s2c_rings));
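    /*
     * Worked example (assuming the documented defaults, log2_ring_size = 10
     * and pkt_buffer_size = 2048, with one C2S and one S2C ring):
     *   pkt_buffer_offset = 2 * (sizeof(memif_ring_t) + 1024 * 16)
     *   buffer area       = 2048 * 1024 * 2 = 4 MiB
     * (memif_desc_t is 16 bytes, see the RTE_BUILD_BUG_ON() in
     * rte_pmd_memif_probe()), so the region is the ring/descriptor area
     * followed by roughly 4 MiB of packet buffers.
     */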
    memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE);
    snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d",
        proc_private->regions_num);

    r->fd = memfd_create(shm_name, MFD_ALLOW_SEALING);

        MIF_LOG(ERR, "Failed to create shm file: %s.", strerror(errno));

    ret = fcntl(r->fd, F_ADD_SEALS, F_SEAL_SHRINK);

        MIF_LOG(ERR, "Failed to add seals to shm file: %s.", strerror(errno));

    ret = ftruncate(r->fd, r->region_size);

        MIF_LOG(ERR, "Failed to truncate shm file: %s.", strerror(errno));

    r->addr = mmap(NULL, r->region_size, PROT_READ |
        PROT_WRITE, MAP_SHARED, r->fd, 0);
    if (r->addr == MAP_FAILED) {
        MIF_LOG(ERR, "Failed to mmap shm region: %s.", strerror(errno));

    proc_private->regions[proc_private->regions_num] = r;
    proc_private->regions_num++;

memif_regions_init(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;

    /*
     * Zero-copy exposes DPDK memory.
     * Each memseg list will be represented by a memif region.
     * Zero-copy region indexing starts at memseg list idx + 1,
     * as region 0 is already reserved for descriptors.
     */
    if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
        /* create region idx 0 containing descriptors */
        ret = memif_region_init_shm(dev, 0);

        ret = rte_memseg_walk(memif_region_init_zc, (void *)dev->process_private);

        /* create one memory region containing rings and buffers */
        ret = memif_region_init_shm(dev, /* has buffers */ 1);

memif_init_rings(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct pmd_process_private *proc_private = dev->process_private;

    for (i = 0; i < pmd->run.num_c2s_rings; i++) {
        ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
        __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
        ring->cookie = MEMIF_COOKIE;

        if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)

        for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
            slot = i * (1 << pmd->run.log2_ring_size) + j;
            ring->desc[j].region = 0;
            ring->desc[j].offset =
                proc_private->regions[0]->pkt_buffer_offset +
                (uint32_t)(slot * pmd->run.pkt_buffer_size);
            ring->desc[j].length = pmd->run.pkt_buffer_size;

    for (i = 0; i < pmd->run.num_s2c_rings; i++) {
        ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
        __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
        ring->cookie = MEMIF_COOKIE;

        if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)

        for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
            slot = (i + pmd->run.num_c2s_rings) *
                (1 << pmd->run.log2_ring_size) + j;
            ring->desc[j].region = 0;
            ring->desc[j].offset =
                proc_private->regions[0]->pkt_buffer_offset +
                (uint32_t)(slot * pmd->run.pkt_buffer_size);
            ring->desc[j].length = pmd->run.pkt_buffer_size;
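    /*
     * Mapping example (illustrative): buffer slots are numbered
     * contiguously across all rings, C2S rings first. With
     * log2_ring_size = 10, descriptor j of S2C ring 0 uses
     * slot = num_c2s_rings * 1024 + j and points at offset
     * pkt_buffer_offset + slot * pkt_buffer_size within region 0.
     */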
/* called only by the client */
memif_init_queues(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_queue *mq;

    for (i = 0; i < pmd->run.num_c2s_rings; i++) {
        mq = dev->data->tx_queues[i];
        mq->log2_ring_size = pmd->run.log2_ring_size;
        /* queues located only in region 0 */

        mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_C2S, i);

        if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))

        if (rte_intr_fd_get(mq->intr_handle) < 0) {
            MIF_LOG(ERR,
                "Failed to create eventfd for tx queue %d: %s.", i,

        if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
            mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) *
                (1 << mq->log2_ring_size), 0);
            if (mq->buffers == NULL)

    for (i = 0; i < pmd->run.num_s2c_rings; i++) {
        mq = dev->data->rx_queues[i];
        mq->log2_ring_size = pmd->run.log2_ring_size;
        /* queues located only in region 0 */

        mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2C, i);

        if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))

        if (rte_intr_fd_get(mq->intr_handle) < 0) {
            MIF_LOG(ERR,
                "Failed to create eventfd for rx queue %d: %s.", i,

        if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
            mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) *
                (1 << mq->log2_ring_size), 0);
            if (mq->buffers == NULL)

memif_init_regions_and_queues(struct rte_eth_dev *dev)

    ret = memif_regions_init(dev);

    memif_init_rings(dev);

    ret = memif_init_queues(dev);

memif_connect(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct pmd_process_private *proc_private = dev->process_private;
    struct memif_region *mr;
    struct memif_queue *mq;

    for (i = 0; i < proc_private->regions_num; i++) {
        mr = proc_private->regions[i];

        if (mr->addr == NULL) {

            mr->addr = mmap(NULL, mr->region_size,
                PROT_READ | PROT_WRITE,
                MAP_SHARED, mr->fd, 0);
            if (mr->addr == MAP_FAILED) {
                MIF_LOG(ERR, "mmap failed: %s",

        if (i > 0 && (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)) {
            /* close memseg file */

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        for (i = 0; i < pmd->run.num_c2s_rings; i++) {
            mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
                dev->data->tx_queues[i] : dev->data->rx_queues[i];
            ring = memif_get_ring_from_queue(proc_private, mq);
            if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
                MIF_LOG(ERR, "Wrong ring");

            __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
            __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);

            /* enable polling mode */
            if (pmd->role == MEMIF_ROLE_SERVER)
                ring->flags = MEMIF_RING_FLAG_MASK_INT;

        for (i = 0; i < pmd->run.num_s2c_rings; i++) {
            mq = (pmd->role == MEMIF_ROLE_CLIENT) ?
                dev->data->rx_queues[i] : dev->data->tx_queues[i];
            ring = memif_get_ring_from_queue(proc_private, mq);
            if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
                MIF_LOG(ERR, "Wrong ring");

            __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
            __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);

            /* enable polling mode */
            if (pmd->role == MEMIF_ROLE_CLIENT)
                ring->flags = MEMIF_RING_FLAG_MASK_INT;

    pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
    pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
    dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

    MIF_LOG(INFO, "Connected.");
memif_dev_start(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;

    switch (pmd->role) {
    case MEMIF_ROLE_CLIENT:
        ret = memif_connect_client(dev);

    case MEMIF_ROLE_SERVER:
        ret = memif_connect_server(dev);

        MIF_LOG(ERR, "Unknown role: %d.", pmd->role);

memif_dev_close(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        memif_msg_enq_disconnect(pmd->cc, "Device closed", 0);
        memif_disconnect(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
            (*dev->dev_ops->rx_queue_release)(dev, i);
        for (i = 0; i < dev->data->nb_tx_queues; i++)
            (*dev->dev_ops->tx_queue_release)(dev, i);

        memif_socket_remove_device(dev);

        memif_disconnect(dev);

    rte_free(dev->process_private);

memif_dev_configure(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;

    pmd->cfg.num_c2s_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
        dev->data->nb_tx_queues : dev->data->nb_rx_queues;

    pmd->cfg.num_s2c_rings = (pmd->role == MEMIF_ROLE_CLIENT) ?
        dev->data->nb_rx_queues : dev->data->nb_tx_queues;

memif_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t nb_tx_desc __rte_unused,
        unsigned int socket_id __rte_unused,
        const struct rte_eth_txconf *tx_conf __rte_unused)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_queue *mq;

    mq = rte_zmalloc("tx-queue", sizeof(struct memif_queue), 0);

        MIF_LOG(ERR, "Failed to allocate tx queue id: %u", qid);

    /* Allocate interrupt instance */
    mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
    if (mq->intr_handle == NULL) {
        MIF_LOG(ERR, "Failed to allocate intr handle");

        (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_C2S : MEMIF_RING_S2C;

    if (rte_intr_fd_set(mq->intr_handle, -1))

    if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))

    mq->in_port = dev->data->port_id;
    dev->data->tx_queues[qid] = mq;

memif_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t nb_rx_desc __rte_unused,
        unsigned int socket_id __rte_unused,
        const struct rte_eth_rxconf *rx_conf __rte_unused,
        struct rte_mempool *mb_pool)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_queue *mq;

    mq = rte_zmalloc("rx-queue", sizeof(struct memif_queue), 0);

        MIF_LOG(ERR, "Failed to allocate rx queue id: %u", qid);

    /* Allocate interrupt instance */
    mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
    if (mq->intr_handle == NULL) {
        MIF_LOG(ERR, "Failed to allocate intr handle");

    mq->type = (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_S2C : MEMIF_RING_C2S;

    if (rte_intr_fd_set(mq->intr_handle, -1))

    if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))

    mq->mempool = mb_pool;
    mq->in_port = dev->data->port_id;
    dev->data->rx_queues[qid] = mq;

memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)

    struct memif_queue *mq = dev->data->rx_queues[qid];

    rte_intr_instance_free(mq->intr_handle);

memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)

    struct memif_queue *mq = dev->data->tx_queues[qid];

memif_link_update(struct rte_eth_dev *dev,
        int wait_to_complete __rte_unused)

    struct pmd_process_private *proc_private;

    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
        proc_private = dev->process_private;
        if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
                proc_private->regions_num == 0) {
            memif_mp_request_regions(dev);
        } else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
                proc_private->regions_num > 0) {
            memif_free_regions(dev);
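    /*
     * In a secondary process, link_update doubles as the attach/detach
     * hook: a link-up transition with no regions mapped triggers
     * memif_mp_request_regions(), and a link-down transition with regions
     * still mapped releases them, so secondaries follow the primary's
     * connection state lazily.
     */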
memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_queue *mq;

    stats->ipackets = 0;

    stats->opackets = 0;

    tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings :
        pmd->run.num_s2c_rings;
    nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
        RTE_ETHDEV_QUEUE_STAT_CNTRS;

    for (i = 0; i < nq; i++) {
        mq = dev->data->rx_queues[i];
        stats->q_ipackets[i] = mq->n_pkts;
        stats->q_ibytes[i] = mq->n_bytes;
        stats->ipackets += mq->n_pkts;
        stats->ibytes += mq->n_bytes;

    tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings :
        pmd->run.num_c2s_rings;
    nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
        RTE_ETHDEV_QUEUE_STAT_CNTRS;

    for (i = 0; i < nq; i++) {
        mq = dev->data->tx_queues[i];
        stats->q_opackets[i] = mq->n_pkts;
        stats->q_obytes[i] = mq->n_bytes;
        stats->opackets += mq->n_pkts;
        stats->obytes += mq->n_bytes;

memif_stats_reset(struct rte_eth_dev *dev)

    struct pmd_internals *pmd = dev->data->dev_private;
    struct memif_queue *mq;

    for (i = 0; i < pmd->run.num_c2s_rings; i++) {
        mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->tx_queues[i] :
            dev->data->rx_queues[i];

    for (i = 0; i < pmd->run.num_s2c_rings; i++) {
        mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->rx_queues[i] :
            dev->data->tx_queues[i];

memif_rx_queue_intr_enable(struct rte_eth_dev *dev __rte_unused,
        uint16_t qid __rte_unused)

    MIF_LOG(WARNING, "Interrupt mode not supported.");

memif_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t qid __rte_unused)

    struct pmd_internals *pmd __rte_unused = dev->data->dev_private;

static const struct eth_dev_ops ops = {
    .dev_start = memif_dev_start,
    .dev_close = memif_dev_close,
    .dev_infos_get = memif_dev_info,
    .dev_configure = memif_dev_configure,
    .tx_queue_setup = memif_tx_queue_setup,
    .rx_queue_setup = memif_rx_queue_setup,
    .rx_queue_release = memif_rx_queue_release,
    .tx_queue_release = memif_tx_queue_release,
    .rx_queue_intr_enable = memif_rx_queue_intr_enable,
    .rx_queue_intr_disable = memif_rx_queue_intr_disable,
    .link_update = memif_link_update,
    .stats_get = memif_stats_get,
    .stats_reset = memif_stats_reset,

memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
        memif_interface_id_t id, uint32_t flags,
        const char *socket_filename,
        memif_log2_ring_size_t log2_ring_size,
        uint16_t pkt_buffer_size, const char *secret,
        struct rte_ether_addr *ether_addr)

    struct rte_eth_dev *eth_dev;
    struct rte_eth_dev_data *data;
    struct pmd_internals *pmd;
    struct pmd_process_private *process_private;
    const unsigned int numa_node = vdev->device.numa_node;
    const char *name = rte_vdev_device_name(vdev);

    eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
    if (eth_dev == NULL) {
        MIF_LOG(ERR, "%s: Unable to allocate device struct.", name);

    process_private = (struct pmd_process_private *)
        rte_zmalloc(name, sizeof(struct pmd_process_private),
            RTE_CACHE_LINE_SIZE);

    if (process_private == NULL) {
        MIF_LOG(ERR, "Failed to alloc memory for process private");

    eth_dev->process_private = process_private;

    pmd = eth_dev->data->dev_private;
    memset(pmd, 0, sizeof(*pmd));

    pmd->flags |= ETH_MEMIF_FLAG_DISABLED;

    /* The zero-copy flag is irrelevant to the server. */
    if (pmd->role == MEMIF_ROLE_SERVER)
        pmd->flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;

    ret = memif_socket_init(eth_dev, socket_filename);

    memset(pmd->secret, 0, sizeof(char) * ETH_MEMIF_SECRET_SIZE);

    strlcpy(pmd->secret, secret, sizeof(pmd->secret));

    pmd->cfg.log2_ring_size = log2_ring_size;
    /* set in .dev_configure() */
    pmd->cfg.num_c2s_rings = 0;
    pmd->cfg.num_s2c_rings = 0;

    pmd->cfg.pkt_buffer_size = pkt_buffer_size;
    rte_spinlock_init(&pmd->cc_lock);

    data = eth_dev->data;
    data->dev_private = pmd;
    data->numa_node = numa_node;
    data->dev_link = pmd_link;
    data->mac_addrs = ether_addr;
    data->promiscuous = 1;
    data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

    eth_dev->dev_ops = &ops;
    eth_dev->device = &vdev->device;
    if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
        eth_dev->rx_pkt_burst = eth_memif_rx_zc;
        eth_dev->tx_pkt_burst = eth_memif_tx_zc;

        eth_dev->rx_pkt_burst = eth_memif_rx;
        eth_dev->tx_pkt_burst = eth_memif_tx;

    rte_eth_dev_probing_finish(eth_dev);

memif_set_role(const char *key __rte_unused, const char *value,

    enum memif_role_t *role = (enum memif_role_t *)extra_args;

    if (strstr(value, "server") != NULL) {
        *role = MEMIF_ROLE_SERVER;
    } else if (strstr(value, "client") != NULL) {
        *role = MEMIF_ROLE_CLIENT;
    } else if (strstr(value, "master") != NULL) {
        MIF_LOG(NOTICE, "Role argument \"master\" is deprecated, use \"server\"");
        *role = MEMIF_ROLE_SERVER;
    } else if (strstr(value, "slave") != NULL) {
        MIF_LOG(NOTICE, "Role argument \"slave\" is deprecated, use \"client\"");
        *role = MEMIF_ROLE_CLIENT;

        MIF_LOG(ERR, "Unknown role: %s.", value);

memif_set_zc(const char *key __rte_unused, const char *value, void *extra_args)

    uint32_t *flags = (uint32_t *)extra_args;

    if (strstr(value, "yes") != NULL) {
        if (!rte_mcfg_get_single_file_segments()) {
            MIF_LOG(ERR, "Zero-copy doesn't support multi-file segments.");

        *flags |= ETH_MEMIF_FLAG_ZERO_COPY;
    } else if (strstr(value, "no") != NULL) {
        *flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;

        MIF_LOG(ERR, "Failed to parse zero-copy param: %s.", value);

memif_set_id(const char *key __rte_unused, const char *value, void *extra_args)

    memif_interface_id_t *id = (memif_interface_id_t *)extra_args;

    /* even if parsing fails, 0 is a valid id */
    *id = strtoul(value, NULL, 10);

memif_set_bs(const char *key __rte_unused, const char *value, void *extra_args)

    uint16_t *pkt_buffer_size = (uint16_t *)extra_args;

    tmp = strtoul(value, NULL, 10);
    if (tmp == 0 || tmp > 0xFFFF) {
        MIF_LOG(ERR, "Invalid buffer size: %s.", value);

    *pkt_buffer_size = tmp;

memif_set_rs(const char *key __rte_unused, const char *value, void *extra_args)

    memif_log2_ring_size_t *log2_ring_size =
        (memif_log2_ring_size_t *)extra_args;

    tmp = strtoul(value, NULL, 10);
    if (tmp == 0 || tmp > ETH_MEMIF_MAX_LOG2_RING_SIZE) {
        MIF_LOG(ERR, "Invalid ring size: %s (max %u).",
            value, ETH_MEMIF_MAX_LOG2_RING_SIZE);

    *log2_ring_size = tmp;
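    /* Example: rsize=10 requests rings of 1 << 10 = 1024 slots; the
     * devarg is a log2 value, not a slot count.
     */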
/* check if the directory exists and if we have permission to read/write it */
memif_check_socket_filename(const char *filename)

    char *dir = NULL, *tmp;

    if (strlen(filename) >= MEMIF_SOCKET_UN_SIZE) {
        MIF_LOG(ERR, "Unix socket address too long (max 108).");

    tmp = strrchr(filename, '/');

        idx = tmp - filename;
        dir = rte_zmalloc("memif_tmp", sizeof(char) * (idx + 1), 0);

            MIF_LOG(ERR, "Failed to allocate memory.");

        strlcpy(dir, filename, sizeof(char) * (idx + 1));

    if (dir == NULL || (faccessat(-1, dir, F_OK | R_OK |
            W_OK, AT_EACCESS) < 0)) {
        MIF_LOG(ERR, "Invalid socket directory.");

memif_set_socket_filename(const char *key __rte_unused, const char *value,

    const char **socket_filename = (const char **)extra_args;

    *socket_filename = value;

memif_set_is_socket_abstract(const char *key __rte_unused, const char *value, void *extra_args)

    uint32_t *flags = (uint32_t *)extra_args;

    if (strstr(value, "yes") != NULL) {
        *flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;
    } else if (strstr(value, "no") != NULL) {
        *flags &= ~ETH_MEMIF_FLAG_SOCKET_ABSTRACT;

        MIF_LOG(ERR, "Failed to parse socket-abstract param: %s.", value);

memif_set_mac(const char *key __rte_unused, const char *value, void *extra_args)

    struct rte_ether_addr *ether_addr = (struct rte_ether_addr *)extra_args;

    if (rte_ether_unformat_addr(value, ether_addr) < 0)
        MIF_LOG(WARNING, "Failed to parse mac '%s'.", value);

memif_set_secret(const char *key __rte_unused, const char *value, void *extra_args)

    const char **secret = (const char **)extra_args;

rte_pmd_memif_probe(struct rte_vdev_device *vdev)

    RTE_BUILD_BUG_ON(sizeof(memif_msg_t) != 128);
    RTE_BUILD_BUG_ON(sizeof(memif_desc_t) != 16);

    struct rte_kvargs *kvlist;
    const char *name = rte_vdev_device_name(vdev);
    enum memif_role_t role = MEMIF_ROLE_CLIENT;
    memif_interface_id_t id = 0;
    uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE;
    memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE;
    const char *socket_filename = ETH_MEMIF_DEFAULT_SOCKET_FILENAME;
    const char *secret = NULL;
    struct rte_ether_addr *ether_addr = rte_zmalloc("",
        sizeof(struct rte_ether_addr), 0);
    struct rte_eth_dev *eth_dev;

    rte_eth_random_addr(ether_addr->addr_bytes);

    MIF_LOG(INFO, "Initialize MEMIF: %s.", name);

    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
        eth_dev = rte_eth_dev_attach_secondary(name);

            MIF_LOG(ERR, "Failed to probe %s", name);

        eth_dev->dev_ops = &ops;
        eth_dev->device = &vdev->device;
        eth_dev->rx_pkt_burst = eth_memif_rx;
        eth_dev->tx_pkt_burst = eth_memif_tx;

        if (!rte_eal_primary_proc_alive(NULL)) {
            MIF_LOG(ERR, "Primary process is missing");

        eth_dev->process_private = (struct pmd_process_private *)
            rte_zmalloc(name,
                sizeof(struct pmd_process_private),
                RTE_CACHE_LINE_SIZE);
        if (eth_dev->process_private == NULL) {
            MIF_LOG(ERR,
                "Failed to alloc memory for process private");

        rte_eth_dev_probing_finish(eth_dev);

    ret = rte_mp_action_register(MEMIF_MP_SEND_REGION, memif_mp_send_region);
    /*
     * The primary process can continue probing, but a secondary process
     * won't be able to get memory region information.
     */
    if (ret < 0 && rte_errno != EEXIST)
        MIF_LOG(WARNING, "Failed to register mp action callback: %s",
            strerror(rte_errno));

    /* use an abstract address by default */
    flags |= ETH_MEMIF_FLAG_SOCKET_ABSTRACT;

    kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);

    /* parse parameters */
    if (kvlist != NULL) {
        ret = rte_kvargs_process(kvlist, ETH_MEMIF_ROLE_ARG,
                &memif_set_role, &role);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_ID_ARG,
                &memif_set_id, &id);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
                &memif_set_bs, &pkt_buffer_size);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_RING_SIZE_ARG,
                &memif_set_rs, &log2_ring_size);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ARG,
                &memif_set_socket_filename,
                (void *)(&socket_filename));

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ABSTRACT_ARG,
                &memif_set_is_socket_abstract, &flags);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
                &memif_set_mac, ether_addr);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_ZC_ARG,
                &memif_set_zc, &flags);

        ret = rte_kvargs_process(kvlist, ETH_MEMIF_SECRET_ARG,
                &memif_set_secret, (void *)(&secret));

    if (!(flags & ETH_MEMIF_FLAG_SOCKET_ABSTRACT)) {
        ret = memif_check_socket_filename(socket_filename);

    /* create interface */
    ret = memif_create(vdev, role, id, flags, socket_filename,
            log2_ring_size, pkt_buffer_size, secret, ether_addr);

    rte_kvargs_free(kvlist);

rte_pmd_memif_remove(struct rte_vdev_device *vdev)

    struct rte_eth_dev *eth_dev;

    eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
    if (eth_dev == NULL)

    return rte_eth_dev_close(eth_dev->data->port_id);

static struct rte_vdev_driver pmd_memif_drv = {
    .probe = rte_pmd_memif_probe,
    .remove = rte_pmd_memif_remove,

RTE_PMD_REGISTER_VDEV(net_memif, pmd_memif_drv);

RTE_PMD_REGISTER_PARAM_STRING(net_memif,
    ETH_MEMIF_ID_ARG "=<int>"
    ETH_MEMIF_ROLE_ARG "=server|client"
    ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
    ETH_MEMIF_RING_SIZE_ARG "=<int>"
    ETH_MEMIF_SOCKET_ARG "=<string>"
    ETH_MEMIF_SOCKET_ABSTRACT_ARG "=yes|no"
    ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
    ETH_MEMIF_ZC_ARG "=yes|no"
    ETH_MEMIF_SECRET_ARG "=<string>");

RTE_LOG_REGISTER_DEFAULT(memif_logtype, NOTICE);
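/*
 * Usage sketch (illustrative; exact options may differ between DPDK
 * versions): two testpmd instances can be connected back to back with
 *
 *   testpmd -l 0-1 --file-prefix=pmd1 --vdev=net_memif0,role=server -- -i
 *   testpmd -l 2-3 --file-prefix=pmd2 --vdev=net_memif0,role=client -- -i
 *
 * Both endpoints rendezvous over the same control socket and interface id.
 */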