/*-
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "compat_netmap.h"
struct netmap_port {
	struct rte_mempool   *pool;
	struct netmap_if     *nmif;
	struct rte_eth_conf   eth_conf;
	struct rte_eth_txconf tx_conf;
	struct rte_eth_rxconf rx_conf;
	int32_t  socket_id;
	uint16_t nr_tx_rings;
	uint16_t nr_rx_rings;
	uint32_t nr_tx_slots;
	uint32_t nr_rx_slots;
	uint16_t tx_burst;
	uint16_t rx_burst;
	uint32_t fd;
};
#ifndef POLLRDNORM
#define POLLRDNORM	0x0040
#endif

#ifndef POLLWRNORM
#define POLLWRNORM	0x0100
#endif

#define	FD_PORT_FREE	UINT32_MAX
#define	FD_PORT_RSRV	(FD_PORT_FREE - 1)

struct fd_port {
	uint32_t port;
};

struct netmap_state {
	struct rte_netmap_conf conf;
	uintptr_t buf_start;
	void     *mem;
	uint32_t  mem_sz;
	uint32_t  netif_memsz;
};
#define	COMPAT_NETMAP_MAX_NOFILE	(2 * RTE_MAX_ETHPORTS)
#define	COMPAT_NETMAP_MAX_BURST		64
#define	COMPAT_NETMAP_MAX_PKT_PER_SYNC	(2 * COMPAT_NETMAP_MAX_BURST)
static struct netmap_port ports[RTE_MAX_ETHPORTS];
static struct netmap_state netmap;

static struct fd_port fd_port[COMPAT_NETMAP_MAX_NOFILE];
static const int next_fd_start = RLIMIT_NOFILE + 1;
static rte_spinlock_t netmap_lock;
#define	IDX_TO_FD(x)	((x) + next_fd_start)
#define	FD_TO_IDX(x)	((x) - next_fd_start)
#define	FD_VALID(x)	((x) >= next_fd_start && \
	(x) < (typeof (x))(RTE_DIM(fd_port) + next_fd_start))
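
/*
 * Worked example (editor's note, not in the original source): on Linux,
 * where the resource constant RLIMIT_NOFILE is 7, next_fd_start is 8,
 * so fd_port[0] corresponds to the "fake" descriptor IDX_TO_FD(0) == 8,
 * FD_TO_IDX(8) == 0, and FD_VALID() accepts exactly the descriptors in
 * [next_fd_start, next_fd_start + RTE_DIM(fd_port)).
 */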
#define	PORT_NUM_RINGS	(2 * netmap.conf.max_rings)
#define	PORT_NUM_SLOTS	(PORT_NUM_RINGS * netmap.conf.max_slots)
#define	BUF_IDX(port, ring, slot) \
	(((port) * PORT_NUM_RINGS + (ring)) * netmap.conf.max_slots + \
	(slot))
#define	NETMAP_IF_RING_OFS(rid, rings, slots)	({\
	struct netmap_if *_if; \
	struct netmap_ring *_rg; \
	sizeof(*_if) + \
	(rings) * sizeof(_if->ring_ofs[0]) + \
	(rid) * sizeof(*_rg) + \
	(slots) * sizeof(_rg->slot[0]); \
	})
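
/*
 * Worked example (editor's note, not in the original source): with
 * max_rings == 1 and max_slots == 1024, PORT_NUM_RINGS == 2 and
 * PORT_NUM_SLOTS == 2048.  NETMAP_IF_RING_OFS(2, 2, 2048) then yields
 * the byte size of one netmap_if header, a two-entry ring_ofs[] table,
 * two netmap_ring headers and 2048 netmap_slot entries, which is exactly
 * how rte_netmap_init() below sizes the per-port share of the region.
 */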
static void netmap_unregif(uint32_t idx, uint32_t port);
static int32_t
ifname_to_portid(const char *ifname, uint8_t *port)
{
	char *endptr;
	uint64_t portid;

	errno = 0;
	portid = strtoul(ifname, &endptr, 10);
	if (endptr == ifname || *endptr != '\0' ||
			portid >= RTE_DIM(ports) || errno != 0)
		return -EINVAL;

	*port = (uint8_t)portid;
	return 0;
}
/**
 * Given a dpdk mbuf, fill in the Netmap slot in ring r and its associated
 * buffer with the data held by the mbuf.
 * Note that mbuf chains are not supported.
 */
static void
mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
{
	char *data;
	uint16_t length;

	data   = rte_pktmbuf_mtod(mbuf, char *);
	length = rte_pktmbuf_data_len(mbuf);

	if (length > r->nr_buf_size)
		length = 0;

	r->slot[index].len = length;
	rte_memcpy(NETMAP_BUF(r, r->slot[index].buf_idx), data, length);
}
/**
 * Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
 * from the data held in the buffer associated with the slot.
 * Allocation/deallocation of the dpdk mbuf are the responsibility of the
 * caller.
 * Note that mbuf chains are not supported.
 */
static void
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
{
	char *data;
	uint16_t length;

	rte_pktmbuf_reset(mbuf);
	length = r->slot[index].len;
	data = rte_pktmbuf_append(mbuf, length);

	if (data != NULL)
		rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
}
static int32_t
fd_reserve(void)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(fd_port) && fd_port[i].port != FD_PORT_FREE;
			i++)
		;

	if (i == RTE_DIM(fd_port))
		return -ENOMEM;

	fd_port[i].port = FD_PORT_RSRV;
	return (IDX_TO_FD(i));
}
static int32_t
fd_release(int32_t fd)
{
	uint32_t idx, port;

	idx = FD_TO_IDX(fd);

	if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
		return -EINVAL;

	/* if we still have a valid port attached, release the port */
	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
		netmap_unregif(idx, port);
	}

	fd_port[idx].port = FD_PORT_FREE;
	return 0;
}
static int32_t
check_nmreq(struct nmreq *req, uint8_t *port)
{
	int32_t rc;
	uint8_t portid;

	if (req == NULL)
		return -EINVAL;

	if (req->nr_version != NETMAP_API) {
		req->nr_version = NETMAP_API;
		return -EINVAL;
	}

	if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
		RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
			"in NIOCGINFO call\n", req->nr_name);
		return rc;
	}

	if (ports[portid].pool == NULL) {
		RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
		return -EINVAL;
	}

	*port = portid;
	return 0;
}
/**
 * Simulate a Netmap NIOCGINFO ioctl: given a struct nmreq holding an interface
 * name (a port number in our case), fill the struct nmreq in with advisory
 * information about the interface: number of rings and their size, total memory
 * required in the map, ...
 * Those are preconfigured using the rte_eth_{,tx,rx}conf and
 * rte_netmap_port_conf structures and calls to rte_netmap_init_port()
 * in the Netmap application.
 */
static int32_t
ioctl_niocginfo(__rte_unused int fd, void *param)
{
	uint8_t portid;
	struct nmreq *req;
	int32_t rc;

	req = (struct nmreq *)param;
	if ((rc = check_nmreq(req, &portid)) != 0)
		return rc;

	req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
	req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
	req->nr_tx_slots = ports[portid].nr_tx_slots;
	req->nr_rx_slots = ports[portid].nr_rx_slots;

	/* in the current implementation all NETIFs share one memory region. */
	req->nr_memsize = netmap.mem_sz;
	req->nr_offset = 0;

	return 0;
}
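
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): how a Netmap
 * application would query port "0" through this shim.  The guard macro
 * and function name are assumptions made for illustration.
 */
static int
example_niocginfo(int fd, struct nmreq *req)
{
	memset(req, 0, sizeof(*req));
	req->nr_version = NETMAP_API;
	snprintf(req->nr_name, sizeof(req->nr_name), "%s", "0");

	/* on success, req holds the ring/slot counts and nr_memsize. */
	return rte_netmap_ioctl(fd, NIOCGINFO, req);
}
#endif /* COMPAT_NETMAP_EXAMPLES */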
static void
netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
	uint32_t num_slots)
{
	uint32_t j;

	ring->buf_ofs = netmap.buf_start - (uintptr_t)ring;
	ring->num_slots = num_slots;
	ring->cur = 0;
	ring->reserved = 0;
	ring->nr_buf_size = netmap.conf.max_bufsz;
	ring->flags = 0;
	ring->ts.tv_sec = 0;
	ring->ts.tv_usec = 0;

	for (j = 0; j < ring->num_slots; j++) {
		ring->slot[j].buf_idx = BUF_IDX(port, ringid, j);
		ring->slot[j].len = 0;
		ring->slot[j].flags = 0;
	}
}
static int32_t
netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
{
	struct netmap_if *nmif;
	struct netmap_ring *ring;
	uint32_t i, slots, start_ring;
	int32_t rc;

	if (ports[port].fd < RTE_DIM(fd_port)) {
		RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
			port, IDX_TO_FD(ports[port].fd));
		return -EBUSY;
	}
	if (fd_port[idx].port != FD_PORT_RSRV) {
		RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
			IDX_TO_FD(idx));
		return -EBUSY;
	}

	nmif = ports[port].nmif;

	/* setup netmap_if fields. */
	memset(nmif, 0, netmap.netif_memsz);

	/* only ALL rings supported right now. */
	if (req->nr_ringid != 0)
		return -EINVAL;

	snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
	nmif->ni_version = req->nr_version;

	/* Netmap uses ni_(r|t)x_rings + 1 */
	nmif->ni_rx_rings = ports[port].nr_rx_rings - 1;
	nmif->ni_tx_rings = ports[port].nr_tx_rings - 1;

	/*
	 * Setup TX rings and slots.
	 * Refer to the comments in netmap.h for details
	 */
	slots = 0;
	for (i = 0; i < nmif->ni_tx_rings + 1; i++) {

		nmif->ring_ofs[i] = NETMAP_IF_RING_OFS(i,
			PORT_NUM_RINGS, slots);

		ring = NETMAP_TXRING(nmif, i);
		netmap_ring_setup(ring, port, i, ports[port].nr_tx_slots);
		ring->avail = ring->num_slots;

		slots += ports[port].nr_tx_slots;
	}

	/*
	 * Setup RX rings and slots.
	 * Refer to the comments in netmap.h for details
	 */
	start_ring = i;

	for (; i < nmif->ni_rx_rings + 1 + start_ring; i++) {

		nmif->ring_ofs[i] = NETMAP_IF_RING_OFS(i,
			PORT_NUM_RINGS, slots);

		ring = NETMAP_RXRING(nmif, (i - start_ring));
		netmap_ring_setup(ring, port, i, ports[port].nr_rx_slots);

		slots += ports[port].nr_rx_slots;
	}

	if ((rc = rte_eth_dev_start(port)) < 0) {
		RTE_LOG(ERR, USER1,
			"Couldn't start ethernet device %s (error %d)\n",
			req->nr_name, rc);
		return rc;
	}

	/* setup fd <--> port relationship. */
	ports[port].fd = idx;
	fd_port[idx].port = port;

	req->nr_memsize = netmap.mem_sz;
	req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;

	return 0;
}
/**
 * Simulate a Netmap NIOCREGIF ioctl:
 */
static int32_t
ioctl_niocregif(int32_t fd, void *param)
{
	uint8_t portid;
	int32_t rc;
	uint32_t idx;
	struct nmreq *req;

	req = (struct nmreq *)param;
	if ((rc = check_nmreq(req, &portid)) != 0)
		return rc;

	idx = FD_TO_IDX(fd);

	rte_spinlock_lock(&netmap_lock);
	rc = netmap_regif(req, idx, portid);
	rte_spinlock_unlock(&netmap_lock);

	return rc;
}
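
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): registering port "0"
 * and locating its netmap_if inside the region returned by
 * rte_netmap_mmap() below.  "mem" is the base address of that mapping;
 * NETMAP_IF() is the standard netmap_user.h helper.
 */
static struct netmap_if *
example_niocregif(int fd, void *mem)
{
	struct nmreq req;

	memset(&req, 0, sizeof(req));
	req.nr_version = NETMAP_API;
	req.nr_ringid = 0;	/* only "all rings" is supported here */
	snprintf(req.nr_name, sizeof(req.nr_name), "%s", "0");

	if (rte_netmap_ioctl(fd, NIOCREGIF, &req) != 0)
		return NULL;

	return NETMAP_IF(mem, req.nr_offset);
}
#endif /* COMPAT_NETMAP_EXAMPLES */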
static void
netmap_unregif(uint32_t idx, uint32_t port)
{
	fd_port[idx].port = FD_PORT_RSRV;
	ports[port].fd = UINT32_MAX;
	rte_eth_dev_stop((uint8_t)port);
}
/**
 * Simulate a Netmap NIOCUNREGIF ioctl: put an interface running in Netmap
 * mode back in "normal" mode. In our case, we just stop the port associated
 * with this file descriptor.
 */
static int32_t
ioctl_niocunregif(int fd)
{
	uint32_t idx, port;
	int32_t rc;

	idx = FD_TO_IDX(fd);

	rte_spinlock_lock(&netmap_lock);

	port = fd_port[idx].port;
	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
		netmap_unregif(idx, port);
		rc = 0;
	} else {
		RTE_LOG(ERR, USER1,
			"%s: %d is not associated with valid port\n",
			__func__, fd);
		rc = -EINVAL;
	}

	rte_spinlock_unlock(&netmap_lock);
	return rc;
}
/**
 * A call to rx_sync_ring will try to fill a Netmap RX ring with as many
 * packets as it can hold coming from its dpdk port.
 */
static int32_t
rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	uint16_t max_burst)
{
	int32_t i, n_rx;
	uint16_t burst_size;
	uint32_t cur_slot, n_free_slots;
	struct rte_mbuf *rx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_free_slots = ring->num_slots - (ring->avail + ring->reserved);
	n_free_slots = RTE_MIN(n_free_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

	while (n_free_slots) {
		burst_size = (uint16_t)RTE_MIN(n_free_slots, RTE_DIM(rx_mbufs));

		/* receive up to burst_size packets from the NIC's queue */
		n_rx = rte_eth_rx_burst(port, ring_number, rx_mbufs,
			burst_size);

		if (n_rx == 0)
			return 0;
		if (unlikely(n_rx < 0))
			return -1;

		/* Put those n_rx packets in the Netmap structures */
		for (i = 0; i < n_rx ; i++) {
			mbuf_to_slot(rx_mbufs[i], ring, cur_slot);
			rte_pktmbuf_free(rx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_rx;
		n_free_slots -= n_rx;
	}

	return 0;
}
static int32_t
rx_sync_if(uint32_t port)
{
	uint16_t burst;
	uint32_t i, rc;
	struct netmap_if *nifp;
	struct netmap_ring *r;

	nifp = ports[port].nmif;
	burst = ports[port].rx_burst;
	rc = 0;

	for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
		r = NETMAP_RXRING(nifp, i);
		rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
		rc += r->avail;
	}

	return rc;
}
/**
 * Simulate a Netmap NIOCRXSYNC ioctl:
 */
static int32_t
ioctl_niocrxsync(int fd)
{
	uint32_t idx, port;

	idx = FD_TO_IDX(fd);
	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
			ports[port].fd == idx) {
		return (rx_sync_if(fd_port[idx].port));
	} else {
		return -EINVAL;
	}
}
/**
 * A call to tx_sync_ring will try to empty a Netmap TX ring by converting its
 * buffers into rte_mbufs and sending them out on the ring's dpdk port.
 */
static int32_t
tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	struct rte_mempool *pool, uint16_t max_burst)
{
	uint32_t i, n_tx;
	uint16_t burst_size;
	uint32_t cur_slot, n_used_slots;
	struct rte_mbuf *tx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_used_slots = ring->num_slots - ring->avail;
	n_used_slots = RTE_MIN(n_used_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

	while (n_used_slots) {
		burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));

		for (i = 0; i < burst_size; i++) {
			tx_mbufs[i] = rte_pktmbuf_alloc(pool);
			if (tx_mbufs[i] == NULL)
				goto err;

			slot_to_mbuf(ring, cur_slot, tx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		n_tx = rte_eth_tx_burst(port, ring_number, tx_mbufs,
			burst_size);

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_tx;
		n_used_slots -= n_tx;

		/* Return the mbufs that failed to transmit to their pool */
		if (unlikely(n_tx != burst_size)) {
			for (i = n_tx; i < burst_size; i++)
				rte_pktmbuf_free(tx_mbufs[i]);
			break;
		}
	}

	return 0;

err:
	/* Return the mbufs allocated before the failure to their pool */
	while (i != 0)
		rte_pktmbuf_free(tx_mbufs[--i]);

	RTE_LOG(ERR, USER1,
		"Couldn't get mbuf from mempool; is the mempool too small?\n");
	return -1;
}
static int32_t
tx_sync_if(uint32_t port)
{
	uint16_t burst;
	uint32_t i, rc;
	struct netmap_if *nifp;
	struct netmap_ring *r;
	struct rte_mempool *mp;

	nifp = ports[port].nmif;
	mp = ports[port].pool;
	burst = ports[port].tx_burst;
	rc = 0;

	for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
		r = NETMAP_TXRING(nifp, i);
		tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
		rc += r->avail;
	}

	return rc;
}
/**
 * Simulate a Netmap NIOCTXSYNC ioctl:
 */
static int32_t
ioctl_nioctxsync(int fd)
{
	uint32_t idx, port;

	idx = FD_TO_IDX(fd);
	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
			ports[port].fd == idx) {
		return (tx_sync_if(fd_port[idx].port));
	} else {
		return -EINVAL;
	}
}
/**
 * Give the library a mempool of rte_mbufs with which it can do the
 * rte_mbuf <--> netmap slot conversions.
 */
int
rte_netmap_init(const struct rte_netmap_conf *conf)
{
	size_t buf_ofs, nmif_sz, sz;
	size_t port_rings, port_slots, port_bufs;
	uint32_t i, port_num;

	port_num = RTE_MAX_ETHPORTS;
	port_rings = 2 * conf->max_rings;
	port_slots = port_rings * conf->max_slots;
	port_bufs = port_slots;

	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
	sz = nmif_sz * port_num;

	buf_ofs = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

	if (sz > UINT32_MAX ||
			(netmap.mem = rte_zmalloc_socket(__func__, sz,
			RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
			__func__, sz);
		return -ENOMEM;
	}

	netmap.mem_sz = sz;
	netmap.netif_memsz = nmif_sz;
	netmap.buf_start = (uintptr_t)netmap.mem + buf_ofs;
	netmap.conf = *conf;

	rte_spinlock_init(&netmap_lock);

	/* Mark all ports as unused and set NETIF pointer. */
	for (i = 0; i != RTE_DIM(ports); i++) {
		ports[i].fd = UINT32_MAX;
		ports[i].nmif = (struct netmap_if *)
			((uintptr_t)netmap.mem + nmif_sz * i);
	}

	/* Mark all fd_ports as unused. */
	for (i = 0; i != RTE_DIM(fd_port); i++) {
		fd_port[i].port = FD_PORT_FREE;
	}

	return 0;
}
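
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): typical library
 * bring-up after rte_eal_init().  The configuration values are
 * assumptions chosen for illustration.
 */
static int
example_init(void)
{
	struct rte_netmap_conf conf = {
		.socket_id = 0,
		.max_bufsz = 2048,	/* must hold one full frame */
		.max_rings = 1,
		.max_slots = 1024,	/* kept to a power of two */
	};

	return rte_netmap_init(&conf);
}
#endif /* COMPAT_NETMAP_EXAMPLES */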
int
rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
{
	int32_t ret;
	uint16_t i;
	uint16_t rx_slots, tx_slots;

	if (conf == NULL ||
			portid >= RTE_DIM(ports) ||
			conf->nr_tx_rings > netmap.conf.max_rings ||
			conf->nr_rx_rings > netmap.conf.max_rings) {
		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
			__func__, portid);
		return -EINVAL;
	}

	rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
	tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);

	if (tx_slots > netmap.conf.max_slots ||
			rx_slots > netmap.conf.max_slots) {
		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
			__func__, portid);
		return -EINVAL;
	}

	ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
		conf->nr_tx_rings, conf->eth_conf);

	if (ret < 0) {
		RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
		return ret;
	}

	for (i = 0; i < conf->nr_tx_rings; i++) {
		ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
			conf->socket_id, NULL);

		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Couldn't configure TX queue %"PRIu16" of "
				"port %"PRIu8"\n", i, portid);
			return ret;
		}

		ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
			conf->socket_id, NULL, conf->pool);

		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Couldn't configure RX queue %"PRIu16" of "
				"port %"PRIu8"\n", i, portid);
			return ret;
		}
	}

	/* copy config to the private storage. */
	ports[portid].eth_conf = conf->eth_conf[0];
	ports[portid].pool = conf->pool;
	ports[portid].socket_id = conf->socket_id;
	ports[portid].nr_tx_rings = conf->nr_tx_rings;
	ports[portid].nr_rx_rings = conf->nr_rx_rings;
	ports[portid].nr_tx_slots = tx_slots;
	ports[portid].nr_rx_slots = rx_slots;
	ports[portid].tx_burst = conf->tx_burst;
	ports[portid].rx_burst = conf->rx_burst;

	return 0;
}
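
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): configuring one
 * ethdev for use through the shim.  "eth_conf" and "pktmbuf_pool" are
 * assumed to be set up by the caller; the ring/slot/burst counts are
 * assumptions chosen for illustration.
 */
static int
example_init_port(uint8_t portid, struct rte_eth_conf *eth_conf,
	struct rte_mempool *pktmbuf_pool)
{
	struct rte_netmap_port_conf port_conf = {
		.eth_conf = eth_conf,
		.pool = pktmbuf_pool,
		.socket_id = 0,
		.nr_tx_rings = 1,
		.nr_rx_rings = 1,
		.nr_tx_slots = 1024,	/* rounded up to a power of two */
		.nr_rx_slots = 1024,
		.tx_burst = 32,
		.rx_burst = 32,
	};

	return rte_netmap_init_port(portid, &port_conf);
}
#endif /* COMPAT_NETMAP_EXAMPLES */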
int
rte_netmap_close(__rte_unused int fd)
{
	int32_t rc;

	rte_spinlock_lock(&netmap_lock);
	rc = fd_release(fd);
	rte_spinlock_unlock(&netmap_lock);

	if (rc < 0) {
		errno = -rc;
		rc = -1;
	}
	return rc;
}
int rte_netmap_ioctl(int fd, uint32_t op, void *param)
{
	int ret;

	if (!FD_VALID(fd)) {
		errno = EBADF;
		return -1;
	}

	switch (op) {

	case NIOCGINFO:
		ret = ioctl_niocginfo(fd, param);
		break;

	case NIOCREGIF:
		ret = ioctl_niocregif(fd, param);
		break;

	case NIOCUNREGIF:
		ret = ioctl_niocunregif(fd);
		break;

	case NIOCRXSYNC:
		ret = ioctl_niocrxsync(fd);
		break;

	case NIOCTXSYNC:
		ret = ioctl_nioctxsync(fd);
		break;

	default:
		ret = -ENOTSUP;
	}

	if (ret < 0) {
		errno = -ret;
		ret = -1;
	} else {
		ret = 0;
	}

	return ret;
}
void *
rte_netmap_mmap(void *addr, size_t length,
	int prot, int flags, int fd, off_t offset)
{
	static const int cprot = PROT_WRITE | PROT_READ;

	if (!FD_VALID(fd) || length + offset > netmap.mem_sz ||
			(prot & cprot) != cprot ||
			((flags & MAP_FIXED) != 0 && addr != NULL)) {
		errno = EINVAL;
		return MAP_FAILED;
	}

	return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
}
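
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): mapping the shared
 * region netmap-style.  "memsize" comes from the nr_memsize field of a
 * previous NIOCGINFO/NIOCREGIF request.
 */
static void *
example_mmap(int fd, size_t memsize)
{
	return rte_netmap_mmap(NULL, memsize, PROT_READ | PROT_WRITE,
		MAP_SHARED, fd, 0);
}
#endif /* COMPAT_NETMAP_EXAMPLES */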
/**
 * Return a "fake" file descriptor with a value above RLIMIT_NOFILE so that
 * any attempt to use that file descriptor with the usual API will fail.
 */
int
rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
{
	int fd;

	rte_spinlock_lock(&netmap_lock);
	fd = fd_reserve();
	rte_spinlock_unlock(&netmap_lock);

	if (fd < 0) {
		errno = -fd;
		fd = -1;
	}
	return fd;
}
/**
 * Doesn't support timeouts other than 0 (poll once) or infinite
 * (negative).
 */
int
rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	int32_t count_it, ret;
	uint32_t i, idx, port;
	uint32_t want_rx, want_tx;

	if (timeout > 0) {
		errno = ENOTSUP;
		return -1;
	}

	ret = 0;
	do {
		for (i = 0; i < nfds; i++) {

			count_it = 0;

			if (!FD_VALID(fds[i].fd) || fds[i].events == 0) {
				fds[i].revents = 0;
				continue;
			}

			idx = FD_TO_IDX(fds[i].fd);
			if ((port = fd_port[idx].port) >= RTE_DIM(ports) ||
					ports[port].fd != idx) {

				fds[i].revents |= POLLERR;
				ret++;
				continue;
			}

			want_rx = fds[i].events & (POLLIN | POLLRDNORM);
			want_tx = fds[i].events & (POLLOUT | POLLWRNORM);

			if (want_rx && rx_sync_if(port) > 0) {
				fds[i].revents = (uint16_t)
					(fds[i].revents | want_rx);
				count_it = 1;
			}
			if (want_tx && tx_sync_if(port) > 0) {
				fds[i].revents = (uint16_t)
					(fds[i].revents | want_tx);
				count_it = 1;
			}

			if (count_it)
				ret++;
		}
	} while (ret == 0 && timeout < 0);

	return ret;
}
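
#ifdef COMPAT_NETMAP_EXAMPLES	/* hypothetical guard: illustrative only */
/*
 * Editor's sketch (not part of the original file): a minimal receive
 * loop over one "fake" descriptor, using the classic netmap cur/avail
 * ring convention honoured by the sync routines above.
 */
static void
example_rx_loop(int fd, struct netmap_if *nifp)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	struct netmap_ring *rx = NETMAP_RXRING(nifp, 0);

	for (;;) {
		if (rte_netmap_poll(&pfd, 1, -1) <= 0)
			continue;

		while (rx->avail > 0) {
			/* consume rx->slot[rx->cur] here ... */
			rx->cur = NETMAP_RING_NEXT(rx, rx->cur);
			rx->avail--;
		}
	}
}
#endif /* COMPAT_NETMAP_EXAMPLES */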