/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "compat_netmap.h"
	struct rte_mempool   *pool;
	struct netmap_if     *nmif;
	struct rte_eth_conf   eth_conf;
	struct rte_eth_txconf tx_conf;
	struct rte_eth_rxconf rx_conf;
#define	POLLRDNORM	0x0040
#define	POLLWRNORM	0x0100
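/* Markers for fd_port[].port: entry unused vs. reserved but not yet bound. */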
#define	FD_PORT_FREE	UINT32_MAX
#define	FD_PORT_RSRV	(FD_PORT_FREE - 1)
	struct rte_netmap_conf conf;
#define	COMPAT_NETMAP_MAX_NOFILE	(2 * RTE_MAX_ETHPORTS)
#define	COMPAT_NETMAP_MAX_BURST		64
#define	COMPAT_NETMAP_MAX_PKT_PER_SYNC	(2 * COMPAT_NETMAP_MAX_BURST)
static struct netmap_port ports[RTE_MAX_ETHPORTS];
static struct netmap_state netmap;

static struct fd_port fd_port[COMPAT_NETMAP_MAX_NOFILE];
static const int next_fd_start = RLIMIT_NOFILE + 1;
static rte_spinlock_t netmap_lock;
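/*
 * "File descriptors" handed out by rte_netmap_open() are indexes into
 * fd_port[], shifted by next_fd_start so that they cannot clash with real
 * descriptors handed out by the OS.
 */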
#define	IDX_TO_FD(x)	((x) + next_fd_start)
#define	FD_TO_IDX(x)	((x) - next_fd_start)
#define	FD_VALID(x)	((x) >= next_fd_start && \
	(x) < (typeof (x))(RTE_DIM(fd_port) + next_fd_start))
#define	PORT_NUM_RINGS	(2 * netmap.conf.max_rings)
#define	PORT_NUM_SLOTS	(PORT_NUM_RINGS * netmap.conf.max_slots)
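/*
 * BUF_IDX() maps (port, ring, slot) to a flat packet-buffer index inside the
 * shared memory area; NETMAP_IF_RING_OFS() gives the byte offset of ring
 * 'rid' within a netmap_if region holding 'rings' rings and 'slots' slots.
 */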
#define	BUF_IDX(port, ring, slot) \
	(((port) * PORT_NUM_RINGS + (ring)) * netmap.conf.max_slots + \
	(slot))
#define	NETMAP_IF_RING_OFS(rid, rings, slots)	({\
	struct netmap_if *_if; \
	struct netmap_ring *_rg; \
	sizeof(*_if) + \
	(rings) * sizeof(_if->ring_ofs[0]) + \
	(rid) * sizeof(*_rg) + \
	(slots) * sizeof(_rg->slot[0]); \
	})
static void netmap_unregif(uint32_t idx, uint32_t port);

ifname_to_portid(const char *ifname, uint8_t *port)
	portid = strtoul(ifname, &endptr, 10);
	if (endptr == ifname || *endptr != '\0' ||
			portid >= RTE_DIM(ports) || errno != 0)
		return -EINVAL;

	*port = (uint8_t)portid;
/**
 * Given a dpdk mbuf, fill in the Netmap slot in ring r and its associated
 * buffer with the data held by the mbuf.
 * Note that mbuf chains are not supported.
 */
mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
	data = rte_pktmbuf_mtod(mbuf, char *);
	length = rte_pktmbuf_data_len(mbuf);

	if (length > r->nr_buf_size)
		length = 0;

	r->slot[index].len = length;
	rte_memcpy(NETMAP_BUF(r, r->slot[index].buf_idx), data, length);
/**
 * Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
 * from the data held in the buffer associated with the slot.
 * Allocation/deallocation of the dpdk mbuf is the responsibility of the
 * caller.
 * Note that mbuf chains are not supported.
 */
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
	rte_pktmbuf_reset(mbuf);
	length = r->slot[index].len;
	data = rte_pktmbuf_append(mbuf, length);

	if (data != NULL)
		rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
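/* Find the first free entry in fd_port[] and mark it as reserved. */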
	for (i = 0; i != RTE_DIM(fd_port) && fd_port[i].port != FD_PORT_FREE;
			i++)
		;
	if (i == RTE_DIM(fd_port))
		return -ENOMEM;
	fd_port[i].port = FD_PORT_RSRV;
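/* Release a fake fd: stop and detach its port (if still bound) and mark the
 * fd_port[] entry free. */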
fd_release(int32_t fd)
	if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
		return;

	/* if we still have a valid port attached, release the port */
	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
		netmap_unregif(idx, port);
	}

	fd_port[idx].port = FD_PORT_FREE;
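/*
 * Validate an nmreq coming from the application: check the API version and
 * resolve the interface name (a port number in this compat layer) to a port
 * that has been configured via rte_netmap_init_port().
 */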
check_nmreq(struct nmreq *req, uint8_t *port)
	if (req->nr_version != NETMAP_API) {
		req->nr_version = NETMAP_API;
		return -EINVAL;
	}

	if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
		RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
			"in NIOCGINFO call\n", req->nr_name);
		return rc;
	}

	if (ports[portid].pool == NULL) {
		RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
		return -EINVAL;
	}
/**
 * Simulate a Netmap NIOCGINFO ioctl: given a struct nmreq holding an interface
 * name (a port number in our case), fill the struct nmreq in with advisory
 * information about the interface: number of rings and their size, total
 * memory required in the map, ...
 * Those values are preconfigured through the rte_eth_{,tx,rx}conf and
 * rte_netmap_port_conf structures and the calls to rte_netmap_init_port()
 * made by the Netmap application.
 */
ioctl_niocginfo(__rte_unused int fd, void *param)
	req = (struct nmreq *)param;
	if ((rc = check_nmreq(req, &portid)) != 0)
		return rc;

	req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
	req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
	req->nr_tx_slots = ports[portid].nr_tx_slots;
	req->nr_rx_slots = ports[portid].nr_rx_slots;

	/* in the current implementation all NETIFs share one memory region. */
	req->nr_memsize = netmap.mem_sz;
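/*
 * Fill in one netmap_ring: buffer offset, number of slots, buffer size,
 * and a packet-buffer index for every slot.
 */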
netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
		uint32_t num_slots)
	ring->buf_ofs = netmap.buf_start - (uintptr_t)ring;
	ring->num_slots = num_slots;
	ring->nr_buf_size = netmap.conf.max_bufsz;
	ring->ts.tv_usec = 0;

	for (j = 0; j < ring->num_slots; j++) {
		ring->slot[j].buf_idx = BUF_IDX(port, ringid, j);
		ring->slot[j].len = 0;
	}
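/*
 * Simulate NIOCREGIF for one port: build the netmap_if and its TX/RX rings
 * inside the shared memory region, start the ethernet device and bind the
 * fake fd to the port.
 */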
netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
{
	struct netmap_if *nmif;
	struct netmap_ring *ring;
	uint32_t i, slots, start_ring;

	if (ports[port].fd < RTE_DIM(fd_port)) {
		RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
			port, IDX_TO_FD(ports[port].fd));
		return -EBUSY;
	}
	if (fd_port[idx].port != FD_PORT_RSRV) {
		RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
			IDX_TO_FD(idx));
		return -EBUSY;
	}

	nmif = ports[port].nmif;

	/* setup netmap_if fields. */
	memset(nmif, 0, netmap.netif_memsz);

	/* only ALL rings supported right now. */
	if (req->nr_ringid != 0)
		return -EINVAL;

	snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
	nmif->ni_version = req->nr_version;

	/* Netmap uses ni_(r|t)x_rings + 1 */
	nmif->ni_rx_rings = ports[port].nr_rx_rings - 1;
	nmif->ni_tx_rings = ports[port].nr_tx_rings - 1;
	/*
	 * Setup TX rings and slots.
	 * Refer to the comments in netmap.h for details.
	 */
	slots = 0;
	for (i = 0; i < nmif->ni_tx_rings + 1; i++) {
		nmif->ring_ofs[i] = NETMAP_IF_RING_OFS(i,
			PORT_NUM_RINGS, slots);
		ring = NETMAP_TXRING(nmif, i);
		netmap_ring_setup(ring, port, i, ports[port].nr_tx_slots);
		ring->avail = ring->num_slots;
		slots += ports[port].nr_tx_slots;
	}
	/*
	 * Setup RX rings and slots.
	 * Refer to the comments in netmap.h for details.
	 */
	start_ring = i;
	for (; i < nmif->ni_rx_rings + 1 + start_ring; i++) {
		nmif->ring_ofs[i] = NETMAP_IF_RING_OFS(i,
			PORT_NUM_RINGS, slots);
		ring = NETMAP_RXRING(nmif, (i - start_ring));
		netmap_ring_setup(ring, port, i, ports[port].nr_rx_slots);
		slots += ports[port].nr_rx_slots;
	}
	if ((rc = rte_eth_dev_start(port)) < 0) {
		RTE_LOG(ERR, USER1,
			"Couldn't start ethernet device %s (error %d)\n",
			req->nr_name, rc);
		return rc;
	}

	/* setup fd <--> port relationship. */
	ports[port].fd = idx;
	fd_port[idx].port = port;

	req->nr_memsize = netmap.mem_sz;
	req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;
/**
 * Simulate a Netmap NIOCREGIF ioctl:
 */
ioctl_niocregif(int32_t fd, void *param)
	req = (struct nmreq *)param;
	if ((rc = check_nmreq(req, &portid)) != 0)
		return rc;

	idx = FD_TO_IDX(fd);
	rte_spinlock_lock(&netmap_lock);
	rc = netmap_regif(req, idx, portid);
	rte_spinlock_unlock(&netmap_lock);
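/* Undo netmap_regif(): detach the fd from the port and stop the device. */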
netmap_unregif(uint32_t idx, uint32_t port)
	fd_port[idx].port = FD_PORT_RSRV;
	ports[port].fd = UINT32_MAX;
	rte_eth_dev_stop((uint8_t)port);
/**
 * Simulate a Netmap NIOCUNREGIF ioctl: put an interface running in Netmap
 * mode back in "normal" mode. In our case, we just stop the port associated
 * with this file descriptor.
 */
ioctl_niocunregif(int fd)
	idx = FD_TO_IDX(fd);

	rte_spinlock_lock(&netmap_lock);

	port = fd_port[idx].port;
	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
		netmap_unregif(idx, port);
	} else {
		RTE_LOG(ERR, USER1,
			"%s: %d is not associated with a valid port\n",
			__func__, fd);
	}

	rte_spinlock_unlock(&netmap_lock);
/**
 * A call to rx_sync_ring will try to fill a Netmap RX ring with as many
 * packets as it can hold coming from its dpdk port.
 */
rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	uint16_t max_burst)
	uint32_t cur_slot, n_free_slots;
	struct rte_mbuf *rx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_free_slots = ring->num_slots - (ring->avail + ring->reserved);
	n_free_slots = RTE_MIN(n_free_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);
	while (n_free_slots) {
		burst_size = (uint16_t)RTE_MIN(n_free_slots, RTE_DIM(rx_mbufs));

		/* receive up to burst_size packets from the NIC's queue */
		n_rx = rte_eth_rx_burst(port, ring_number, rx_mbufs,
			burst_size);
		if (unlikely(n_rx < 0))
			return -1;

		/* Put those n_rx packets in the Netmap structures */
		for (i = 0; i < n_rx; i++) {
			mbuf_to_slot(rx_mbufs[i], ring, cur_slot);
			rte_pktmbuf_free(rx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_rx;
		n_free_slots -= n_rx;
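/* Run rx_sync_ring() on every RX ring of the interface bound to this port. */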
rx_sync_if(uint32_t port)
	struct netmap_if *nifp;
	struct netmap_ring *r;

	nifp = ports[port].nmif;
	burst = ports[port].rx_burst;

	for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
		r = NETMAP_RXRING(nifp, i);
		rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
	}
/**
 * Simulate a Netmap NIOCRXSYNC ioctl:
 */
ioctl_niocrxsync(int fd)
	idx = FD_TO_IDX(fd);
	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
			ports[port].fd == idx) {
		return rx_sync_if(fd_port[idx].port);
	}
/**
 * A call to tx_sync_ring will try to empty a Netmap TX ring by converting its
 * buffers into rte_mbufs and sending them out on the ring's dpdk port.
 */
tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	struct rte_mempool *pool, uint16_t max_burst)
	uint32_t cur_slot, n_used_slots;
	struct rte_mbuf *tx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_used_slots = ring->num_slots - ring->avail;
	n_used_slots = RTE_MIN(n_used_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);
	while (n_used_slots) {
		burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));
		for (i = 0; i < burst_size; i++) {
			tx_mbufs[i] = rte_pktmbuf_alloc(pool);
			if (tx_mbufs[i] == NULL)
				goto err;
			slot_to_mbuf(ring, cur_slot, tx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		n_tx = rte_eth_tx_burst(port, ring_number, tx_mbufs,
			burst_size);

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_tx;
		n_used_slots -= n_tx;

		/* Return the mbufs that failed to transmit to their pool */
		if (unlikely(n_tx != burst_size)) {
			for (i = n_tx; i < burst_size; i++)
				rte_pktmbuf_free(tx_mbufs[i]);
			break;
		}
	}

	return 0;

err:
	for (; i < burst_size; i++)
		rte_pktmbuf_free(tx_mbufs[i]);
	RTE_LOG(ERR, USER1,
		"Couldn't get mbuf from mempool; is the mempool too small?\n");
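/* Run tx_sync_ring() on every TX ring of the interface bound to this port. */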
tx_sync_if(uint32_t port)
	struct netmap_if *nifp;
	struct netmap_ring *r;
	struct rte_mempool *mp;

	nifp = ports[port].nmif;
	mp = ports[port].pool;
	burst = ports[port].tx_burst;

	for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
		r = NETMAP_TXRING(nifp, i);
		tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
	}
/**
 * Simulate a Netmap NIOCTXSYNC ioctl:
 */
ioctl_nioctxsync(int fd)
	idx = FD_TO_IDX(fd);
	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
			ports[port].fd == idx) {
		return tx_sync_if(fd_port[idx].port);
	}
/**
 * Give the library a mempool of rte_mbufs with which it can do the
 * rte_mbuf <--> netmap slot conversions.
 */
rte_netmap_init(const struct rte_netmap_conf *conf)
	size_t buf_ofs, nmif_sz, sz;
	size_t port_rings, port_slots, port_bufs;
	uint32_t i, port_num;

	port_num = RTE_MAX_ETHPORTS;
	port_rings = 2 * conf->max_rings;
	port_slots = port_rings * conf->max_slots;
	port_bufs = port_slots;

	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
	sz = nmif_sz * port_num;

	buf_ofs = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

	if (sz > UINT32_MAX ||
			(netmap.mem = rte_zmalloc_socket(__func__, sz,
			RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
			__func__, sz);
		return -ENOMEM;
	}

	netmap.mem_sz = sz;
	netmap.netif_memsz = nmif_sz;
	netmap.buf_start = (uintptr_t)netmap.mem + buf_ofs;

	rte_spinlock_init(&netmap_lock);

	/* Mark all ports as unused and set NETIF pointer. */
	for (i = 0; i != RTE_DIM(ports); i++) {
		ports[i].fd = UINT32_MAX;
		ports[i].nmif = (struct netmap_if *)
			((uintptr_t)netmap.mem + nmif_sz * i);
	}

	/* Mark all fd_ports as unused. */
	for (i = 0; i != RTE_DIM(fd_port); i++) {
		fd_port[i].port = FD_PORT_FREE;
	}
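/*
 * Configure a DPDK port for use through the compat layer: set up its RX/TX
 * queues and record the configuration in the ports[] table.
 */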
rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
	uint16_t rx_slots, tx_slots;

	if (conf == NULL ||
			portid >= RTE_DIM(ports) ||
			conf->nr_tx_rings > netmap.conf.max_rings ||
			conf->nr_rx_rings > netmap.conf.max_rings) {
		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
			__func__, portid);
		return -EINVAL;
	}

	rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
	tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);

	if (tx_slots > netmap.conf.max_slots ||
			rx_slots > netmap.conf.max_slots) {
		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
			__func__, portid);
		return -EINVAL;
	}
	ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
		conf->nr_tx_rings, conf->eth_conf);
	if (ret < 0) {
		RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
		return ret;
	}

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Couldn't adjust the number of descriptors for port %hhu\n",
			portid);
		return ret;
	}

	for (i = 0; i < conf->nr_tx_rings; i++) {
		ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
			conf->socket_id, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Couldn't configure TX queue %"PRIu16" of "
				"port %hhu\n", i, portid);
			return ret;
		}

		ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
			conf->socket_id, NULL, conf->pool);
		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Couldn't configure RX queue %"PRIu16" of "
				"port %hhu\n", i, portid);
			return ret;
		}
	}

	/* copy config to the private storage. */
	ports[portid].eth_conf = conf->eth_conf[0];
	ports[portid].pool = conf->pool;
	ports[portid].socket_id = conf->socket_id;
	ports[portid].nr_tx_rings = conf->nr_tx_rings;
	ports[portid].nr_rx_rings = conf->nr_rx_rings;
	ports[portid].nr_tx_slots = tx_slots;
	ports[portid].nr_rx_slots = rx_slots;
	ports[portid].tx_burst = conf->tx_burst;
	ports[portid].rx_burst = conf->rx_burst;
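/* Close a fake fd obtained from rte_netmap_open(). */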
rte_netmap_close(__rte_unused int fd)
	rte_spinlock_lock(&netmap_lock);
	fd_release(fd);
	rte_spinlock_unlock(&netmap_lock);
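/*
 * Dispatch the emulated netmap ioctls (NIOCGINFO, NIOCREGIF, NIOCUNREGIF,
 * NIOCRXSYNC, NIOCTXSYNC) to the handlers above.
 */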
int rte_netmap_ioctl(int fd, uint32_t op, void *param)
	ret = ioctl_niocginfo(fd, param);
	ret = ioctl_niocregif(fd, param);
	ret = ioctl_niocunregif(fd);
	ret = ioctl_niocrxsync(fd);
	ret = ioctl_nioctxsync(fd);
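/*
 * Simulate mmap() on a netmap fd: hand back a pointer into the single shared
 * memory region at the requested offset.
 */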
rte_netmap_mmap(void *addr, size_t length,
	int prot, int flags, int fd, off_t offset)
	static const int cprot = PROT_WRITE | PROT_READ;

	if (!FD_VALID(fd) || length + offset > netmap.mem_sz ||
			(prot & cprot) != cprot ||
			((flags & MAP_FIXED) != 0 && addr != NULL)) {
		errno = EINVAL;
		return MAP_FAILED;
	}

	return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
/**
 * Return a "fake" file descriptor with a value above RLIMIT_NOFILE so that
 * any attempt to use that file descriptor with the usual API will fail.
 */
rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
	rte_spinlock_lock(&netmap_lock);
	fd = fd_reserve();
	rte_spinlock_unlock(&netmap_lock);
/**
 * Doesn't support timeouts other than 0 or infinite (negative).
 */
rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
	int32_t count_it, ret;
	uint32_t i, idx, port;
	uint32_t want_rx, want_tx;

	do {
		for (i = 0; i < nfds; i++) {
			if (!FD_VALID(fds[i].fd) || fds[i].events == 0) {
				fds[i].revents = 0;
				continue;
			}

			idx = FD_TO_IDX(fds[i].fd);
			if ((port = fd_port[idx].port) >= RTE_DIM(ports) ||
					ports[port].fd != idx) {
				fds[i].revents |= POLLERR;
				continue;
			}

			want_rx = fds[i].events & (POLLIN | POLLRDNORM);
			want_tx = fds[i].events & (POLLOUT | POLLWRNORM);

			if (want_rx && rx_sync_if(port) > 0) {
				fds[i].revents = (uint16_t)
					(fds[i].revents | want_rx);
			}
			if (want_tx && tx_sync_if(port) > 0) {
				fds[i].revents = (uint16_t)
					(fds[i].revents | want_tx);
			}
		}
	}
	while ((ret == 0 && timeout < 0) || timeout);
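/*
 * A minimal usage sketch of the compat API above, as it could look from a
 * Netmap application ported onto this layer (variable names are illustrative
 * only; error handling omitted). The interface "name" is the DPDK port
 * number, and NETMAP_IF() comes from the regular netmap headers:
 *
 *	struct nmreq req;
 *	struct netmap_if *nifp;
 *	void *mem;
 *	int fd;
 *
 *	fd = rte_netmap_open("/dev/netmap", O_RDWR);
 *
 *	memset(&req, 0, sizeof(req));
 *	snprintf(req.nr_name, sizeof(req.nr_name), "0");
 *	req.nr_version = NETMAP_API;
 *	rte_netmap_ioctl(fd, NIOCREGIF, &req);
 *
 *	mem = rte_netmap_mmap(NULL, req.nr_memsize,
 *		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 */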