X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fnetmap_compat%2Flib%2Fcompat_netmap.c;h=c25cc093d0a75350597a712828241b2bc34becf7;hb=46971b273ec7f469ca67a865453e1dd36274c5e1;hp=234836675afcad87fbc3e50323eeb8c28446d11f;hpb=81f7ecd934372fc9f592d1322f8eff86350fa4f5;p=dpdk.git

diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index 234836675a..c25cc093d0 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include
@@ -47,7 +18,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -73,6 +43,14 @@ struct fd_port {
 	uint32_t port;
 };
 
+#ifndef POLLRDNORM
+#define POLLRDNORM	0x0040
+#endif
+
+#ifndef POLLWRNORM
+#define POLLWRNORM	0x0100
+#endif
+
 #define FD_PORT_FREE	UINT32_MAX
 #define FD_PORT_RSRV	(FD_PORT_FREE - 1)
 
@@ -121,7 +99,7 @@ static void
 netmap_unregif(uint32_t idx, uint32_t port);
 
 static int32_t
-ifname_to_portid(const char *ifname, uint8_t *port)
+ifname_to_portid(const char *ifname, uint16_t *port)
 {
 	char *endptr;
 	uint64_t portid;
@@ -130,10 +108,10 @@ ifname_to_portid(const char *ifname, uint8_t *port)
 	portid = strtoul(ifname, &endptr, 10);
 	if (endptr == ifname || *endptr != '\0' ||
 			portid >= RTE_DIM(ports) || errno != 0)
-		return (-EINVAL);
+		return -EINVAL;
 
-	*port = (uint8_t)portid;
-	return (0);
+	*port = portid;
+	return 0;
 }
 
 /**
@@ -160,7 +138,7 @@ mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
 /**
  * Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
  * from the data held in the buffer associated with the slot.
- * Allocation/deallocation of the dpdk mbuf are the responsability of the
+ * Allocation/deallocation of the dpdk mbuf are the responsibility of the
  * caller.
  * Note that mbuf chains are not supported.
  */
@@ -188,10 +166,10 @@ fd_reserve(void)
 		;
 
 	if (i == RTE_DIM(fd_port))
-		return (-ENOMEM);
+		return -ENOMEM;
 
 	fd_port[i].port = FD_PORT_RSRV;
-	return (IDX_TO_FD(i));
+	return IDX_TO_FD(i);
 }
 
 static int32_t
@@ -202,7 +180,7 @@ fd_release(int32_t fd)
 
 	idx = FD_TO_IDX(fd);
 	if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
-		return (-EINVAL);
+		return -EINVAL;
 
 	/* if we still have a valid port attached, release the port */
 	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
@@ -210,36 +188,36 @@ fd_release(int32_t fd)
 	}
 
 	fd_port[idx].port = FD_PORT_FREE;
-	return (0);
+	return 0;
 }
 
 static int
-check_nmreq(struct nmreq *req, uint8_t *port)
+check_nmreq(struct nmreq *req, uint16_t *port)
 {
 	int32_t rc;
-	uint8_t portid;
+	uint16_t portid;
 
 	if (req == NULL)
-		return (-EINVAL);
+		return -EINVAL;
 
 	if (req->nr_version != NETMAP_API) {
 		req->nr_version = NETMAP_API;
-		return (-EINVAL);
+		return -EINVAL;
 	}
 
 	if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
 		RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
			"in NIOCGINFO call\n", req->nr_name);
-		return (rc);
+		return rc;
 	}
 
 	if (ports[portid].pool == NULL) {
-		RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
-		return (-EINVAL);
+		RTE_LOG(ERR, USER1, "Misconfigured portid %u\n", portid);
+		return -EINVAL;
	}
 
 	*port = portid;
-	return (0);
+	return 0;
 }
 
 /**
@@ -254,13 +232,13 @@ check_nmreq(struct nmreq *req, uint8_t *port)
 static int
 ioctl_niocginfo(__rte_unused int fd, void * param)
 {
-	uint8_t portid;
+	uint16_t portid;
 	struct nmreq *req;
 	int32_t rc;
 
 	req = (struct nmreq *)param;
 	if ((rc = check_nmreq(req, &portid)) != 0)
-		return (rc);
+		return rc;
 
 	req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
 	req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
@@ -271,11 +249,11 @@ ioctl_niocginfo(__rte_unused int fd, void * param)
 	req->nr_memsize = netmap.mem_sz;
 	req->nr_offset = 0;
 
-	return (0);
+	return 0;
 }
 
 static void
-netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
+netmap_ring_setup(struct netmap_ring *ring, uint16_t port, uint32_t ringid,
 		uint32_t num_slots)
 {
 	uint32_t j;
@@ -297,7 +275,7 @@ netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
 }
 
 static int
-netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
+netmap_regif(struct nmreq *req, uint32_t idx, uint16_t port)
 {
 	struct netmap_if *nmif;
 	struct netmap_ring *ring;
@@ -305,14 +283,14 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
 	int32_t rc;
 
 	if (ports[port].fd < RTE_DIM(fd_port)) {
-		RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
+		RTE_LOG(ERR, USER1, "port %u already in use by fd: %u\n",
 			port, IDX_TO_FD(ports[port].fd));
-		return (-EBUSY);
+		return -EBUSY;
 	}
 	if (fd_port[idx].port != FD_PORT_RSRV) {
 		RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
 			IDX_TO_FD(idx));
-		return (-EBUSY);
+		return -EBUSY;
 	}
 
 	nmif = ports[port].nmif;
@@ -322,9 +300,9 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
 	/* only ALL rings supported right now.
 	*/
 	if (req->nr_ringid != 0)
-		return (-EINVAL);
+		return -EINVAL;
 
-	snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
+	strlcpy(nmif->ni_name, req->nr_name, sizeof(nmif->ni_name));
 	nmif->ni_version = req->nr_version;
 
 	/* Netmap uses ni_(r|t)x_rings + 1 */
@@ -372,7 +350,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
 		RTE_LOG(ERR, USER1,
 			"Couldn't start ethernet device %s (error %d)\n",
 			req->nr_name, rc);
-		return (rc);
+		return rc;
 	}
 
 	/* setup fdi <--> port relationtip. */
@@ -382,7 +360,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
 
 	req->nr_memsize = netmap.mem_sz;
 	req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;
-	return (0);
+	return 0;
 }
 
 /**
@@ -391,14 +369,14 @@
 static int
 ioctl_niocregif(int32_t fd, void * param)
 {
-	uint8_t portid;
+	uint16_t portid;
 	int32_t rc;
 	uint32_t idx;
 	struct nmreq *req;
 
 	req = (struct nmreq *)param;
 	if ((rc = check_nmreq(req, &portid)) != 0)
-		return (rc);
+		return rc;
 
 	idx = FD_TO_IDX(fd);
 
@@ -406,7 +384,7 @@ ioctl_niocregif(int32_t fd, void * param)
 	rc = netmap_regif(req, idx, portid);
 	rte_spinlock_unlock(&netmap_lock);
 
-	return (rc);
+	return rc;
 }
 
 static void
@@ -414,7 +392,7 @@ netmap_unregif(uint32_t idx, uint32_t port)
 {
 	fd_port[idx].port = FD_PORT_RSRV;
 	ports[port].fd = UINT32_MAX;
-	rte_eth_dev_stop((uint8_t)port);
+	rte_eth_dev_stop(port);
 }
 
 /**
@@ -444,7 +422,7 @@ ioctl_niocunregif(int fd)
 	}
 
 	rte_spinlock_unlock(&netmap_lock);
-	return (rc);
+	return rc;
 }
 
 /**
@@ -452,7 +430,7 @@ ioctl_niocunregif(int fd)
  * packets as it can hold coming from its dpdk port.
  */
 static inline int
-rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+rx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
 		uint16_t max_burst)
 {
 	int32_t i, n_rx;
@@ -505,11 +483,11 @@ rx_sync_if(uint32_t port)
 
 	for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
 		r = NETMAP_RXRING(nifp, i);
-		rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
+		rx_sync_ring(r, port, (uint16_t)i, burst);
 		rc += r->avail;
 	}
 
-	return (rc);
+	return rc;
 }
 
 /**
@@ -523,9 +501,9 @@ ioctl_niocrxsync(int fd)
 	idx = FD_TO_IDX(fd);
 	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
 			ports[port].fd == idx) {
-		return (rx_sync_if(fd_port[idx].port));
+		return rx_sync_if(fd_port[idx].port);
 	} else {
-		return (-EINVAL);
+		return -EINVAL;
 	}
 }
 
@@ -534,7 +512,7 @@
  * buffers into rte_mbufs and sending them out on the rings's dpdk port.
  */
 static int
-tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+tx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
 	struct rte_mempool *pool, uint16_t max_burst)
 {
 	uint32_t i, n_tx;
@@ -550,7 +528,7 @@ tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
 	burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));
 
 	for (i = 0; i < burst_size; i++) {
-   		tx_mbufs[i] = rte_pktmbuf_alloc(pool);
+		tx_mbufs[i] = rte_pktmbuf_alloc(pool);
 
 		if (tx_mbufs[i] == NULL)
 			goto err;
@@ -600,11 +578,11 @@ tx_sync_if(uint32_t port)
 
 	for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
 		r = NETMAP_TXRING(nifp, i);
-		tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
+		tx_sync_ring(r, port, (uint16_t)i, mp, burst);
 		rc += r->avail;
 	}
 
-	return (rc);
+	return rc;
 }
 
 /**
@@ -618,9 +596,9 @@ ioctl_nioctxsync(int fd)
 	idx = FD_TO_IDX(fd);
 	if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
 			ports[port].fd == idx) {
-		return (tx_sync_if(fd_port[idx].port));
+		return tx_sync_if(fd_port[idx].port);
 	} else {
-		return (-EINVAL);
+		return -EINVAL;
 	}
 }
 
@@ -638,20 +616,20 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
 	port_num = RTE_MAX_ETHPORTS;
 	port_rings = 2 * conf->max_rings;
 	port_slots = port_rings * conf->max_slots;
-	port_bufs = port_slots;
+	port_bufs = port_slots;
 
 	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
 	sz = nmif_sz * port_num;
 
-	buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
+	buf_ofs = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
 	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;
 
 	if (sz > UINT32_MAX ||
 			(netmap.mem = rte_zmalloc_socket(__func__, sz,
-			CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
+			RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
 		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
 			__func__, sz);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	netmap.mem_sz = sz;
@@ -673,65 +651,90 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
 		fd_port[i].port = FD_PORT_FREE;
 	}
 
-	return (0);
+	return 0;
 }
 
 int
-rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
+rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
 {
 	int32_t ret;
 	uint16_t i;
 	uint16_t rx_slots, tx_slots;
+	struct rte_eth_rxconf rxq_conf;
+	struct rte_eth_txconf txq_conf;
+	struct rte_eth_dev_info dev_info;
 
 	if (conf == NULL ||
 			portid >= RTE_DIM(ports) ||
 			conf->nr_tx_rings > netmap.conf.max_rings ||
 			conf->nr_rx_rings > netmap.conf.max_rings) {
 
-		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+		RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
 			__func__, portid);
-		return (-EINVAL);
+		return -EINVAL;
 	}
 
-	rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
-	tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
+	rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
+	tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
 
 	if (tx_slots > netmap.conf.max_slots ||
 			rx_slots > netmap.conf.max_slots) {
 
-		RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+		RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
 			__func__, portid);
-		return (-EINVAL);
+		return -EINVAL;
+	}
+
+	ret = rte_eth_dev_info_get(portid, &dev_info);
+	if (ret != 0) {
+		RTE_LOG(ERR, USER1,
+			"Error during getting device (port %u) info: %s\n",
+			portid, strerror(-ret));
+		return ret;
 	}
 
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		conf->eth_conf->txmode.offloads |=
+			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
 		conf->nr_tx_rings, conf->eth_conf);
 
 	if (ret < 0) {
-		RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
-		return (ret);
+		RTE_LOG(ERR, USER1, "Couldn't configure port %u\n", portid);
+		return ret;
+	}
+
+	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
+
+	if (ret < 0) {
+		RTE_LOG(ERR, USER1,
+			"Couldn't adjust number of descriptors for port %u\n",
+			portid);
+		return ret;
 	}
 
+	rxq_conf = dev_info.default_rxconf;
+	rxq_conf.offloads = conf->eth_conf->rxmode.offloads;
+	txq_conf = dev_info.default_txconf;
+	txq_conf.offloads = conf->eth_conf->txmode.offloads;
 	for (i = 0; i < conf->nr_tx_rings; i++) {
 		ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
-			conf->socket_id, NULL);
+			conf->socket_id, &txq_conf);
 		if (ret < 0) {
 			RTE_LOG(ERR, USER1,
-				"Couldn't configure TX queue %"PRIu16" of "
-				"port %"PRIu8"\n",
+				"fail to configure TX queue %u of port %u\n",
 				i, portid);
-			return (ret);
+			return ret;
 		}
 
 		ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
-			conf->socket_id, NULL, conf->pool);
+			conf->socket_id, &rxq_conf, conf->pool);
 		if (ret < 0) {
 			RTE_LOG(ERR, USER1,
-				"Couldn't configure RX queue %"PRIu16" of "
-				"port %"PRIu8"\n",
+				"fail to configure RX queue %u of port %u\n",
 				i, portid);
-			return (ret);
+			return ret;
 		}
 	}
 
@@ -746,7 +749,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
 	ports[portid].tx_burst = conf->tx_burst;
 	ports[portid].rx_burst = conf->rx_burst;
 
-	return (0);
+	return 0;
 }
 
 int
@@ -762,16 +765,16 @@ rte_netmap_close(__rte_unused int fd)
 		errno =-rc;
 		rc = -1;
 	}
-	return (rc);
+	return rc;
 }
 
-int rte_netmap_ioctl(int fd, int op, void *param)
+int rte_netmap_ioctl(int fd, uint32_t op, void *param)
 {
 	int ret;
 
 	if (!FD_VALID(fd)) {
 		errno = EBADF;
-		return (-1);
+		return -1;
 	}
 
 	switch (op) {
@@ -807,7 +810,7 @@ int rte_netmap_ioctl(int fd, int op, void *param)
 		ret = 0;
 	}
 
-	return (ret);
+	return ret;
 }
 
 void *
@@ -821,10 +824,10 @@ rte_netmap_mmap(void *addr, size_t length,
 			((flags & MAP_FIXED) != 0 && addr != NULL)) {
 
 		errno = EINVAL;
-		return (MAP_FAILED);
+		return MAP_FAILED;
 	}
 
-	return ((void *)((uintptr_t)netmap.mem + offset));
+	return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
 }
 
 /**
@@ -844,7 +847,7 @@ rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
 		errno = -fd;
 		fd = -1;
 	}
-	return (fd);
+	return fd;
 }
 
 /**
@@ -857,6 +860,9 @@ rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
 	uint32_t i, idx, port;
 	uint32_t want_rx, want_tx;
 
+	if (timeout > 0)
+		return -1;
+
 	ret = 0;
 	do {
 		for (i = 0; i < nfds; i++) {
@@ -870,7 +876,7 @@ rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
 			idx = FD_TO_IDX(fds[i].fd);
 			if ((port = fd_port[idx].port) >= RTE_DIM(ports) ||
-				ports[port].fd != idx) {
+					ports[port].fd != idx) {
 
 				fds[i].revents |= POLLERR;
 				ret++;