-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <errno.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
uint32_t port;
};
+#ifndef POLLRDNORM
+#define POLLRDNORM 0x0040
+#endif
+
+#ifndef POLLWRNORM
+#define POLLWRNORM 0x0100
+#endif
+
#define FD_PORT_FREE UINT32_MAX
#define FD_PORT_RSRV (FD_PORT_FREE - 1)
static int32_t
-ifname_to_portid(const char *ifname, uint8_t *port)
+ifname_to_portid(const char *ifname, uint16_t *port)
{
char *endptr;
uint64_t portid;
portid = strtoul(ifname, &endptr, 10);
if (endptr == ifname || *endptr != '\0' ||
portid >= RTE_DIM(ports) || errno != 0)
- return (-EINVAL);
+ return -EINVAL;
- *port = (uint8_t)portid;
- return (0);
+ *port = portid;
+ return 0;
}
/**
/**
* Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
* from the data held in the buffer associated with the slot.
- * Allocation/deallocation of the dpdk mbuf are the responsability of the
+ * Allocation/deallocation of the dpdk mbuf are the responsibility of the
* caller.
* Note that mbuf chains are not supported.
*/
;
if (i == RTE_DIM(fd_port))
- return (-ENOMEM);
+ return -ENOMEM;
fd_port[i].port = FD_PORT_RSRV;
- return (IDX_TO_FD(i));
+ return IDX_TO_FD(i);
}
static int32_t
idx = FD_TO_IDX(fd);
if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
- return (-EINVAL);
+ return -EINVAL;
/* if we still have a valid port attached, release the port */
if (port < RTE_DIM(ports) && ports[port].fd == idx) {
}
fd_port[idx].port = FD_PORT_FREE;
- return (0);
+ return 0;
}
static int
-check_nmreq(struct nmreq *req, uint8_t *port)
+check_nmreq(struct nmreq *req, uint16_t *port)
{
int32_t rc;
- uint8_t portid;
+ uint16_t portid;
if (req == NULL)
- return (-EINVAL);
+ return -EINVAL;
if (req->nr_version != NETMAP_API) {
req->nr_version = NETMAP_API;
- return (-EINVAL);
+ return -EINVAL;
}
if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
"in NIOCGINFO call\n", req->nr_name);
- return (rc);
+ return rc;
}
if (ports[portid].pool == NULL) {
- RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
- return (-EINVAL);
+ RTE_LOG(ERR, USER1, "Misconfigured portid %u\n", portid);
+ return -EINVAL;
}
*port = portid;
- return (0);
+ return 0;
}
/**
static int
ioctl_niocginfo(__rte_unused int fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
struct nmreq *req;
int32_t rc;
req = (struct nmreq *)param;
if ((rc = check_nmreq(req, &portid)) != 0)
- return (rc);
+ return rc;
req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
req->nr_memsize = netmap.mem_sz;
req->nr_offset = 0;
- return (0);
+ return 0;
}
static void
-netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
+netmap_ring_setup(struct netmap_ring *ring, uint16_t port, uint32_t ringid,
uint32_t num_slots)
{
uint32_t j;
}
static int
-netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
+netmap_regif(struct nmreq *req, uint32_t idx, uint16_t port)
{
struct netmap_if *nmif;
struct netmap_ring *ring;
int32_t rc;
if (ports[port].fd < RTE_DIM(fd_port)) {
- RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
+ RTE_LOG(ERR, USER1, "port %u already in use by fd: %u\n",
port, IDX_TO_FD(ports[port].fd));
- return (-EBUSY);
+ return -EBUSY;
}
if (fd_port[idx].port != FD_PORT_RSRV) {
RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
IDX_TO_FD(idx));
- return (-EBUSY);
+ return -EBUSY;
}
nmif = ports[port].nmif;
/* only ALL rings supported right now. */
if (req->nr_ringid != 0)
- return (-EINVAL);
+ return -EINVAL;
snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
nmif->ni_version = req->nr_version;
RTE_LOG(ERR, USER1,
"Couldn't start ethernet device %s (error %d)\n",
req->nr_name, rc);
- return (rc);
+ return rc;
}
/* setup fdi <--> port relationtip. */
req->nr_memsize = netmap.mem_sz;
req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;
- return (0);
+ return 0;
}
/**
static int
ioctl_niocregif(int32_t fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
int32_t rc;
uint32_t idx;
struct nmreq *req;
req = (struct nmreq *)param;
if ((rc = check_nmreq(req, &portid)) != 0)
- return (rc);
+ return rc;
idx = FD_TO_IDX(fd);
rc = netmap_regif(req, idx, portid);
rte_spinlock_unlock(&netmap_lock);
- return (rc);
+ return rc;
}
static void
{
fd_port[idx].port = FD_PORT_RSRV;
ports[port].fd = UINT32_MAX;
- rte_eth_dev_stop((uint8_t)port);
+ rte_eth_dev_stop(port);
}
/**
}
rte_spinlock_unlock(&netmap_lock);
- return (rc);
+ return rc;
}
/**
* packets as it can hold coming from its dpdk port.
*/
static inline int
-rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+rx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
uint16_t max_burst)
{
int32_t i, n_rx;
for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
r = NETMAP_RXRING(nifp, i);
- rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
+ rx_sync_ring(r, port, (uint16_t)i, burst);
rc += r->avail;
}
- return (rc);
+ return rc;
}
/**
idx = FD_TO_IDX(fd);
if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
ports[port].fd == idx) {
- return (rx_sync_if(fd_port[idx].port));
+ return rx_sync_if(fd_port[idx].port);
} else {
- return (-EINVAL);
+ return -EINVAL;
}
}
* buffers into rte_mbufs and sending them out on the rings's dpdk port.
*/
static int
-tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+tx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
struct rte_mempool *pool, uint16_t max_burst)
{
uint32_t i, n_tx;
burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));
for (i = 0; i < burst_size; i++) {
- tx_mbufs[i] = rte_pktmbuf_alloc(pool);
+ tx_mbufs[i] = rte_pktmbuf_alloc(pool);
if (tx_mbufs[i] == NULL)
goto err;
for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
r = NETMAP_TXRING(nifp, i);
- tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
+ tx_sync_ring(r, port, (uint16_t)i, mp, burst);
rc += r->avail;
}
- return (rc);
+ return rc;
}
/**
idx = FD_TO_IDX(fd);
if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
ports[port].fd == idx) {
- return (tx_sync_if(fd_port[idx].port));
+ return tx_sync_if(fd_port[idx].port);
} else {
- return (-EINVAL);
+ return -EINVAL;
}
}
port_num = RTE_MAX_ETHPORTS;
port_rings = 2 * conf->max_rings;
port_slots = port_rings * conf->max_slots;
- port_bufs = port_slots;
+ port_bufs = port_slots;
nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
sz = nmif_sz * port_num;
- buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
+ buf_ofs = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;
if (sz > UINT32_MAX ||
(netmap.mem = rte_zmalloc_socket(__func__, sz,
- CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
+ RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
__func__, sz);
- return (-ENOMEM);
+ return -ENOMEM;
}
netmap.mem_sz = sz;
fd_port[i].port = FD_PORT_FREE;
}
- return (0);
+ return 0;
}
int
-rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
+rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
{
int32_t ret;
uint16_t i;
uint16_t rx_slots, tx_slots;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+ struct rte_eth_dev_info dev_info;
if (conf == NULL ||
portid >= RTE_DIM(ports) ||
conf->nr_tx_rings > netmap.conf.max_rings ||
conf->nr_rx_rings > netmap.conf.max_rings) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
- return (-EINVAL);
+ return -EINVAL;
}
- rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
- tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
+ rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
+ tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
if (tx_slots > netmap.conf.max_slots ||
rx_slots > netmap.conf.max_slots) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
- return (-EINVAL);
+ return -EINVAL;
}
+ rte_eth_dev_info_get(portid, &dev_info);
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ conf->eth_conf->txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
conf->nr_tx_rings, conf->eth_conf);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
- return (ret);
+ RTE_LOG(ERR, USER1, "Couldn't configure port %u\n", portid);
+ return ret;
}
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
+
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Couldn't adjust number of descriptors for port %u\n",
+ portid);
+ return ret;
+ }
+
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = conf->eth_conf->rxmode.offloads;
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = conf->eth_conf->txmode.offloads;
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
- conf->socket_id, conf->tx_conf);
+ conf->socket_id, &txq_conf);
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure TX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure TX queue %u of port %u\n",
i, portid);
- return (ret);
+ return ret;
}
ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
- conf->socket_id, conf->rx_conf, conf->pool);
+ conf->socket_id, &rxq_conf, conf->pool);
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure RX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure RX queue %u of port %u\n",
i, portid);
- return (ret);
+ return ret;
}
}
/* copy config to the private storage. */
ports[portid].eth_conf = conf->eth_conf[0];
- ports[portid].rx_conf = conf->rx_conf[0];
- ports[portid].tx_conf = conf->tx_conf[0];
ports[portid].pool = conf->pool;
ports[portid].socket_id = conf->socket_id;
ports[portid].nr_tx_rings = conf->nr_tx_rings;
ports[portid].tx_burst = conf->tx_burst;
ports[portid].rx_burst = conf->rx_burst;
- return (0);
+ return 0;
}
int
errno =-rc;
rc = -1;
}
- return (rc);
+ return rc;
}
-int rte_netmap_ioctl(int fd, int op, void *param)
+int rte_netmap_ioctl(int fd, uint32_t op, void *param)
{
int ret;
if (!FD_VALID(fd)) {
errno = EBADF;
- return (-1);
+ return -1;
}
switch (op) {
ret = 0;
}
- return (ret);
+ return ret;
}
void *
((flags & MAP_FIXED) != 0 && addr != NULL)) {
errno = EINVAL;
- return (MAP_FAILED);
+ return MAP_FAILED;
}
- return ((void *)((uintptr_t)netmap.mem + offset));
+ return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
}
/**
errno = -fd;
fd = -1;
}
- return (fd);
+ return fd;
}
/**
uint32_t i, idx, port;
uint32_t want_rx, want_tx;
+ if (timeout > 0)
+ return -1;
+
ret = 0;
do {
for (i = 0; i < nfds; i++) {
idx = FD_TO_IDX(fds[i].fd);
if ((port = fd_port[idx].port) >= RTE_DIM(ports) ||
- ports[port].fd != idx) {
+ ports[port].fd != idx) {
fds[i].revents |= POLLERR;
ret++;