#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
+#include <rte_cycles.h>
#include <sys/types.h>
#include <sys/stat.h>
static int tap_devices_count;
+static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
+ "UNKNOWN", "TUN", "TAP"
+};
+
static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
ETH_TAP_REMOTE_ARG,
#ifdef IFF_MULTI_QUEUE
unsigned int features;
#endif
- int fd;
+ int fd, signo, flags;
memset(&ifr, 0, sizeof(struct ifreq));
}
}
+ flags = fcntl(fd, F_GETFL);
+ if (flags == -1) {
+ TAP_LOG(WARNING,
+ "Unable to get %s current flags\n",
+ ifr.ifr_name);
+ goto error;
+ }
+
/* Always set the file descriptor to non-blocking */
- if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
+ flags |= O_NONBLOCK;
+ if (fcntl(fd, F_SETFL, flags) < 0) {
TAP_LOG(WARNING,
"Unable to set %s to nonblocking: %s",
ifr.ifr_name, strerror(errno));
goto error;
}
- /* Set up trigger to optimize empty Rx bursts */
- errno = 0;
- do {
+ /* Find a free realtime signal */
+ for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
struct sigaction sa;
- int flags = fcntl(fd, F_GETFL);
- if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
+ if (sigaction(signo, NULL, &sa) == -1) {
+ TAP_LOG(WARNING,
+ "Unable to get current rt-signal %d handler",
+ signo);
+ goto error;
+ }
+
+ /* Already have the handler we want on this signal */
+ if (sa.sa_handler == tap_trigger_cb)
break;
- if (sa.sa_handler != tap_trigger_cb) {
- /*
- * Make sure SIGIO is not already taken. This is done
- * as late as possible to leave the application a
- * chance to set up its own signal handler first.
- */
- if (sa.sa_handler != SIG_IGN &&
- sa.sa_handler != SIG_DFL) {
- errno = EBUSY;
- break;
- }
- sa = (struct sigaction){
- .sa_flags = SA_RESTART,
- .sa_handler = tap_trigger_cb,
- };
- if (sigaction(SIGIO, &sa, NULL) == -1)
- break;
+
+		/* Is the handler already in use by the application? */
+ if (sa.sa_handler != SIG_DFL) {
+ TAP_LOG(DEBUG,
+ "Skipping used rt-signal %d", signo);
+ continue;
}
- /* Enable SIGIO on file descriptor */
- fcntl(fd, F_SETFL, flags | O_ASYNC);
- fcntl(fd, F_SETOWN, getpid());
- } while (0);
- if (errno) {
+ sa = (struct sigaction) {
+ .sa_flags = SA_RESTART,
+ .sa_handler = tap_trigger_cb,
+ };
+
+ if (sigaction(signo, &sa, NULL) == -1) {
+ TAP_LOG(WARNING,
+ "Unable to set rt-signal %d handler\n", signo);
+ goto error;
+ }
+
+ /* Found a good signal to use */
+ TAP_LOG(DEBUG,
+ "Using rt-signal %d", signo);
+ break;
+ }
+
+ if (signo == SIGRTMAX) {
+ TAP_LOG(WARNING, "All rt-signals are in use\n");
+
/* Disable trigger globally in case of error */
tap_trigger = 0;
- TAP_LOG(WARNING, "Rx trigger disabled: %s",
- strerror(errno));
- }
+ TAP_LOG(NOTICE, "No Rx trigger signal available\n");
+ } else {
+ /* Enable signal on file descriptor */
+ if (fcntl(fd, F_SETSIG, signo) < 0) {
+ TAP_LOG(WARNING, "Unable to set signo %d for fd %d: %s",
+ signo, fd, strerror(errno));
+ goto error;
+ }
+ if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0) {
+ TAP_LOG(WARNING, "Unable to set fcntl flags: %s",
+ strerror(errno));
+ goto error;
+ }
+ if (fcntl(fd, F_SETOWN, getpid()) < 0) {
+ TAP_LOG(WARNING, "Unable to set fcntl owner: %s",
+ strerror(errno));
+ goto error;
+ }
+ }
return fd;
error:
if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
struct rte_ipv4_hdr *iph = l3_hdr;
- /* ihl contains the number of 4-byte words in the header */
- l3_len = 4 * (iph->version_ihl & 0xf);
+ l3_len = rte_ipv4_hdr_len(iph);
if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
return;
/* check that the total length reported by header is not
/* This function gets called when the current port gets stopped.
*/
-static void
+static int
tap_dev_stop(struct rte_eth_dev *dev)
{
int i;
tap_intr_handle_set(dev, 0);
tap_link_set_down(dev);
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
tap_dev_close(struct rte_eth_dev *dev)
{
int i;
struct pmd_process_private *process_private = dev->process_private;
struct rx_queue *rxq;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_free(dev->process_private);
+ return 0;
+ }
+
tap_link_set_down(dev);
if (internals->nlsk_fd != -1) {
tap_flow_flush(dev, NULL);
&internals->remote_initial_flags);
}
+ rte_mempool_free(internals->gso_ctx_mp);
+ internals->gso_ctx_mp = NULL;
+
if (internals->ka_fd != -1) {
close(internals->ka_fd);
internals->ka_fd = -1;
}
+
+ /* mac_addrs must not be freed alone because part of dev_private */
+ dev->data->mac_addrs = NULL;
+
+ internals = dev->data->dev_private;
+ TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+ tuntap_types[internals->type], rte_socket_id());
+
+ if (internals->ioctl_sock != -1) {
+ close(internals->ioctl_sock);
+ internals->ioctl_sock = -1;
+ }
+ rte_free(dev->process_private);
+ if (tap_devices_count == 1)
+ rte_mp_action_unregister(TAP_MP_KEY);
+ tap_devices_count--;
/*
* Since TUN device has no more opened file descriptors
* it will be removed from kernel
*/
+
+ return 0;
}
static void
if (!rxq)
return;
process_private = rte_eth_devices[rxq->in_port].process_private;
- if (process_private->rxq_fds[rxq->queue_id] > 0) {
+ if (process_private->rxq_fds[rxq->queue_id] != -1) {
close(process_private->rxq_fds[rxq->queue_id]);
process_private->rxq_fds[rxq->queue_id] = -1;
tap_rxq_pool_free(rxq->pool);
return;
process_private = rte_eth_devices[txq->out_port].process_private;
- if (process_private->txq_fds[txq->queue_id] > 0) {
+ if (process_private->txq_fds[txq->queue_id] != -1) {
close(process_private->txq_fds[txq->queue_id]);
process_private->txq_fds[txq->queue_id] = -1;
}
{
uint32_t gso_types;
char pool_name[64];
-
- /*
- * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
- * size per mbuf use this pool for both direct and indirect mbufs
- */
-
- struct rte_mempool *mp; /* Mempool for GSO packets */
+ struct pmd_internals *pmd = dev->data->dev_private;
+ int ret;
/* initialize GSO context */
gso_types = DEV_TX_OFFLOAD_TCP_TSO;
- snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
- mp = rte_mempool_lookup((const char *)pool_name);
- if (!mp) {
- mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
- TAP_GSO_MBUF_CACHE_SIZE, 0,
+ if (!pmd->gso_ctx_mp) {
+		/*
+		 * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+		 * bytes per mbuf; use this pool for both direct and
+		 * indirect mbufs.
+		 */
+ ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+ dev->device->name);
+ if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+ TAP_LOG(ERR,
+ "%s: failed to create mbuf pool name for device %s,"
+ "device name too long or output error, ret: %d\n",
+ pmd->name, dev->device->name, ret);
+ return -ENAMETOOLONG;
+ }
+ pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+ TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
SOCKET_ID_ANY);
- if (!mp) {
- struct pmd_internals *pmd = dev->data->dev_private;
-
+ if (!pmd->gso_ctx_mp) {
TAP_LOG(ERR,
"%s: failed to create mbuf pool for device %s\n",
pmd->name, dev->device->name);
}
}
- gso_ctx->direct_pool = mp;
- gso_ctx->indirect_pool = mp;
+ gso_ctx->direct_pool = pmd->gso_ctx_mp;
+ gso_ctx->indirect_pool = pmd->gso_ctx_mp;
gso_ctx->gso_types = gso_types;
gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
gso_ctx->flag = 0;
tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ int ret;
/* In any case, disable interrupt if the conf is no longer there. */
if (!dev->data->dev_conf.intr_conf.lsc) {
if (pmd->intr_handle.fd != -1) {
- tap_nl_final(pmd->intr_handle.fd);
- rte_intr_callback_unregister(&pmd->intr_handle,
- tap_dev_intr_handler, dev);
+ goto clean;
}
return 0;
}
return rte_intr_callback_register(
&pmd->intr_handle, tap_dev_intr_handler, dev);
}
+
+clean:
+ do {
+ ret = rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret == -EAGAIN) {
+ rte_delay_ms(100);
+ } else {
+ TAP_LOG(ERR, "intr callback unregister failed: %d",
+ ret);
+ break;
+ }
+ } while (true);
+
tap_nl_final(pmd->intr_handle.fd);
- return rte_intr_callback_unregister(&pmd->intr_handle,
- tap_dev_intr_handler, dev);
+ pmd->intr_handle.fd = -1;
+
+ return 0;
}
static int
.filter_ctrl = tap_dev_filter_ctrl,
};
-static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
- "UNKNOWN", "TUN", "TAP"
-};
-
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
char *remote_iface, struct rte_ether_addr *mac_addr,
pmd->dev = dev;
strlcpy(pmd->name, tap_name, sizeof(pmd->name));
pmd->type = type;
+ pmd->ka_fd = -1;
+ pmd->nlsk_fd = -1;
+ pmd->gso_ctx_mp = NULL;
pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (pmd->ioctl_sock == -1) {
/* Setup some default values */
data = dev->data;
data->dev_private = pmd;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
data->numa_node = numa_node;
data->dev_link = pmd_link;
dev->intr_handle = &pmd->intr_handle;
/* Presetup the fds to -1 as being not valid */
- pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
process_private->rxq_fds[i] = -1;
process_private->txq_fds[i] = -1;
tap_flow_implicit_flush(pmd, NULL);
error_exit:
- if (pmd->ioctl_sock > 0)
+ if (pmd->nlsk_fd != -1)
+ close(pmd->nlsk_fd);
+ if (pmd->ka_fd != -1)
+ close(pmd->ka_fd);
+ if (pmd->ioctl_sock != -1)
close(pmd->ioctl_sock);
/* mac_addrs must not be freed alone because part of dev_private */
dev->data->mac_addrs = NULL;
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- struct pmd_internals *internals;
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (!eth_dev)
- return -ENODEV;
-
- /* mac_addrs must not be freed alone because part of dev_private */
- eth_dev->data->mac_addrs = NULL;
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return rte_eth_dev_release_port(eth_dev);
+ return 0;
tap_dev_close(eth_dev);
-
- internals = eth_dev->data->dev_private;
- TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
- tuntap_types[internals->type], rte_socket_id());
-
- close(internals->ioctl_sock);
- rte_free(eth_dev->process_private);
- if (tap_devices_count == 1)
- rte_mp_action_unregister(TAP_MP_KEY);
- tap_devices_count--;
rte_eth_dev_release_port(eth_dev);
- if (internals->ka_fd != -1) {
- close(internals->ka_fd);
- internals->ka_fd = -1;
- }
return 0;
}
ETH_TAP_IFACE_ARG "=<string> "
ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
ETH_TAP_REMOTE_ARG "=<string>");
-int tap_logtype;
-
-RTE_INIT(tap_init_log)
-{
- tap_logtype = rte_log_register("pmd.net.tap");
- if (tap_logtype >= 0)
- rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(tap_logtype, pmd.net.tap, NOTICE);