net/tap: fix rx cksum flags on IP options packets
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index a13d8d5..e7b185a 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -7,8 +7,8 @@
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include <rte_malloc.h>
 #include <rte_bus_vdev.h>
 #include <rte_kvargs.h>
@@ -18,8 +18,8 @@
 #include <rte_string_fns.h>
 #include <rte_ethdev.h>
 #include <rte_errno.h>
+#include <rte_cycles.h>
 
-#include <assert.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/socket.h>
 
 static int tap_devices_count;
 
+static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
+       "UNKNOWN", "TUN", "TAP"
+};
+
 static const char *valid_arguments[] = {
        ETH_TAP_IFACE_ARG,
        ETH_TAP_REMOTE_ARG,
@@ -134,7 +138,7 @@ tun_alloc(struct pmd_internals *pmd, int is_keepalive)
 #ifdef IFF_MULTI_QUEUE
        unsigned int features;
 #endif
-       int fd;
+       int fd, signo, flags;
 
        memset(&ifr, 0, sizeof(struct ifreq));
 
@@ -199,52 +203,87 @@ tun_alloc(struct pmd_internals *pmd, int is_keepalive)
                }
        }
 
+       flags = fcntl(fd, F_GETFL);
+       if (flags == -1) {
+               TAP_LOG(WARNING,
+                       "Unable to get %s current flags",
+                       ifr.ifr_name);
+               goto error;
+       }
+
        /* Always set the file descriptor to non-blocking */
-       if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
+       flags |= O_NONBLOCK;
+       if (fcntl(fd, F_SETFL, flags) < 0) {
                TAP_LOG(WARNING,
                        "Unable to set %s to nonblocking: %s",
                        ifr.ifr_name, strerror(errno));
                goto error;
        }
 
-       /* Set up trigger to optimize empty Rx bursts */
-       errno = 0;
-       do {
+       /* Find a free realtime signal */
+       for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
                struct sigaction sa;
-               int flags = fcntl(fd, F_GETFL);
 
-               if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
+               if (sigaction(signo, NULL, &sa) == -1) {
+                       TAP_LOG(WARNING,
+                               "Unable to get current rt-signal %d handler",
+                               signo);
+                       goto error;
+               }
+
+               /* Already have the handler we want on this signal */
+               if (sa.sa_handler == tap_trigger_cb)
                        break;
-               if (sa.sa_handler != tap_trigger_cb) {
-                       /*
-                        * Make sure SIGIO is not already taken. This is done
-                        * as late as possible to leave the application a
-                        * chance to set up its own signal handler first.
-                        */
-                       if (sa.sa_handler != SIG_IGN &&
-                           sa.sa_handler != SIG_DFL) {
-                               errno = EBUSY;
-                               break;
-                       }
-                       sa = (struct sigaction){
-                               .sa_flags = SA_RESTART,
-                               .sa_handler = tap_trigger_cb,
-                       };
-                       if (sigaction(SIGIO, &sa, NULL) == -1)
-                               break;
+
+               /* Is the handler already in use by the application? */
+               if (sa.sa_handler != SIG_DFL) {
+                       TAP_LOG(DEBUG,
+                               "Skipping used rt-signal %d", signo);
+                       continue;
+               }
+
+               sa = (struct sigaction) {
+                       .sa_flags = SA_RESTART,
+                       .sa_handler = tap_trigger_cb,
+               };
+
+               if (sigaction(signo, &sa, NULL) == -1) {
+                       TAP_LOG(WARNING,
+                               "Unable to set rt-signal %d handler", signo);
+                       goto error;
                }
-               /* Enable SIGIO on file descriptor */
-               fcntl(fd, F_SETFL, flags | O_ASYNC);
-               fcntl(fd, F_SETOWN, getpid());
-       } while (0);
 
-       if (errno) {
+               /* Found a good signal to use */
+               TAP_LOG(DEBUG,
+                       "Using rt-signal %d", signo);
+               break;
+       }
+
+       if (signo == SIGRTMAX) {
+               TAP_LOG(WARNING, "All rt-signals are in use");
+
                /* Disable trigger globally in case of error */
                tap_trigger = 0;
-               TAP_LOG(WARNING, "Rx trigger disabled: %s",
-                       strerror(errno));
-       }
+               TAP_LOG(NOTICE, "No Rx trigger signal available");
+       } else {
+               /* Enable signal on file descriptor */
+               if (fcntl(fd, F_SETSIG, signo) < 0) {
+                       TAP_LOG(WARNING, "Unable to set signo %d for fd %d: %s",
+                               signo, fd, strerror(errno));
+                       goto error;
+               }
+               if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0) {
+                       TAP_LOG(WARNING, "Unable to set fcntl flags: %s",
+                               strerror(errno));
+                       goto error;
+               }
 
+               if (fcntl(fd, F_SETOWN, getpid()) < 0) {
+                       TAP_LOG(WARNING, "Unable to set fcntl owner: %s",
+                               strerror(errno));
+                       goto error;
+               }
+       }
        return fd;
 
 error:
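
The hunk above replaces the fixed SIGIO trigger with a free POSIX real-time signal bound to the tap file descriptor. A condensed, standalone sketch of the same mechanism follows; it assumes Linux (F_SETSIG is a GNU extension), and demo_trigger_cb/demo_arm_fd_signal are illustrative names, not driver code.

    #define _GNU_SOURCE             /* F_SETSIG is a Linux/GNU extension */
    #include <fcntl.h>
    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t demo_trigger_seen;

    static void
    demo_trigger_cb(int signo)
    {
            (void)signo;
            demo_trigger_seen = 1;  /* async-signal-safe: only set a flag */
    }

    /* Returns the chosen signal number, or -1 on failure. */
    static int
    demo_arm_fd_signal(int fd)
    {
            struct sigaction sa;
            int signo, flags;

            /* Find a real-time signal whose handler is still SIG_DFL. */
            for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
                    if (sigaction(signo, NULL, &sa) == -1)
                            return -1;
                    if (sa.sa_handler == SIG_DFL)
                            break;
            }
            if (signo == SIGRTMAX)
                    return -1;      /* every rt-signal is already taken */

            sa = (struct sigaction){
                    .sa_flags = SA_RESTART,
                    .sa_handler = demo_trigger_cb,
            };
            if (sigaction(signo, &sa, NULL) == -1)
                    return -1;

            flags = fcntl(fd, F_GETFL);
            if (flags == -1)
                    return -1;
            /* Route readiness notifications for fd to 'signo' instead of SIGIO. */
            if (fcntl(fd, F_SETSIG, signo) < 0 ||
                fcntl(fd, F_SETOWN, getpid()) < 0 ||
                fcntl(fd, F_SETFL, flags | O_ASYNC | O_NONBLOCK) < 0)
                    return -1;
            return signo;
    }
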
@@ -264,6 +303,7 @@ tap_verify_csum(struct rte_mbuf *mbuf)
        uint16_t cksum = 0;
        void *l3_hdr;
        void *l4_hdr;
+       struct rte_udp_hdr *udp_hdr;
 
        if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
                l2_len += 4;
@@ -277,8 +317,7 @@ tap_verify_csum(struct rte_mbuf *mbuf)
        if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
                struct rte_ipv4_hdr *iph = l3_hdr;
 
-               /* ihl contains the number of 4-byte words in the header */
-               l3_len = 4 * (iph->version_ihl & 0xf);
+               l3_len = rte_ipv4_hdr_len(iph);
                if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
                        return;
                /* check that the total length reported by header is not
@@ -311,10 +350,23 @@ tap_verify_csum(struct rte_mbuf *mbuf)
                /* Don't verify checksum for multi-segment packets. */
                if (mbuf->nb_segs > 1)
                        return;
-               if (l3 == RTE_PTYPE_L3_IPV4)
+               if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
+                       if (l4 == RTE_PTYPE_L4_UDP) {
+                               udp_hdr = (struct rte_udp_hdr *)l4_hdr;
+                               if (udp_hdr->dgram_cksum == 0) {
+                                       /*
+                                        * For IPv4, a zero UDP checksum
+                                        * indicates that the sender did not
+                                        * generate one [RFC 768].
+                                        */
+                                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+                                       return;
+                               }
+                       }
                        cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
-               else if (l3 == RTE_PTYPE_L3_IPV6)
+               } else { /* l3 == RTE_PTYPE_L3_IPV6, checked above */
                        cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
+               }
                mbuf->ol_flags |= cksum ?
                        PKT_RX_L4_CKSUM_BAD :
                        PKT_RX_L4_CKSUM_GOOD;
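
With the change above, IPv4 packets carrying options (RTE_PTYPE_L3_IPV4_EXT) also get their L4 checksum verified, and an IPv4/UDP packet whose checksum field is zero is reported as PKT_RX_L4_CKSUM_NONE instead of being checksummed and flagged bad. A hedged sketch of how a receiver might consume these flags; handle_rx_l4_cksum() is a hypothetical helper, not a DPDK or driver API.

    #include <rte_mbuf.h>

    /* Hypothetical helper: 0 = accept, -1 = drop, 1 = verify in software. */
    static inline int
    handle_rx_l4_cksum(const struct rte_mbuf *m)
    {
            switch (m->ol_flags & PKT_RX_L4_CKSUM_MASK) {
            case PKT_RX_L4_CKSUM_GOOD:
                    return 0;       /* verified by the PMD */
            case PKT_RX_L4_CKSUM_NONE:
                    return 0;       /* sender sent no checksum (e.g. IPv4 UDP, RFC 768) */
            case PKT_RX_L4_CKSUM_BAD:
                    return -1;      /* verified and wrong */
            default:
                    return 1;       /* PKT_RX_L4_CKSUM_UNKNOWN */
            }
    }
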
@@ -339,6 +391,23 @@ tap_rx_offload_get_queue_capa(void)
               DEV_RX_OFFLOAD_TCP_CKSUM;
 }
 
+static void
+tap_rxq_pool_free(struct rte_mbuf *pool)
+{
+       struct rte_mbuf *mbuf = pool;
+       uint16_t nb_segs = 1;
+
+       if (mbuf == NULL)
+               return;
+
+       while (mbuf->next) {
+               mbuf = mbuf->next;
+               nb_segs++;
+       }
+       pool->nb_segs = nb_segs;
+       rte_pktmbuf_free(pool);
+}
+
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -389,7 +458,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                        goto end;
 
                                seg->next = NULL;
-                               rte_pktmbuf_free(mbuf);
+                               tap_rxq_pool_free(mbuf);
 
                                goto end;
                        }
@@ -521,7 +590,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
        }
 }
 
-static inline void
+static inline int
 tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
                        struct rte_mbuf **pmbufs,
                        uint16_t *num_packets, unsigned long *num_tx_bytes)
@@ -588,7 +657,7 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
                        seg_len = rte_pktmbuf_data_len(mbuf);
                        l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
                        if (seg_len < l234_hlen)
-                               break;
+                               return -1;
 
                        /* To change checksums, work on a copy of l2, l3
                         * headers + l4 pseudo header
@@ -634,10 +703,12 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
                /* copy the tx frame data */
                n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
                if (n <= 0)
-                       break;
+                       return -1;
+
                (*num_packets)++;
                (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
        }
+       return 0;
 }
 
 /* Callback to handle sending packets from the tap interface
@@ -663,16 +734,14 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                uint16_t num_mbufs = 0;
                uint16_t tso_segsz = 0;
                int ret;
+               int num_tso_mbufs;
                uint16_t hdrs_len;
-               int j;
                uint64_t tso;
 
                tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
                if (tso) {
                        struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
 
-                       assert(gso_ctx != NULL);
-
                        /* TCP segmentation implies TCP checksum offload */
                        mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
 
@@ -686,43 +755,59 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                break;
                        }
                        gso_ctx->gso_size = tso_segsz;
-                       ret = rte_gso_segment(mbuf_in, /* packet to segment */
+                       /* 'mbuf_in' packet to segment */
+                       num_tso_mbufs = rte_gso_segment(mbuf_in,
                                gso_ctx, /* gso control block */
                                (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
                                RTE_DIM(gso_mbufs)); /* max tso mbufs */
 
-                       /* ret contains the number of new created mbufs */
+                       /* num_tso_mbufs contains the number of newly created mbufs */
-                       if (ret < 0)
+                       if (num_tso_mbufs < 0)
                                break;
 
-                       mbuf = gso_mbufs;
-                       num_mbufs = ret;
+                       if (num_tso_mbufs >= 1) {
+                               mbuf = gso_mbufs;
+                               num_mbufs = num_tso_mbufs;
+                       } else {
+                               /* 0 means it can be transmitted directly
+                                * without gso.
+                                */
+                               mbuf = &mbuf_in;
+                               num_mbufs = 1;
+                       }
                } else {
                        /* stats.errs will be incremented */
                        if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
                                break;
 
-                       /* ret 0 indicates no new mbufs were created */
+                       /* num_tso_mbufs == 0 means no new mbufs were created */
-                       ret = 0;
+                       num_tso_mbufs = 0;
                        mbuf = &mbuf_in;
                        num_mbufs = 1;
                }
 
-               tap_write_mbufs(txq, num_mbufs, mbuf,
+               ret = tap_write_mbufs(txq, num_mbufs, mbuf,
                                &num_packets, &num_tx_bytes);
+               if (ret == -1) {
+                       txq->stats.errs++;
+                       /* free tso mbufs */
+                       if (num_tso_mbufs > 0)
+                               rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
+                       break;
+               }
                num_tx++;
                /* free original mbuf */
                rte_pktmbuf_free(mbuf_in);
                /* free tso mbufs */
-               for (j = 0; j < ret; j++)
-                       rte_pktmbuf_free(mbuf[j]);
+               if (num_tso_mbufs > 0)
+                       rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
        }
 
        txq->stats.opackets += num_packets;
        txq->stats.errs += nb_pkts - num_tx;
        txq->stats.obytes += num_tx_bytes;
 
-       return num_packets;
+       return num_tx;
 }
 
 static const char *
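
The reworked Tx path above keeps rte_gso_segment()'s three-way result explicit: a negative value is an error, zero means the packet can be transmitted directly without GSO, and a positive count is the number of mbufs written into the output array. A condensed sketch of that contract under the same assumptions; demo_gso_prepare() is a hypothetical helper.

    #include <rte_gso.h>
    #include <rte_mbuf.h>

    /*
     * Fill 'out' with the mbuf(s) to transmit for 'pkt'.
     * Returns the number of entries in 'out', or a negative error.
     */
    static int
    demo_gso_prepare(struct rte_gso_ctx *gso_ctx, struct rte_mbuf *pkt,
                     struct rte_mbuf **out, uint16_t out_sz)
    {
            int nb;

            if (!(pkt->ol_flags & PKT_TX_TCP_SEG)) {
                    out[0] = pkt;   /* no TSO requested: send the packet as-is */
                    return 1;
            }

            nb = rte_gso_segment(pkt, gso_ctx, out, out_sz);
            if (nb < 0)
                    return nb;      /* segmentation failed */
            if (nb == 0) {
                    out[0] = pkt;   /* already small enough, no segments created */
                    return 1;
            }
            return nb;              /* nb newly created segments are in out[] */
    }

On a transmit failure the caller remains responsible for freeing any segments returned here, as pmd_tx_burst() above does with rte_pktmbuf_free_bulk().
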
@@ -778,7 +863,7 @@ apply:
        case SIOCSIFMTU:
                break;
        default:
-               RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+               TAP_LOG(WARNING, "%s: ioctl() called with wrong arg",
                        pmd->name);
                return -EINVAL;
        }
@@ -837,7 +922,7 @@ tap_dev_start(struct rte_eth_dev *dev)
 
 /* This function gets called when the current port gets stopped.
  */
-static void
+static int
 tap_dev_stop(struct rte_eth_dev *dev)
 {
        int i;
@@ -849,6 +934,8 @@ tap_dev_stop(struct rte_eth_dev *dev)
 
        tap_intr_handle_set(dev, 0);
        tap_link_set_down(dev);
+
+       return 0;
 }
 
 static int
@@ -1007,21 +1094,36 @@ tap_stats_reset(struct rte_eth_dev *dev)
        return 0;
 }
 
-static void
+static int
 tap_dev_close(struct rte_eth_dev *dev)
 {
        int i;
        struct pmd_internals *internals = dev->data->dev_private;
        struct pmd_process_private *process_private = dev->process_private;
+       struct rx_queue *rxq;
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               rte_free(dev->process_private);
+               return 0;
+       }
 
        tap_link_set_down(dev);
-       tap_flow_flush(dev, NULL);
-       tap_flow_implicit_flush(internals, NULL);
+       if (internals->nlsk_fd != -1) {
+               tap_flow_flush(dev, NULL);
+               tap_flow_implicit_flush(internals, NULL);
+               tap_nl_final(internals->nlsk_fd);
+               internals->nlsk_fd = -1;
+       }
 
        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                if (process_private->rxq_fds[i] != -1) {
+                       rxq = &internals->rxq[i];
                        close(process_private->rxq_fds[i]);
                        process_private->rxq_fds[i] = -1;
+                       tap_rxq_pool_free(rxq->pool);
+                       rte_free(rxq->iovecs);
+                       rxq->pool = NULL;
+                       rxq->iovecs = NULL;
                }
                if (process_private->txq_fds[i] != -1) {
                        close(process_private->txq_fds[i]);
@@ -1035,14 +1137,35 @@ tap_dev_close(struct rte_eth_dev *dev)
                                &internals->remote_initial_flags);
        }
 
+       rte_mempool_free(internals->gso_ctx_mp);
+       internals->gso_ctx_mp = NULL;
+
        if (internals->ka_fd != -1) {
                close(internals->ka_fd);
                internals->ka_fd = -1;
        }
+
+       /* mac_addrs must not be freed alone because part of dev_private */
+       dev->data->mac_addrs = NULL;
+
+       internals = dev->data->dev_private;
+       TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+               tuntap_types[internals->type], rte_socket_id());
+
+       if (internals->ioctl_sock != -1) {
+               close(internals->ioctl_sock);
+               internals->ioctl_sock = -1;
+       }
+       rte_free(dev->process_private);
+       if (tap_devices_count == 1)
+               rte_mp_action_unregister(TAP_MP_KEY);
+       tap_devices_count--;
        /*
         * Since the TUN device has no more open file descriptors
         * it will be removed from the kernel
         */
+
+       return 0;
 }
 
 static void
@@ -1054,10 +1177,10 @@ tap_rx_queue_release(void *queue)
        if (!rxq)
                return;
        process_private = rte_eth_devices[rxq->in_port].process_private;
-       if (process_private->rxq_fds[rxq->queue_id] > 0) {
+       if (process_private->rxq_fds[rxq->queue_id] != -1) {
                close(process_private->rxq_fds[rxq->queue_id]);
                process_private->rxq_fds[rxq->queue_id] = -1;
-               rte_pktmbuf_free(rxq->pool);
+               tap_rxq_pool_free(rxq->pool);
                rte_free(rxq->iovecs);
                rxq->pool = NULL;
                rxq->iovecs = NULL;
@@ -1074,7 +1197,7 @@ tap_tx_queue_release(void *queue)
                return;
        process_private = rte_eth_devices[txq->out_port].process_private;
 
-       if (process_private->txq_fds[txq->queue_id] > 0) {
+       if (process_private->txq_fds[txq->queue_id] != -1) {
                close(process_private->txq_fds[txq->queue_id]);
                process_private->txq_fds[txq->queue_id] = -1;
        }
@@ -1282,33 +1405,40 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 {
        uint32_t gso_types;
        char pool_name[64];
-
-       /*
-        * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
-        * size per mbuf use this pool for both direct and indirect mbufs
-        */
-
-       struct rte_mempool *mp;      /* Mempool for GSO packets */
+       struct pmd_internals *pmd = dev->data->dev_private;
+       int ret;
 
        /* initialize GSO context */
        gso_types = DEV_TX_OFFLOAD_TCP_TSO;
-       snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
-       mp = rte_mempool_lookup((const char *)pool_name);
-       if (!mp) {
-               mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
-                       TAP_GSO_MBUF_CACHE_SIZE, 0,
+       if (!pmd->gso_ctx_mp) {
+               /*
+                * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+                * bytes per mbuf; use this pool for both direct and
+                * indirect mbufs.
+                */
+               ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+                               dev->device->name);
+               if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+                       TAP_LOG(ERR,
+                               "%s: failed to create mbuf pool name for device %s,"
+                               " device name too long or output error, ret: %d",
+                               pmd->name, dev->device->name, ret);
+                       return -ENAMETOOLONG;
+               }
+               pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+                       TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
                        RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
                        SOCKET_ID_ANY);
-               if (!mp) {
-                       struct pmd_internals *pmd = dev->data->dev_private;
-                       RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
+               if (!pmd->gso_ctx_mp) {
+                       TAP_LOG(ERR,
+                               "%s: failed to create mbuf pool for device %s",
                                pmd->name, dev->device->name);
                        return -1;
                }
        }
 
-       gso_ctx->direct_pool = mp;
-       gso_ctx->indirect_pool = mp;
+       gso_ctx->direct_pool = pmd->gso_ctx_mp;
+       gso_ctx->indirect_pool = pmd->gso_ctx_mp;
        gso_ctx->gso_types = gso_types;
        gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
        gso_ctx->flag = 0;
@@ -1465,7 +1595,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 
 error:
-       rte_pktmbuf_free(rxq->pool);
+       tap_rxq_pool_free(rxq->pool);
        rxq->pool = NULL;
        rte_free(rxq->iovecs);
        rxq->iovecs = NULL;
@@ -1563,13 +1693,12 @@ static int
 tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
 {
        struct pmd_internals *pmd = dev->data->dev_private;
+       int ret;
 
        /* In any case, disable interrupt if the conf is no longer there. */
        if (!dev->data->dev_conf.intr_conf.lsc) {
                if (pmd->intr_handle.fd != -1) {
-                       tap_nl_final(pmd->intr_handle.fd);
-                       rte_intr_callback_unregister(&pmd->intr_handle,
-                               tap_dev_intr_handler, dev);
+                       goto clean;
                }
                return 0;
        }
@@ -1580,9 +1709,26 @@ tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
                return rte_intr_callback_register(
                        &pmd->intr_handle, tap_dev_intr_handler, dev);
        }
+
+clean:
+       do {
+               ret = rte_intr_callback_unregister(&pmd->intr_handle,
+                       tap_dev_intr_handler, dev);
+               if (ret >= 0) {
+                       break;
+               } else if (ret == -EAGAIN) {
+                       rte_delay_ms(100);
+               } else {
+                       TAP_LOG(ERR, "intr callback unregister failed: %d",
+                                    ret);
+                       break;
+               }
+       } while (true);
+
        tap_nl_final(pmd->intr_handle.fd);
-       return rte_intr_callback_unregister(&pmd->intr_handle,
-                                           tap_dev_intr_handler, dev);
+       pmd->intr_handle.fd = -1;
+
+       return 0;
 }
 
 static int
@@ -1591,8 +1737,11 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set)
        int err;
 
        err = tap_lsc_intr_handle_set(dev, set);
-       if (err)
+       if (err < 0) {
+               if (!set)
+                       tap_rx_intr_vec_set(dev, 0);
                return err;
+       }
        err = tap_rx_intr_vec_set(dev, set);
        if (err && set)
                tap_lsc_intr_handle_set(dev, 0);
@@ -1741,11 +1890,7 @@ static const struct eth_dev_ops ops = {
        .stats_reset            = tap_stats_reset,
        .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
        .rss_hash_update        = tap_rss_hash_update,
-       .filter_ctrl            = tap_dev_filter_ctrl,
-};
-
-static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
-       "UNKNOWN", "TUN", "TAP"
+       .flow_ops_get           = tap_dev_flow_ops_get,
 };
 
 static int
@@ -1784,6 +1929,9 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        pmd->dev = dev;
        strlcpy(pmd->name, tap_name, sizeof(pmd->name));
        pmd->type = type;
+       pmd->ka_fd = -1;
+       pmd->nlsk_fd = -1;
+       pmd->gso_ctx_mp = NULL;
 
        pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
        if (pmd->ioctl_sock == -1) {
@@ -1796,7 +1944,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        /* Setup some default values */
        data = dev->data;
        data->dev_private = pmd;
-       data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+       data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+                               RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        data->numa_node = numa_node;
 
        data->dev_link = pmd_link;
@@ -1814,7 +1963,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        dev->intr_handle = &pmd->intr_handle;
 
        /* Presetup the fds to -1 as being not valid */
-       pmd->ka_fd = -1;
        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
                process_private->rxq_fds[i] = -1;
                process_private->txq_fds[i] = -1;
@@ -1954,7 +2102,11 @@ error_remote:
        tap_flow_implicit_flush(pmd, NULL);
 
 error_exit:
-       if (pmd->ioctl_sock > 0)
+       if (pmd->nlsk_fd != -1)
+               close(pmd->nlsk_fd);
+       if (pmd->ka_fd != -1)
+               close(pmd->ka_fd);
+       if (pmd->ioctl_sock != -1)
                close(pmd->ioctl_sock);
        /* mac_addrs must not be freed alone because part of dev_private */
        dev->data->mac_addrs = NULL;
@@ -2385,54 +2537,15 @@ static int
 rte_pmd_tap_remove(struct rte_vdev_device *dev)
 {
        struct rte_eth_dev *eth_dev = NULL;
-       struct pmd_internals *internals;
-       struct pmd_process_private *process_private;
-       int i;
 
        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (!eth_dev)
-               return -ENODEV;
-
-       /* mac_addrs must not be freed alone because part of dev_private */
-       eth_dev->data->mac_addrs = NULL;
-
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return rte_eth_dev_release_port(eth_dev);
-
-       internals = eth_dev->data->dev_private;
-       process_private = eth_dev->process_private;
-
-       TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
-               tuntap_types[internals->type], rte_socket_id());
-
-       if (internals->nlsk_fd) {
-               tap_flow_flush(eth_dev, NULL);
-               tap_flow_implicit_flush(internals, NULL);
-               tap_nl_final(internals->nlsk_fd);
-       }
-       for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
-               if (process_private->rxq_fds[i] != -1) {
-                       close(process_private->rxq_fds[i]);
-                       process_private->rxq_fds[i] = -1;
-               }
-               if (process_private->txq_fds[i] != -1) {
-                       close(process_private->txq_fds[i]);
-                       process_private->txq_fds[i] = -1;
-               }
-       }
+               return 0;
 
-       close(internals->ioctl_sock);
-       rte_free(eth_dev->process_private);
-       if (tap_devices_count == 1)
-               rte_mp_action_unregister(TAP_MP_KEY);
-       tap_devices_count--;
+       tap_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);
 
-       if (internals->ka_fd != -1) {
-               close(internals->ka_fd);
-               internals->ka_fd = -1;
-       }
        return 0;
 }
 
@@ -2455,11 +2568,4 @@ RTE_PMD_REGISTER_PARAM_STRING(net_tap,
                              ETH_TAP_IFACE_ARG "=<string> "
                              ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
                              ETH_TAP_REMOTE_ARG "=<string>");
-int tap_logtype;
-
-RTE_INIT(tap_init_log)
-{
-       tap_logtype = rte_log_register("pmd.net.tap");
-       if (tap_logtype >= 0)
-               rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(tap_logtype, pmd.net.tap, NOTICE);
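
The final hunk replaces the hand-rolled RTE_INIT constructor with the RTE_LOG_REGISTER() helper. A sketch of the same pattern for a hypothetical driver; DEMO_LOG is assumed to mirror the TAP_LOG() wrapper from tap_log.h, which appends the trailing newline itself, consistent with TAP_LOG messages in this file omitting an explicit "\n".

    #include <rte_log.h>

    /* One log type per driver, registered at load time with a default level. */
    RTE_LOG_REGISTER(demo_logtype, pmd.net.demo, NOTICE);

    /* Per-driver wrapper in the style of TAP_LOG(); it appends the newline. */
    #define DEMO_LOG(level, fmt, args...) \
            rte_log(RTE_LOG_ ## level, demo_logtype, \
                    "%s(): " fmt "\n", __func__, ## args)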