#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#define TAP_IOV_DEFAULT_MAX 1024
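+/* Offload masks advertised by tap_dev_info(). The TAP PMD has no port-only
+ * offloads, so the same mask is reported at port and queue level; these
+ * macros replace the tap_{rx,tx}_offload_get_{port,queue}_capa() helpers.
+ */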
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
+
static int tap_devices_count;
static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
static volatile uint32_t tap_trigger; /* Rx trigger */
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
static void
uint16_t cksum = 0;
void *l3_hdr;
void *l4_hdr;
+ struct rte_udp_hdr *udp_hdr;
if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
l2_len += 4;
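+ /* A valid IPv4 header (checksum field included) sums to 0xffff, so the
+ * complemented raw sum below is zero exactly when the header is good.
+ */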
cksum = ~rte_raw_cksum(iph, l3_len);
mbuf->ol_flags |= cksum ?
- PKT_RX_IP_CKSUM_BAD :
- PKT_RX_IP_CKSUM_GOOD;
+ RTE_MBUF_F_RX_IP_CKSUM_BAD :
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else if (l3 == RTE_PTYPE_L3_IPV6) {
struct rte_ipv6_hdr *iph = l3_hdr;
rte_pktmbuf_data_len(mbuf))
return;
} else {
- /* IPv6 extensions are not supported */
+ /* - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN cannot happen because
+ * mbuf->packet_type is filled by rte_net_get_ptype() which
+ * never returns this value.
+ * - IPv6 extensions are not supported.
+ */
return;
}
if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
+ int cksum_ok;
+
l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
/* Don't verify checksum for multi-segment packets. */
if (mbuf->nb_segs > 1)
return;
- if (l3 == RTE_PTYPE_L3_IPV4)
- cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
- else if (l3 == RTE_PTYPE_L3_IPV6)
- cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
- mbuf->ol_flags |= cksum ?
- PKT_RX_L4_CKSUM_BAD :
- PKT_RX_L4_CKSUM_GOOD;
+ if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
+ if (l4 == RTE_PTYPE_L4_UDP) {
+ udp_hdr = (struct rte_udp_hdr *)l4_hdr;
+ if (udp_hdr->dgram_cksum == 0) {
+ /*
+ * For IPv4, a zero UDP checksum
+ * indicates that the sender did not
+ * generate one [RFC 768].
+ */
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
+ return;
+ }
+ }
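+ /* rte_ipv4_udptcp_cksum_verify() and rte_ipv6_udptcp_cksum_verify()
+ * return 0 when the embedded L4 checksum is valid, hence the negations
+ * below.
+ */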
+ cksum_ok = !rte_ipv4_udptcp_cksum_verify(l3_hdr,
+ l4_hdr);
+ } else { /* l3 == RTE_PTYPE_L3_IPV6, checked above */
+ cksum_ok = !rte_ipv6_udptcp_cksum_verify(l3_hdr,
+ l4_hdr);
+ }
+ mbuf->ol_flags |= cksum_ok ?
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
-static uint64_t
-tap_rx_offload_get_port_capa(void)
-{
- /*
- * No specific port Rx offload capabilities.
- */
- return 0;
-}
-
-static uint64_t
-tap_rx_offload_get_queue_capa(void)
-{
- return DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
-}
-
static void
tap_rxq_pool_free(struct rte_mbuf *pool)
{
len = readv(process_private->rxq_fds[rxq->queue_id],
*rxq->iovecs,
- 1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ 1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
rxq->nb_rx_desc : 1));
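+ /* iovec 0 receives the tun_pi header; with Rx scatter enabled, one
+ * iovec per Rx descriptor lets the kernel split frames larger than a
+ * single mbuf.
+ */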
if (len < (int)sizeof(struct tun_pi))
break;
seg->next = NULL;
mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
RTE_PTYPE_ALL_MASK);
- if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
tap_verify_csum(mbuf);
/* account for the receive frame */
return num_rx;
}
-static uint64_t
-tap_tx_offload_get_port_capa(void)
-{
- /*
- * No specific port Tx offload capabilities.
- */
- return 0;
-}
-
-static uint64_t
-tap_tx_offload_get_queue_capa(void)
-{
- return DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-}
-
/* Finalize l4 checksum calculation */
static void
tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
{
void *l3_hdr = packet + l2_len;
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
struct rte_ipv4_hdr *iph = l3_hdr;
uint16_t cksum;
cksum = rte_raw_cksum(iph, l3_len);
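+ /* A raw sum of 0xffff would complement to 0; keep 0xffff, the
+ * equivalent one's-complement representation of zero.
+ */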
iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
}
- if (ol_flags & PKT_TX_L4_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
void *l4_hdr;
l4_hdr = packet + l2_len + l3_len;
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
*l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
- else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+ else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
*l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
else
return;
**l4_cksum = 0;
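+ /* Compute the pseudo-header checksum now; the payload part of the L4
+ * checksum is accumulated during the copy and finalized in
+ * tap_tx_l4_cksum().
+ */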
- if (ol_flags & PKT_TX_IPV4)
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
*l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
else
*l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
nb_segs = mbuf->nb_segs;
if (txq->csum &&
- ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+ ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
+ (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
is_cksum = 1;
/* Support only packets with at least layer 4
uint16_t hdrs_len;
uint64_t tso;
- tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+ tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
if (tso) {
struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
/* TCP segmentation implies TCP checksum offload */
- mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+ mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
/* gso size is calculated without RTE_ETHER_CRC_LEN */
hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
if (num_tso_mbufs < 0)
break;
- mbuf = gso_mbufs;
- num_mbufs = num_tso_mbufs;
+ if (num_tso_mbufs >= 1) {
+ mbuf = gso_mbufs;
+ num_mbufs = num_tso_mbufs;
+ } else {
+ /* 0 means the packet can be transmitted directly,
+ * without GSO.
+ */
+ mbuf = &mbuf_in;
+ num_mbufs = 1;
+ }
} else {
/* stats.errs will be incremented */
if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_flags = IFF_UP };
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_flags = IFF_UP };
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
/* This function gets called when the current port gets stopped.
*/
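+/* Note: eth_dev_ops::dev_stop returns int since DPDK 20.11; nothing in the
+ * TAP stop path can fail, so success is reported unconditionally.
+ */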
-static void
+static int
tap_dev_stop(struct rte_eth_dev *dev)
{
int i;
tap_intr_handle_set(dev, 0);
tap_link_set_down(dev);
+
+ return 0;
}
static int
uint32_t speed = pmd_link.link_speed;
uint32_t capa = 0;
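+ /* Advertise every fixed speed up to and including the current link speed. */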
- if (speed >= ETH_SPEED_NUM_10M)
- capa |= ETH_LINK_SPEED_10M;
- if (speed >= ETH_SPEED_NUM_100M)
- capa |= ETH_LINK_SPEED_100M;
- if (speed >= ETH_SPEED_NUM_1G)
- capa |= ETH_LINK_SPEED_1G;
- if (speed >= ETH_SPEED_NUM_5G)
- capa |= ETH_LINK_SPEED_2_5G;
- if (speed >= ETH_SPEED_NUM_5G)
- capa |= ETH_LINK_SPEED_5G;
- if (speed >= ETH_SPEED_NUM_10G)
- capa |= ETH_LINK_SPEED_10G;
- if (speed >= ETH_SPEED_NUM_20G)
- capa |= ETH_LINK_SPEED_20G;
- if (speed >= ETH_SPEED_NUM_25G)
- capa |= ETH_LINK_SPEED_25G;
- if (speed >= ETH_SPEED_NUM_40G)
- capa |= ETH_LINK_SPEED_40G;
- if (speed >= ETH_SPEED_NUM_50G)
- capa |= ETH_LINK_SPEED_50G;
- if (speed >= ETH_SPEED_NUM_56G)
- capa |= ETH_LINK_SPEED_56G;
- if (speed >= ETH_SPEED_NUM_100G)
- capa |= ETH_LINK_SPEED_100G;
+ if (speed >= RTE_ETH_SPEED_NUM_10M)
+ capa |= RTE_ETH_LINK_SPEED_10M;
+ if (speed >= RTE_ETH_SPEED_NUM_100M)
+ capa |= RTE_ETH_LINK_SPEED_100M;
+ if (speed >= RTE_ETH_SPEED_NUM_1G)
+ capa |= RTE_ETH_LINK_SPEED_1G;
+ if (speed >= RTE_ETH_SPEED_NUM_2_5G)
+ capa |= RTE_ETH_LINK_SPEED_2_5G;
+ if (speed >= RTE_ETH_SPEED_NUM_5G)
+ capa |= RTE_ETH_LINK_SPEED_5G;
+ if (speed >= RTE_ETH_SPEED_NUM_10G)
+ capa |= RTE_ETH_LINK_SPEED_10G;
+ if (speed >= RTE_ETH_SPEED_NUM_20G)
+ capa |= RTE_ETH_LINK_SPEED_20G;
+ if (speed >= RTE_ETH_SPEED_NUM_25G)
+ capa |= RTE_ETH_LINK_SPEED_25G;
+ if (speed >= RTE_ETH_SPEED_NUM_40G)
+ capa |= RTE_ETH_LINK_SPEED_40G;
+ if (speed >= RTE_ETH_SPEED_NUM_50G)
+ capa |= RTE_ETH_LINK_SPEED_50G;
+ if (speed >= RTE_ETH_SPEED_NUM_56G)
+ capa |= RTE_ETH_LINK_SPEED_56G;
+ if (speed >= RTE_ETH_SPEED_NUM_100G)
+ capa |= RTE_ETH_LINK_SPEED_100G;
return capa;
}
dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->min_rx_bufsize = 0;
dev_info->speed_capa = tap_dev_speed_capa();
- dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
- dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
- dev_info->rx_queue_offload_capa;
- dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
- dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
- dev_info->tx_queue_offload_capa;
+ dev_info->rx_queue_offload_capa = TAP_RX_OFFLOAD;
+ dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = TAP_TX_OFFLOAD;
+ dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
/*
* limitation: TAP supports all of IP, UDP and TCP hash
* functions together and not in partial combinations
*/
dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
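+ /*
+ * Flow rules on a TAP device do not survive a port restart, so the
+ * RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP capability must not be advertised.
+ */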
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
return 0;
}
if (internals->remote_if_index) {
/* Restore initial remote state */
- ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
+ int ret = ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
&internals->remote_initial_flags);
+ if (ret)
+ TAP_LOG(ERR, "restore remote state failed: %d", ret);
}
rte_mempool_free(internals->gso_ctx_mp);
}
static void
-tap_rx_queue_release(void *queue)
+tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct rx_queue *rxq = queue;
+ struct rx_queue *rxq = dev->data->rx_queues[qid];
struct pmd_process_private *process_private;
if (!rxq)
}
static void
-tap_tx_queue_release(void *queue)
+tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct tx_queue *txq = queue;
+ struct tx_queue *txq = dev->data->tx_queues[qid];
struct pmd_process_private *process_private;
if (!txq)
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
if (!(ifr.ifr_flags & IFF_UP) ||
!(ifr.ifr_flags & IFF_RUNNING)) {
- dev_link->link_status = ETH_LINK_DOWN;
+ dev_link->link_status = RTE_ETH_LINK_DOWN;
return 0;
}
}
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
dev_link->link_status =
((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
- ETH_LINK_UP :
- ETH_LINK_DOWN);
+ RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN);
return 0;
}
int ret;
/* initialize GSO context */
- gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
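+ /* TSO is the only GSO type this PMD requests from the GSO library. */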
if (!pmd->gso_ctx_mp) {
/*
* Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
txq->csum = !!(offloads &
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM));
+ (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
{
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_mtu = mtu };
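+ /* On success, the ethdev layer updates dev->data->mtu itself. */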
- int err = 0;
- err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
- if (!err)
- dev->data->mtu = mtu;
-
- return err;
+ return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
struct rte_eth_dev *dev = cb_arg;
struct pmd_internals *pmd = dev->data->dev_private;
- tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+ tap_nl_recv(rte_intr_fd_get(pmd->intr_handle),
+ tap_nl_msg_handler, dev);
}
static int
/* In any case, disable interrupt if the conf is no longer there. */
if (!dev->data->dev_conf.intr_conf.lsc) {
- if (pmd->intr_handle.fd != -1) {
+ if (rte_intr_fd_get(pmd->intr_handle) != -1)
goto clean;
- }
+
return 0;
}
if (set) {
- pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
- if (unlikely(pmd->intr_handle.fd == -1))
+ rte_intr_fd_set(pmd->intr_handle, tap_nl_init(RTMGRP_LINK));
+ if (unlikely(rte_intr_fd_get(pmd->intr_handle) == -1))
return -EBADF;
return rte_intr_callback_register(
- &pmd->intr_handle, tap_dev_intr_handler, dev);
+ pmd->intr_handle, tap_dev_intr_handler, dev);
}
clean:
do {
- ret = rte_intr_callback_unregister(&pmd->intr_handle,
+ ret = rte_intr_callback_unregister(pmd->intr_handle,
tap_dev_intr_handler, dev);
if (ret >= 0) {
break;
}
} while (true);
- tap_nl_final(pmd->intr_handle.fd);
- pmd->intr_handle.fd = -1;
+ tap_nl_final(rte_intr_fd_get(pmd->intr_handle));
+ rte_intr_fd_set(pmd->intr_handle, -1);
return 0;
}
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_fc_conf *fc_conf)
{
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_fc_conf *fc_conf)
{
- if (fc_conf->mode != RTE_FC_NONE)
+ if (fc_conf->mode != RTE_ETH_FC_NONE)
return -ENOTSUP;
return 0;
}
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
.rss_hash_update = tap_rss_hash_update,
- .filter_ctrl = tap_dev_filter_ctrl,
+ .flow_ops_get = tap_dev_flow_ops_get,
};
static int
goto error_exit;
}
+ /* Allocate interrupt instance */
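+ /* Shared allocation: the handle remains usable from secondary processes. */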
+ pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ if (pmd->intr_handle == NULL) {
+ TAP_LOG(ERR, "Failed to allocate intr handle");
+ goto error_exit;
+ }
+
/* Setup some default values */
data = dev->data;
data->dev_private = pmd;
- data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
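+ /* Let the ethdev layer expose per-queue basic stats as xstats. */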
data->numa_node = numa_node;
data->dev_link = pmd_link;
dev->rx_pkt_burst = pmd_rx_burst;
dev->tx_pkt_burst = pmd_tx_burst;
- pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
- pmd->intr_handle.fd = -1;
- dev->intr_handle = &pmd->intr_handle;
+ rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
+ rte_intr_fd_set(pmd->intr_handle, -1);
+ dev->intr_handle = pmd->intr_handle;
/* Presetup the fds to -1 as being not valid */
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
/* mac_addrs must not be freed alone because part of dev_private */
dev->data->mac_addrs = NULL;
+ /* Free the interrupt instance before release_port() frees dev_private */
+ rte_intr_instance_free(pmd->intr_handle);
rte_eth_dev_release_port(dev);
error_exit_nodev:
TAP_LOG(ERR, "%s Unable to initialize %s",
}
}
}
- pmd_link.link_speed = ETH_SPEED_NUM_10G;
+ pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
return 0;
}
- speed = ETH_SPEED_NUM_10G;
+ speed = RTE_ETH_SPEED_NUM_10G;
/* use tap%d which causes kernel to choose next available */
strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
ETH_TAP_IFACE_ARG "=<string> "
ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
ETH_TAP_REMOTE_ARG "=<string>");
-RTE_LOG_REGISTER(tap_logtype, pmd.net.tap, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(tap_logtype, NOTICE);