diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 2229eef..111037d 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
 
 /* IPC key for queue fds sync */
 #define TAP_MP_KEY "tap_mp_sync_queues"
+#define TAP_MP_REQ_START_RXTX "tap_mp_req_start_rxtx"
 
 #define TAP_IOV_DEFAULT_MAX 1024
 
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER |   \
+                       RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+                       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |  \
+                       RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |        \
+                       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |  \
+                       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |  \
+                       RTE_ETH_TX_OFFLOAD_TCP_TSO)
+
 static int tap_devices_count;
 
 static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
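The two new macros above replace the per-queue capability helper functions that
this patch removes further down. For context, an application discovers and
requests these offloads through the standard ethdev calls; a minimal sketch
(port_id, nb_rxq and nb_txq are assumed to be set up by the caller):

    /* Query TAP's advertised offloads and request Rx checksum
     * verification if the PMD reports it. */
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf = {0};

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            rte_exit(EXIT_FAILURE, "cannot query port\n");
    if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM)
            port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) != 0)
            rte_exit(EXIT_FAILURE, "cannot configure port\n");
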
@@ -86,10 +98,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;  /* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-       .link_speed = ETH_SPEED_NUM_10G,
-       .link_duplex = ETH_LINK_FULL_DUPLEX,
-       .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_FIXED,
+       .link_speed = RTE_ETH_SPEED_NUM_10G,
+       .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+       .link_status = RTE_ETH_LINK_DOWN,
+       .link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -329,8 +341,8 @@ tap_verify_csum(struct rte_mbuf *mbuf)
 
                cksum = ~rte_raw_cksum(iph, l3_len);
                mbuf->ol_flags |= cksum ?
-                       PKT_RX_IP_CKSUM_BAD :
-                       PKT_RX_IP_CKSUM_GOOD;
+                       RTE_MBUF_F_RX_IP_CKSUM_BAD :
+                       RTE_MBUF_F_RX_IP_CKSUM_GOOD;
        } else if (l3 == RTE_PTYPE_L3_IPV6) {
                struct rte_ipv6_hdr *iph = l3_hdr;
 
@@ -365,7 +377,7 @@ tap_verify_csum(struct rte_mbuf *mbuf)
                                         * indicates that the sender did not
                                         * generate one [RFC 768].
                                         */
-                                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+                                       mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
                                        return;
                                }
                        }
@@ -376,28 +388,10 @@ tap_verify_csum(struct rte_mbuf *mbuf)
                                                                 l4_hdr);
                }
                mbuf->ol_flags |= cksum_ok ?
-                       PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
+                       RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
        }
 }
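On the receive side these flags are consumed by the application after
rte_eth_rx_burst(); a hedged sketch, where drop_pkt() stands in for whatever
the application does with bad packets:

    uint64_t flags = mbuf->ol_flags;

    if ((flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD ||
        (flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
            drop_pkt(mbuf);    /* hypothetical helper */
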
 
-static uint64_t
-tap_rx_offload_get_port_capa(void)
-{
-       /*
-        * No specific port Rx offload capabilities.
-        */
-       return 0;
-}
-
-static uint64_t
-tap_rx_offload_get_queue_capa(void)
-{
-       return DEV_RX_OFFLOAD_SCATTER |
-              DEV_RX_OFFLOAD_IPV4_CKSUM |
-              DEV_RX_OFFLOAD_UDP_CKSUM |
-              DEV_RX_OFFLOAD_TCP_CKSUM;
-}
-
 static void
 tap_rxq_pool_free(struct rte_mbuf *pool)
 {
@@ -440,7 +434,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
                len = readv(process_private->rxq_fds[rxq->queue_id],
                        *rxq->iovecs,
-                       1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+                       1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
                             rxq->nb_rx_desc : 1));
                if (len < (int)sizeof(struct tun_pi))
                        break;
@@ -496,7 +490,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                seg->next = NULL;
                mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
                                                      RTE_PTYPE_ALL_MASK);
-               if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+               if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
                        tap_verify_csum(mbuf);
 
                /* account for the receive frame */
@@ -513,25 +507,6 @@ end:
        return num_rx;
 }
 
-static uint64_t
-tap_tx_offload_get_port_capa(void)
-{
-       /*
-        * No specific port Tx offload capabilities.
-        */
-       return 0;
-}
-
-static uint64_t
-tap_tx_offload_get_queue_capa(void)
-{
-       return DEV_TX_OFFLOAD_MULTI_SEGS |
-              DEV_TX_OFFLOAD_IPV4_CKSUM |
-              DEV_TX_OFFLOAD_UDP_CKSUM |
-              DEV_TX_OFFLOAD_TCP_CKSUM |
-              DEV_TX_OFFLOAD_TCP_TSO;
-}
-
 /* Finalize l4 checksum calculation */
 static void
 tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
@@ -551,7 +526,7 @@ tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
        }
 }
 
-/* Accumaulate L4 raw checksums */
+/* Accumulate L4 raw checksums */
 static void
 tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
                        uint32_t *l4_raw_cksum)
@@ -570,7 +545,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
 {
        void *l3_hdr = packet + l2_len;
 
-       if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+       if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
                struct rte_ipv4_hdr *iph = l3_hdr;
                uint16_t cksum;
 
@@ -578,18 +553,18 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
                cksum = rte_raw_cksum(iph, l3_len);
                iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
        }
-       if (ol_flags & PKT_TX_L4_MASK) {
+       if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
                void *l4_hdr;
 
                l4_hdr = packet + l2_len + l3_len;
-               if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+               if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
                        *l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
-               else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+               else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
                        *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
                else
                        return;
                **l4_cksum = 0;
-               if (ol_flags & PKT_TX_IPV4)
+               if (ol_flags & RTE_MBUF_F_TX_IPV4)
                        *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
                else
                        *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
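For this path to run, the sender must have requested checksum offload on the
mbuf; a minimal sketch of the Tx-side setup for an IPv4/TCP packet (the header
lengths must match the actual packet contents):

    mbuf->l2_len = sizeof(struct rte_ether_hdr);
    mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
    mbuf->ol_flags |= RTE_MBUF_F_TX_IPV4 |
                      RTE_MBUF_F_TX_IP_CKSUM |
                      RTE_MBUF_F_TX_TCP_CKSUM;
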
@@ -653,9 +628,9 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
 
                nb_segs = mbuf->nb_segs;
                if (txq->csum &&
-                   ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
-                    (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
-                    (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+                   ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
+                     (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
+                     (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
                        is_cksum = 1;
 
                        /* Support only packets with at least layer 4
@@ -745,12 +720,12 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                uint16_t hdrs_len;
                uint64_t tso;
 
-               tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+               tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
                if (tso) {
                        struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
 
                        /* TCP segmentation implies TCP checksum offload */
-                       mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+                       mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
 
                        /* gso size is calculated without RTE_ETHER_CRC_LEN */
                        hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
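A packet enters this GSO path only if the application marked it for TSO;
a sketch of that marking (the MSS value of 1448 is illustrative):

    mbuf->l2_len = sizeof(struct rte_ether_hdr);
    mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
    mbuf->l4_len = sizeof(struct rte_tcp_hdr);
    mbuf->tso_segsz = 1448;    /* MSS, headers excluded */
    mbuf->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                      RTE_MBUF_F_TX_TCP_SEG;
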
@@ -892,7 +867,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-       dev->data->dev_link.link_status = ETH_LINK_DOWN;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
        return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -902,15 +877,53 @@ tap_link_set_up(struct rte_eth_dev *dev)
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-       dev->data->dev_link.link_status = ETH_LINK_UP;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
        return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
+static int
+tap_mp_req_on_rxtx(struct rte_eth_dev *dev)
+{
+       struct rte_mp_msg msg;
+       struct ipc_queues *request_param = (struct ipc_queues *)msg.param;
+       int err;
+       int fd_iterator = 0;
+       struct pmd_process_private *process_private = dev->process_private;
+       int i;
+
+       memset(&msg, 0, sizeof(msg));
+       strlcpy(msg.name, TAP_MP_REQ_START_RXTX, sizeof(msg.name));
+       strlcpy(request_param->port_name, dev->data->name, sizeof(request_param->port_name));
+       msg.len_param = sizeof(*request_param);
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               msg.fds[fd_iterator++] = process_private->txq_fds[i];
+               msg.num_fds++;
+               request_param->txq_count++;
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               msg.fds[fd_iterator++] = process_private->rxq_fds[i];
+               msg.num_fds++;
+               request_param->rxq_count++;
+       }
+
+       err = rte_mp_sendmsg(&msg);
+       if (err < 0) {
+               TAP_LOG(ERR, "Failed to send start req to secondary: %s",
+                       strerror(rte_errno));
+               return -1;
+       }
+
+       return 0;
+}
+
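One caveat: struct rte_mp_msg carries at most RTE_MP_MAX_FD_NUM file
descriptors (8 at the time of writing), while the driver allows up to
RTE_PMD_TAP_MAX_QUEUES queue pairs, so a defensive check before the two loops
above would be reasonable hardening (a sketch, not part of the patch):

    if (dev->data->nb_tx_queues + dev->data->nb_rx_queues >
                    RTE_MP_MAX_FD_NUM) {
            TAP_LOG(ERR, "Too many queue fds for one IPC message");
            return -1;
    }
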
 static int
 tap_dev_start(struct rte_eth_dev *dev)
 {
        int err, i;
 
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               tap_mp_req_on_rxtx(dev);
+
        err = tap_intr_handle_set(dev, 1);
        if (err)
                return err;
@@ -927,6 +940,34 @@ tap_dev_start(struct rte_eth_dev *dev)
        return err;
 }
 
+static int
+tap_mp_req_start_rxtx(const struct rte_mp_msg *request, __rte_unused const void *peer)
+{
+       struct rte_eth_dev *dev;
+       const struct ipc_queues *request_param =
+               (const struct ipc_queues *)request->param;
+       int fd_iterator;
+       int queue;
+       struct pmd_process_private *process_private;
+
+       dev = rte_eth_dev_get_by_name(request_param->port_name);
+       if (!dev) {
+               TAP_LOG(ERR, "Failed to get dev for %s",
+                       request_param->port_name);
+               return -1;
+       }
+       process_private = dev->process_private;
+       fd_iterator = 0;
+       TAP_LOG(DEBUG, "tap_attach rx_q:%d tx_q:%d", request_param->rxq_count,
+               request_param->txq_count);
+       for (queue = 0; queue < request_param->txq_count; queue++)
+               process_private->txq_fds[queue] = request->fds[fd_iterator++];
+       for (queue = 0; queue < request_param->rxq_count; queue++)
+               process_private->rxq_fds[queue] = request->fds[fd_iterator++];
+
+       return 0;
+}
+
 /* This function gets called when the current port gets stopped.
  */
 static int
@@ -966,6 +1007,14 @@ tap_dev_configure(struct rte_eth_dev *dev)
                        RTE_PMD_TAP_MAX_QUEUES);
                return -1;
        }
+       if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+               TAP_LOG(ERR,
+                       "%s: number of rx queues %d must be equal to number of tx queues %d",
+                       dev->device->name,
+                       dev->data->nb_rx_queues,
+                       dev->data->nb_tx_queues);
+               return -1;
+       }
 
        TAP_LOG(INFO, "%s: %s: TX configured queues number: %u",
                dev->device->name, pmd->name, dev->data->nb_tx_queues);
@@ -982,30 +1031,30 @@ tap_dev_speed_capa(void)
        uint32_t speed = pmd_link.link_speed;
        uint32_t capa = 0;
 
-       if (speed >= ETH_SPEED_NUM_10M)
-               capa |= ETH_LINK_SPEED_10M;
-       if (speed >= ETH_SPEED_NUM_100M)
-               capa |= ETH_LINK_SPEED_100M;
-       if (speed >= ETH_SPEED_NUM_1G)
-               capa |= ETH_LINK_SPEED_1G;
-       if (speed >= ETH_SPEED_NUM_5G)
-               capa |= ETH_LINK_SPEED_2_5G;
-       if (speed >= ETH_SPEED_NUM_5G)
-               capa |= ETH_LINK_SPEED_5G;
-       if (speed >= ETH_SPEED_NUM_10G)
-               capa |= ETH_LINK_SPEED_10G;
-       if (speed >= ETH_SPEED_NUM_20G)
-               capa |= ETH_LINK_SPEED_20G;
-       if (speed >= ETH_SPEED_NUM_25G)
-               capa |= ETH_LINK_SPEED_25G;
-       if (speed >= ETH_SPEED_NUM_40G)
-               capa |= ETH_LINK_SPEED_40G;
-       if (speed >= ETH_SPEED_NUM_50G)
-               capa |= ETH_LINK_SPEED_50G;
-       if (speed >= ETH_SPEED_NUM_56G)
-               capa |= ETH_LINK_SPEED_56G;
-       if (speed >= ETH_SPEED_NUM_100G)
-               capa |= ETH_LINK_SPEED_100G;
+       if (speed >= RTE_ETH_SPEED_NUM_10M)
+               capa |= RTE_ETH_LINK_SPEED_10M;
+       if (speed >= RTE_ETH_SPEED_NUM_100M)
+               capa |= RTE_ETH_LINK_SPEED_100M;
+       if (speed >= RTE_ETH_SPEED_NUM_1G)
+               capa |= RTE_ETH_LINK_SPEED_1G;
+       if (speed >= RTE_ETH_SPEED_NUM_2_5G)
+               capa |= RTE_ETH_LINK_SPEED_2_5G;
+       if (speed >= RTE_ETH_SPEED_NUM_5G)
+               capa |= RTE_ETH_LINK_SPEED_5G;
+       if (speed >= RTE_ETH_SPEED_NUM_10G)
+               capa |= RTE_ETH_LINK_SPEED_10G;
+       if (speed >= RTE_ETH_SPEED_NUM_20G)
+               capa |= RTE_ETH_LINK_SPEED_20G;
+       if (speed >= RTE_ETH_SPEED_NUM_25G)
+               capa |= RTE_ETH_LINK_SPEED_25G;
+       if (speed >= RTE_ETH_SPEED_NUM_40G)
+               capa |= RTE_ETH_LINK_SPEED_40G;
+       if (speed >= RTE_ETH_SPEED_NUM_50G)
+               capa |= RTE_ETH_LINK_SPEED_50G;
+       if (speed >= RTE_ETH_SPEED_NUM_56G)
+               capa |= RTE_ETH_LINK_SPEED_56G;
+       if (speed >= RTE_ETH_SPEED_NUM_100G)
+               capa |= RTE_ETH_LINK_SPEED_100G;
 
        return capa;
 }
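Note the two distinct macro families used above: RTE_ETH_SPEED_NUM_* are plain
Mb/s values, while RTE_ETH_LINK_SPEED_* are capability bit flags; a consumer
of the returned mask therefore tests bits, e.g.:

    uint32_t capa = tap_dev_speed_capa();

    if (capa & RTE_ETH_LINK_SPEED_10G)
            printf("10G capable\n");
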
@@ -1022,18 +1071,17 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
        dev_info->min_rx_bufsize = 0;
        dev_info->speed_capa = tap_dev_speed_capa();
-       dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
-       dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
-                                   dev_info->rx_queue_offload_capa;
-       dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
-       dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
-                                   dev_info->tx_queue_offload_capa;
+       dev_info->rx_queue_offload_capa = TAP_RX_OFFLOAD;
+       dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;
+       dev_info->tx_queue_offload_capa = TAP_TX_OFFLOAD;
+       dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;
        dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
        /*
         * limitation: TAP supports all of IP, UDP and TCP hash
         * functions together and not in partial combinations
         */
        dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        return 0;
 }
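Clearing RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP advertises that TAP flow rules do not
survive a stop/start cycle; an application relying on this capability bit
might check it as in this sketch (reinstall_flow_rules() is hypothetical):

    struct rte_eth_dev_info info;

    rte_eth_dev_info_get(port_id, &info);
    if (!(info.dev_capa & RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP))
            reinstall_flow_rules(port_id);
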
@@ -1111,6 +1159,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                rte_free(dev->process_private);
+               if (tap_devices_count == 1)
+                       rte_mp_action_unregister(TAP_MP_REQ_START_RXTX);
+               tap_devices_count--;
                return 0;
        }
 
@@ -1179,9 +1230,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 }
 
 static void
-tap_rx_queue_release(void *queue)
+tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct rx_queue *rxq = queue;
+       struct rx_queue *rxq = dev->data->rx_queues[qid];
        struct pmd_process_private *process_private;
 
        if (!rxq)
@@ -1198,9 +1249,9 @@ tap_rx_queue_release(void *queue)
 }
 
 static void
-tap_tx_queue_release(void *queue)
+tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct tx_queue *txq = queue;
+       struct tx_queue *txq = dev->data->tx_queues[qid];
        struct pmd_process_private *process_private;
 
        if (!txq)
@@ -1224,15 +1275,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
                tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
                if (!(ifr.ifr_flags & IFF_UP) ||
                    !(ifr.ifr_flags & IFF_RUNNING)) {
-                       dev_link->link_status = ETH_LINK_DOWN;
+                       dev_link->link_status = RTE_ETH_LINK_DOWN;
                        return 0;
                }
        }
        tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
        dev_link->link_status =
                ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-                ETH_LINK_UP :
-                ETH_LINK_DOWN);
+                RTE_ETH_LINK_UP :
+                RTE_ETH_LINK_DOWN);
        return 0;
 }
 
@@ -1419,7 +1470,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
        int ret;
 
        /* initialize GSO context */
-       gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+       gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
        if (!pmd->gso_ctx_mp) {
                /*
                 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1634,9 +1685,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
        txq->csum = !!(offloads &
-                       (DEV_TX_OFFLOAD_IPV4_CKSUM |
-                        DEV_TX_OFFLOAD_UDP_CKSUM |
-                        DEV_TX_OFFLOAD_TCP_CKSUM));
+                       (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                        RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                        RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
        ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
        if (ret == -1)
@@ -1655,13 +1706,8 @@ tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_mtu = mtu };
-       int err = 0;
-
-       err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
-       if (!err)
-               dev->data->mtu = mtu;
 
-       return err;
+       return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
 static int
@@ -1696,7 +1742,9 @@ tap_dev_intr_handler(void *cb_arg)
        struct rte_eth_dev *dev = cb_arg;
        struct pmd_internals *pmd = dev->data->dev_private;
 
-       tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+       if (rte_intr_fd_get(pmd->intr_handle) >= 0)
+               tap_nl_recv(rte_intr_fd_get(pmd->intr_handle),
+                           tap_nl_msg_handler, dev);
 }
 
 static int
@@ -1707,22 +1755,22 @@ tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
 
        /* In any case, disable interrupt if the conf is no longer there. */
        if (!dev->data->dev_conf.intr_conf.lsc) {
-               if (pmd->intr_handle.fd != -1) {
+               if (rte_intr_fd_get(pmd->intr_handle) != -1)
                        goto clean;
-               }
+
                return 0;
        }
        if (set) {
-               pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
-               if (unlikely(pmd->intr_handle.fd == -1))
+               rte_intr_fd_set(pmd->intr_handle, tap_nl_init(RTMGRP_LINK));
+               if (unlikely(rte_intr_fd_get(pmd->intr_handle) == -1))
                        return -EBADF;
                return rte_intr_callback_register(
-                       &pmd->intr_handle, tap_dev_intr_handler, dev);
+                       pmd->intr_handle, tap_dev_intr_handler, dev);
        }
 
 clean:
        do {
-               ret = rte_intr_callback_unregister(&pmd->intr_handle,
+               ret = rte_intr_callback_unregister(pmd->intr_handle,
                        tap_dev_intr_handler, dev);
                if (ret >= 0) {
                        break;
@@ -1735,8 +1783,10 @@ clean:
                }
        } while (true);
 
-       tap_nl_final(pmd->intr_handle.fd);
-       pmd->intr_handle.fd = -1;
+       if (rte_intr_fd_get(pmd->intr_handle) >= 0) {
+               tap_nl_final(rte_intr_fd_get(pmd->intr_handle));
+               rte_intr_fd_set(pmd->intr_handle, -1);
+       }
 
        return 0;
 }
@@ -1793,7 +1843,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
                  struct rte_eth_fc_conf *fc_conf)
 {
-       fc_conf->mode = RTE_FC_NONE;
+       fc_conf->mode = RTE_ETH_FC_NONE;
        return 0;
 }
 
@@ -1801,7 +1851,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
                  struct rte_eth_fc_conf *fc_conf)
 {
-       if (fc_conf->mode != RTE_FC_NONE)
+       if (fc_conf->mode != RTE_ETH_FC_NONE)
                return -ENOTSUP;
        return 0;
 }
@@ -1951,6 +2001,13 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
                goto error_exit;
        }
 
+       /* Allocate interrupt instance */
+       pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (pmd->intr_handle == NULL) {
+               TAP_LOG(ERR, "Failed to allocate intr handle");
+               goto error_exit;
+       }
+
        /* Setup some default values */
        data = dev->data;
        data->dev_private = pmd;
@@ -1968,9 +2025,9 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
        dev->rx_pkt_burst = pmd_rx_burst;
        dev->tx_pkt_burst = pmd_tx_burst;
 
-       pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
-       pmd->intr_handle.fd = -1;
-       dev->intr_handle = &pmd->intr_handle;
+       rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
+       rte_intr_fd_set(pmd->intr_handle, -1);
+       dev->intr_handle = pmd->intr_handle;
 
        /* Presetup the fds to -1 as being not valid */
        for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
@@ -2121,6 +2178,7 @@ error_exit:
        /* mac_addrs must not be freed alone because part of dev_private */
        dev->data->mac_addrs = NULL;
+       /* free the intr handle before release_port() frees dev_private (pmd) */
+       rte_intr_instance_free(pmd->intr_handle);
        rte_eth_dev_release_port(dev);
 
 error_exit_nodev:
        TAP_LOG(ERR, "%s Unable to initialize %s",
@@ -2295,7 +2353,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
                        }
                }
        }
-       pmd_link.link_speed = ETH_SPEED_NUM_10G;
+       pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
        TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2465,11 +2523,21 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
                ret = tap_mp_attach_queues(name, eth_dev);
                if (ret != 0)
                        return -1;
+
+               if (!tap_devices_count) {
+                       ret = rte_mp_action_register(TAP_MP_REQ_START_RXTX, tap_mp_req_start_rxtx);
+                       if (ret < 0 && rte_errno != ENOTSUP) {
+                               TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
+                                       strerror(rte_errno));
+                               return -1;
+                       }
+               }
+               tap_devices_count++;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }
 
-       speed = ETH_SPEED_NUM_10G;
+       speed = RTE_ETH_SPEED_NUM_10G;
 
        /* use tap%d which causes kernel to choose next available */
        strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
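For completeness, the PMD touched by this patch can be instantiated either
with the usual --vdev=net_tap0,iface=... EAL argument or programmatically;
a minimal sketch using the hotplug API (the interface name is illustrative):

    #include <rte_bus_vdev.h>

    if (rte_vdev_init("net_tap0", "iface=dpdktap0") < 0)
            rte_exit(EXIT_FAILURE, "cannot create TAP vdev\n");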