+tap_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ /* ifr_flags names the flag the SIOCSIFFLAGS calls below act on */
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+ int ret;
+
+ /* Clear IFF_PROMISC on the tap netdevice and, if configured, the remote */
+ ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (ret != 0)
+ return ret;
+
+ if (pmd->remote_if_index && !pmd->flow_isolate) {
+ /*
+ * NOTE(review): promiscuous is cleared before destroying the
+ * implicit flow — presumably the flow layer consults it;
+ * confirm against tap_flow_implicit_destroy().
+ */
+ dev->data->promiscuous = 0;
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
+ if (ret != 0) {
+ /* Rollback promisc flag */
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ /*
+ * rte_eth_dev_promiscuous_disable() rollback
+ * dev->data->promiscuous in the case of failure.
+ */
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+tap_allmulti_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+ int err;
+
+ /* Raise IFF_ALLMULTI on the tap netdevice (and the remote, if any) */
+ err = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (err != 0)
+ return err;
+
+ /* No remote netdevice, or flow isolation active: nothing more to do */
+ if (!pmd->remote_if_index || pmd->flow_isolate)
+ return 0;
+
+ dev->data->all_multicast = 1;
+ err = tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
+ if (err == 0)
+ return 0;
+
+ /* Rollback allmulti flag */
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ /*
+ * rte_eth_dev_allmulticast_enable() rollback
+ * dev->data->all_multicast in the case of failure.
+ */
+ return err;
+}
+
+static int
+tap_allmulti_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+ int err;
+
+ /* Drop IFF_ALLMULTI on the tap netdevice (and the remote, if any) */
+ err = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (err != 0)
+ return err;
+
+ /* No remote netdevice, or flow isolation active: nothing more to do */
+ if (!pmd->remote_if_index || pmd->flow_isolate)
+ return 0;
+
+ dev->data->all_multicast = 0;
+ err = tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
+ if (err == 0)
+ return 0;
+
+ /* Rollback allmulti flag */
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ /*
+ * rte_eth_dev_allmulticast_disable() rollback
+ * dev->data->all_multicast in the case of failure.
+ */
+ return err;
+}
+
+static int
+/*
+ * Set the device MAC address, locally and — when it differs there too —
+ * on the remote netdevice, then refresh the implicit MAC redirection rule.
+ * Returns 0 on success or a negative errno-style value.
+ */
+tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ enum ioctl_mode mode = LOCAL_ONLY;
+ struct ifreq ifr;
+ int ret;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
+ /* Fixed log text: original read "can't MAC address" (missing verb) */
+ TAP_LOG(ERR, "%s: can't set MAC address for TUN",
+ dev->device->name);
+ return -ENOTSUP;
+ }
+
+ if (rte_is_zero_ether_addr(mac_addr)) {
+ TAP_LOG(ERR, "%s: can't set an empty MAC address",
+ dev->device->name);
+ return -EINVAL;
+ }
+ /* Check the actual current MAC address on the tap netdevice */
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
+ if (ret < 0)
+ return ret;
+ /* Already set locally: nothing to do */
+ if (rte_is_same_ether_addr(
+ (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ return 0;
+ /* Check the current MAC address on the remote */
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
+ if (ret < 0)
+ return ret;
+ /* Only update the remote too when its MAC differs from the target */
+ if (!rte_is_same_ether_addr(
+ (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ mode = LOCAL_AND_REMOTE;
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);
+ ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
+ if (ret < 0)
+ return ret;
+ rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);
+ if (pmd->remote_if_index && !pmd->flow_isolate) {
+ /* Replace MAC redirection rule after a MAC change */
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't delete MAC redirection rule",
+ dev->device->name);
+ return ret;
+ }
+ ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't add MAC redirection rule",
+ dev->device->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+/*
+ * Initialize the GSO context for a TX queue: look up (or create once)
+ * a per-device mbuf pool used for both direct and indirect GSO mbufs.
+ * Returns 0 on success, -1 if the pool cannot be created.
+ */
+tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
+{
+ uint32_t gso_types;
+ char pool_name[64];
+
+ /*
+ * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
+ * size per mbuf use this pool for both direct and indirect mbufs
+ */
+
+ struct rte_mempool *mp; /* Mempool for GSO packets */
+
+ /* initialize GSO context */
+ gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ /* NOTE(review): a very long device name is silently truncated here —
+ * confirm device names always fit in 64 - 3 bytes. */
+ snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
+ /* Reuse the pool if an earlier queue setup already created it
+ * (dropped redundant (const char *) cast: pool_name decays to it) */
+ mp = rte_mempool_lookup(pool_name);
+ if (!mp) {
+ mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
+ TAP_GSO_MBUF_CACHE_SIZE, 0,
+ RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
+ SOCKET_ID_ANY);
+ if (!mp) {
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* No trailing \n: other TAP_LOG calls in this file
+ * carry none (the macro supplies the newline) */
+ TAP_LOG(ERR,
+ "%s: failed to create mbuf pool for device %s",
+ pmd->name, dev->device->name);
+ return -1;
+ }
+ }
+
+ gso_ctx->direct_pool = mp;
+ gso_ctx->indirect_pool = mp;
+ gso_ctx->gso_types = gso_types;
+ gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
+ gso_ctx->flag = 0;
+
+ return 0;
+}
+
+static int
+/*
+ * Ensure a kernel fd exists for queue `qid` in direction `is_rx`.
+ * RX and TX queues of the same qid share one tap fd: if the opposite
+ * direction already has one it is dup()ed, otherwise a new tap fd is
+ * allocated via tun_alloc(). Returns the fd on success, -1 on failure.
+ */
+tap_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid,
+ int is_rx)
+{
+ int ret;
+ int *fd;
+ int *other_fd;
+ const char *dir;
+ /* NOTE(review): `pmd` and `internals` look like the same object
+ * (dev->data->dev_private) — confirm at the call sites. */
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
+ struct rx_queue *rx = &internals->rxq[qid];
+ struct tx_queue *tx = &internals->txq[qid];
+ struct rte_gso_ctx *gso_ctx;
+
+ if (is_rx) {
+ fd = &process_private->rxq_fds[qid];
+ other_fd = &process_private->txq_fds[qid];
+ dir = "rx";
+ /* GSO is a TX-only concern */
+ gso_ctx = NULL;
+ } else {
+ fd = &process_private->txq_fds[qid];
+ other_fd = &process_private->rxq_fds[qid];
+ dir = "tx";
+ gso_ctx = &tx->gso_ctx;
+ }
+ if (*fd != -1) {
+ /* fd for this queue already exists */
+ TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
+ pmd->name, *fd, dir, qid);
+ /* Skip GSO re-init for an already-set-up queue */
+ gso_ctx = NULL;
+ } else if (*other_fd != -1) {
+ /* Only other_fd exists. dup it */
+ *fd = dup(*other_fd);
+ if (*fd < 0) {
+ *fd = -1;
+ TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
+ return -1;
+ }
+ TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
+ pmd->name, *other_fd, dir, qid, *fd);
+ } else {
+ /* Both RX and TX fds do not exist (equal -1). Create fd */
+ *fd = tun_alloc(pmd, 0);
+ if (*fd < 0) {
+ *fd = -1; /* restore original value */
+ TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
+ return -1;
+ }
+ TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
+ pmd->name, dir, qid, *fd);
+ }
+
+ /* Point queue structs at the live device config */
+ tx->mtu = &dev->data->mtu;
+ rx->rxmode = &dev->data->dev_conf.rxmode;
+ if (gso_ctx) {
+ /* Only reached for a freshly created/duped TX queue fd */
+ ret = tap_gso_ctx_setup(gso_ctx, dev);
+ if (ret)
+ return -1;
+ }
+
+ tx->type = pmd->type;
+
+ return *fd;
+}
+
+static int
+tap_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,