X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftap%2Frte_eth_tap.c;h=37ac18f951cf2570ebde39cf79429a1308b7ffef;hb=0663a84524e5c63cb737cd723b4ea33493e8d17a;hp=046f17669d03d16b93e1d8f3d0ec9fd9c426fa68;hpb=7483341ae5533c5d5fa080a5d229e6f2daf03ea5;p=dpdk.git

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 046f17669d..37ac18f951 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -70,16 +70,16 @@
 #define TAP_IOV_DEFAULT_MAX 1024
 
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER | \
-			DEV_RX_OFFLOAD_IPV4_CKSUM | \
-			DEV_RX_OFFLOAD_UDP_CKSUM | \
-			DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER | \
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
 
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS | \
-			DEV_TX_OFFLOAD_IPV4_CKSUM | \
-			DEV_TX_OFFLOAD_UDP_CKSUM | \
-			DEV_TX_OFFLOAD_TCP_CKSUM | \
-			DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+			RTE_ETH_TX_OFFLOAD_TCP_TSO)
 
 static int tap_devices_count;
 
@@ -97,10 +97,10 @@ static const char *valid_arguments[] = {
 static volatile uint32_t tap_trigger;	/* Rx trigger */
 
 static struct rte_eth_link pmd_link = {
-	.link_speed = ETH_SPEED_NUM_10G,
-	.link_duplex = ETH_LINK_FULL_DUPLEX,
-	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_FIXED,
+	.link_speed = RTE_ETH_SPEED_NUM_10G,
+	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+	.link_status = RTE_ETH_LINK_DOWN,
+	.link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
 static void
@@ -340,8 +340,8 @@ tap_verify_csum(struct rte_mbuf *mbuf)
 
 		cksum = ~rte_raw_cksum(iph, l3_len);
 		mbuf->ol_flags |= cksum ?
-				PKT_RX_IP_CKSUM_BAD :
-				PKT_RX_IP_CKSUM_GOOD;
+				RTE_MBUF_F_RX_IP_CKSUM_BAD :
+				RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 	} else if (l3 == RTE_PTYPE_L3_IPV6) {
 		struct rte_ipv6_hdr *iph = l3_hdr;
 
@@ -376,7 +376,7 @@ tap_verify_csum(struct rte_mbuf *mbuf)
 			 * indicates that the sender did not
 			 * generate one [RFC 768].
 			 */
-			mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
 			return;
 		}
 	}
@@ -387,7 +387,7 @@ tap_verify_csum(struct rte_mbuf *mbuf)
 							 l4_hdr);
 		}
 		mbuf->ol_flags |= cksum_ok ?
-			PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
+			RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
 	}
 }
 
@@ -433,7 +433,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		len = readv(process_private->rxq_fds[rxq->queue_id],
 			    *rxq->iovecs,
-			    1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+			    1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
 				 rxq->nb_rx_desc : 1));
 		if (len < (int)sizeof(struct tun_pi))
 			break;
@@ -489,7 +489,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		seg->next = NULL;
 		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
 						      RTE_PTYPE_ALL_MASK);
-		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 			tap_verify_csum(mbuf);
 
 		/* account for the receive frame */
@@ -544,7 +544,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
 {
 	void *l3_hdr = packet + l2_len;
 
-	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+	if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
 		struct rte_ipv4_hdr *iph = l3_hdr;
 		uint16_t cksum;
 
@@ -552,18 +552,18 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
 		cksum = rte_raw_cksum(iph, l3_len);
 		iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
 	}
-	if (ol_flags & PKT_TX_L4_MASK) {
+	if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
 		void *l4_hdr;
 
 		l4_hdr = packet + l2_len + l3_len;
-		if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+		if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
 			*l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
-		else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+		else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
 			*l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
 		else
 			return;
 		**l4_cksum = 0;
-		if (ol_flags & PKT_TX_IPV4)
+		if (ol_flags & RTE_MBUF_F_TX_IPV4)
 			*l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
 		else
 			*l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
@@ -627,9 +627,9 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
 		nb_segs = mbuf->nb_segs;
 		if (txq->csum &&
-		    ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
-		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
-		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+		    ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
+		     (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
+		     (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
 			is_cksum = 1;
 
 			/* Support only packets with at least layer 4
@@ -719,12 +719,12 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		uint16_t hdrs_len;
 		uint64_t tso;
 
-		tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+		tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
 		if (tso) {
 			struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
 
 			/* TCP segmentation implies TCP checksum offload */
-			mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+			mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
 
 			/* gso size is calculated without RTE_ETHER_CRC_LEN */
 			hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
@@ -866,7 +866,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
 }
 
@@ -876,7 +876,7 @@ tap_link_set_up(struct rte_eth_dev *dev)
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_flags = IFF_UP };
 
-	dev->data->dev_link.link_status = ETH_LINK_UP;
+	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
@@ -956,30 +956,30 @@ tap_dev_speed_capa(void)
 	uint32_t speed = pmd_link.link_speed;
 	uint32_t capa = 0;
 
-	if (speed >= ETH_SPEED_NUM_10M)
-		capa |= ETH_LINK_SPEED_10M;
-	if (speed >= ETH_SPEED_NUM_100M)
-		capa |= ETH_LINK_SPEED_100M;
-	if (speed >= ETH_SPEED_NUM_1G)
-		capa |= ETH_LINK_SPEED_1G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_2_5G;
-	if (speed >= ETH_SPEED_NUM_5G)
-		capa |= ETH_LINK_SPEED_5G;
-	if (speed >= ETH_SPEED_NUM_10G)
-		capa |= ETH_LINK_SPEED_10G;
-	if (speed >= ETH_SPEED_NUM_20G)
-		capa |= ETH_LINK_SPEED_20G;
-	if (speed >= ETH_SPEED_NUM_25G)
-		capa |= ETH_LINK_SPEED_25G;
-	if (speed >= ETH_SPEED_NUM_40G)
-		capa |= ETH_LINK_SPEED_40G;
-	if (speed >= ETH_SPEED_NUM_50G)
-		capa |= ETH_LINK_SPEED_50G;
-	if (speed >= ETH_SPEED_NUM_56G)
-		capa |= ETH_LINK_SPEED_56G;
-	if (speed >= ETH_SPEED_NUM_100G)
-		capa |= ETH_LINK_SPEED_100G;
+	if (speed >= RTE_ETH_SPEED_NUM_10M)
+		capa |= RTE_ETH_LINK_SPEED_10M;
+	if (speed >= RTE_ETH_SPEED_NUM_100M)
+		capa |= RTE_ETH_LINK_SPEED_100M;
+	if (speed >= RTE_ETH_SPEED_NUM_1G)
+		capa |= RTE_ETH_LINK_SPEED_1G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_2_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_5G)
+		capa |= RTE_ETH_LINK_SPEED_5G;
+	if (speed >= RTE_ETH_SPEED_NUM_10G)
+		capa |= RTE_ETH_LINK_SPEED_10G;
+	if (speed >= RTE_ETH_SPEED_NUM_20G)
+		capa |= RTE_ETH_LINK_SPEED_20G;
+	if (speed >= RTE_ETH_SPEED_NUM_25G)
+		capa |= RTE_ETH_LINK_SPEED_25G;
+	if (speed >= RTE_ETH_SPEED_NUM_40G)
+		capa |= RTE_ETH_LINK_SPEED_40G;
+	if (speed >= RTE_ETH_SPEED_NUM_50G)
+		capa |= RTE_ETH_LINK_SPEED_50G;
+	if (speed >= RTE_ETH_SPEED_NUM_56G)
+		capa |= RTE_ETH_LINK_SPEED_56G;
+	if (speed >= RTE_ETH_SPEED_NUM_100G)
+		capa |= RTE_ETH_LINK_SPEED_100G;
 
 	return capa;
 }
 
@@ -1006,6 +1006,7 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	 * functions together and not in partial combinations
 	 */
 	dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	return 0;
 }
 
@@ -1196,15 +1197,15 @@ tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
 		if (!(ifr.ifr_flags & IFF_UP) ||
 		    !(ifr.ifr_flags & IFF_RUNNING)) {
-			dev_link->link_status = ETH_LINK_DOWN;
+			dev_link->link_status = RTE_ETH_LINK_DOWN;
 			return 0;
 		}
 	}
 	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
 	dev_link->link_status =
 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
-		 ETH_LINK_UP :
-		 ETH_LINK_DOWN);
+		 RTE_ETH_LINK_UP :
+		 RTE_ETH_LINK_DOWN);
 	return 0;
 }
 
@@ -1391,7 +1392,7 @@ tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
 	int ret;
 
 	/* initialize GSO context */
-	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
 	if (!pmd->gso_ctx_mp) {
 		/*
 		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
@@ -1606,9 +1607,9 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->csum = !!(offloads &
-		       (DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM));
+		       (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
 
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
@@ -1627,13 +1628,8 @@ tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct pmd_internals *pmd = dev->data->dev_private;
 	struct ifreq ifr = { .ifr_mtu = mtu };
-	int err = 0;
 
-	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
-	if (!err)
-		dev->data->mtu = mtu;
-
-	return err;
+	return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
 static int
@@ -1668,7 +1664,8 @@ tap_dev_intr_handler(void *cb_arg)
 	struct rte_eth_dev *dev = cb_arg;
 	struct pmd_internals *pmd = dev->data->dev_private;
 
-	tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+	tap_nl_recv(rte_intr_fd_get(pmd->intr_handle),
+		    tap_nl_msg_handler, dev);
 }
 
 static int
@@ -1679,22 +1676,22 @@ tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
 
 	/* In any case, disable interrupt if the conf is no longer there. */
 	if (!dev->data->dev_conf.intr_conf.lsc) {
-		if (pmd->intr_handle.fd != -1) {
+		if (rte_intr_fd_get(pmd->intr_handle) != -1)
 			goto clean;
-		}
+
 		return 0;
 	}
 
 	if (set) {
-		pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
-		if (unlikely(pmd->intr_handle.fd == -1))
+		rte_intr_fd_set(pmd->intr_handle, tap_nl_init(RTMGRP_LINK));
+		if (unlikely(rte_intr_fd_get(pmd->intr_handle) == -1))
 			return -EBADF;
 		return rte_intr_callback_register(
-			&pmd->intr_handle, tap_dev_intr_handler, dev);
+			pmd->intr_handle, tap_dev_intr_handler, dev);
 	}
 
 clean:
 	do {
-		ret = rte_intr_callback_unregister(&pmd->intr_handle,
+		ret = rte_intr_callback_unregister(pmd->intr_handle,
 			tap_dev_intr_handler, dev);
 		if (ret >= 0) {
 			break;
@@ -1707,8 +1704,8 @@ clean:
 		}
 	} while (true);
 
-	tap_nl_final(pmd->intr_handle.fd);
-	pmd->intr_handle.fd = -1;
+	tap_nl_final(rte_intr_fd_get(pmd->intr_handle));
+	rte_intr_fd_set(pmd->intr_handle, -1);
 
 	return 0;
 }
 
@@ -1765,7 +1762,7 @@ static int
 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	fc_conf->mode = RTE_FC_NONE;
+	fc_conf->mode = RTE_ETH_FC_NONE;
 	return 0;
 }
 
@@ -1773,7 +1770,7 @@ static int
 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
 		  struct rte_eth_fc_conf *fc_conf)
 {
-	if (fc_conf->mode != RTE_FC_NONE)
+	if (fc_conf->mode != RTE_ETH_FC_NONE)
 		return -ENOTSUP;
 	return 0;
 }
 
@@ -1923,6 +1920,13 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 		goto error_exit;
 	}
 
+	/* Allocate interrupt instance */
+	pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+	if (pmd->intr_handle == NULL) {
+		TAP_LOG(ERR, "Failed to allocate intr handle");
+		goto error_exit;
+	}
+
 	/* Setup some default values */
 	data = dev->data;
 	data->dev_private = pmd;
@@ -1940,9 +1944,9 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 	dev->rx_pkt_burst = pmd_rx_burst;
 	dev->tx_pkt_burst = pmd_tx_burst;
 
-	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
-	pmd->intr_handle.fd = -1;
-	dev->intr_handle = &pmd->intr_handle;
+	rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
+	rte_intr_fd_set(pmd->intr_handle, -1);
+	dev->intr_handle = pmd->intr_handle;
 
 	/* Presetup the fds to -1 as being not valid */
 	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
@@ -2093,6 +2097,7 @@ error_exit:
 	/* mac_addrs must not be freed alone because part of dev_private */
 	dev->data->mac_addrs = NULL;
 	rte_eth_dev_release_port(dev);
+	rte_intr_instance_free(pmd->intr_handle);
 
 error_exit_nodev:
 	TAP_LOG(ERR, "%s Unable to initialize %s",
@@ -2267,7 +2272,7 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
 			}
 		}
 	}
-	pmd_link.link_speed = ETH_SPEED_NUM_10G;
+	pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
 
 	TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
 
@@ -2441,7 +2446,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 		return 0;
 	}
 
-	speed = ETH_SPEED_NUM_10G;
+	speed = RTE_ETH_SPEED_NUM_10G;
 
 	/* use tap%d which causes kernel to choose next available */
 	strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
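
The hunks above fall into two groups: a mechanical rename of ethdev and mbuf
constants to their RTE_ETH_/RTE_MBUF_F_ prefixed forms, and a conversion of the
PMD to the opaque interrupt-handle API, where struct rte_intr_handle is no
longer embedded in the driver's private data and poked directly but allocated
and accessed through getters and setters. Below is a minimal sketch of that
second pattern, distilled from the hunks above; it is not part of the patch,
and the my_pmd_* names are hypothetical.

#include <errno.h>
#include <rte_interrupts.h>

struct my_pmd_private {
	/* was: struct rte_intr_handle intr_handle; (embedded, direct access) */
	struct rte_intr_handle *intr_handle;
};

static int
my_pmd_intr_init(struct my_pmd_private *pmd)
{
	/* allocate a shared instance so it is usable from secondary processes */
	pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (pmd->intr_handle == NULL)
		return -ENOMEM;

	/* was: pmd->intr_handle.type = RTE_INTR_HANDLE_EXT; */
	rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
	/* was: pmd->intr_handle.fd = -1; */
	rte_intr_fd_set(pmd->intr_handle, -1);
	return 0;
}

static void
my_pmd_intr_close(struct my_pmd_private *pmd)
{
	/* was: direct reads/writes of pmd->intr_handle.fd */
	if (rte_intr_fd_get(pmd->intr_handle) != -1)
		rte_intr_fd_set(pmd->intr_handle, -1);
	rte_intr_instance_free(pmd->intr_handle);
	pmd->intr_handle = NULL;
}

Callback registration follows the same shape: rte_intr_callback_register() and
rte_intr_callback_unregister() now take the handle pointer itself rather than
the address of an embedded struct, which is why the diff drops the & in those
calls.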