X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fthunderx%2Fnicvf_ethdev.c;h=b673b471655ae8250ab3f065fb527cfb3370ee54;hb=fe33fe37fd8b63e140e9ed91443dc44fa62472d4;hp=d34938c64b1da47fafc34f34556929906ba6f844;hpb=ffc905f3b856b96c6d8d864dba4052104fae4064;p=dpdk.git

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index d34938c64b..b673b47165 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -15,7 +15,6 @@
 #include <sys/queue.h>
 
 #include <rte_alarm.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
@@ -56,38 +55,27 @@ RTE_INIT(nicvf_init_log);
 static void
 nicvf_init_log(void)
 {
-	nicvf_logtype_mbox = rte_log_register("pmd.nicvf.mbox");
+	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
 	if (nicvf_logtype_mbox >= 0)
 		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
 
-	nicvf_logtype_init = rte_log_register("pmd.nicvf.init");
+	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
 	if (nicvf_logtype_init >= 0)
 		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
 
-	nicvf_logtype_driver = rte_log_register("pmd.nicvf.driver");
+	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
 	if (nicvf_logtype_driver >= 0)
 		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
 }
 
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
-			       struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+			 struct rte_eth_link *link)
 {
-	struct rte_eth_link *dst = &dev->data->dev_link;
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-		*(uint64_t *)src) == 0)
-		return -1;
+	memset(link, 0, sizeof(*link));
 
-	return 0;
-}
+	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
 
-static inline void
-nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
-{
-	link->link_status = nic->link_up;
-	link->link_duplex = ETH_LINK_AUTONEG;
 	if (nic->duplex == NICVF_HALF_DUPLEX)
 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
@@ -101,12 +89,17 @@ nicvf_interrupt(void *arg)
 {
 	struct rte_eth_dev *dev = arg;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
+	struct rte_eth_link link;
 
 	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
-		if (dev->data->dev_conf.intr_conf.lsc)
-			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-					      NULL);
+		if (dev->data->dev_conf.intr_conf.lsc) {
+			nicvf_link_status_update(nic, &link);
+			rte_eth_linkstatus_set(dev, &link);
+
+			_rte_eth_dev_callback_process(dev,
+						      RTE_ETH_EVENT_INTR_LSC,
+						      NULL);
+		}
 	}
 
 	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -153,24 +146,23 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	if (wait_to_complete) {
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
-			memset(&link, 0, sizeof(link));
-			nicvf_set_eth_link_status(nic, &link);
-			if (link.link_status)
+			nicvf_link_status_update(nic, &link);
+			if (link.link_status == ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
 	} else {
-		memset(&link, 0, sizeof(link));
-		nicvf_set_eth_link_status(nic, &link);
+		nicvf_link_status_update(nic, &link);
 	}
-	return nicvf_atomic_write_link_status(dev, &link);
+
+	return rte_eth_linkstatus_set(dev, &link);
 }
 
 static int
 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
 	size_t i;
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
@@ -188,7 +180,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 * Refuse mtu that requires the support of scattered packets
 	 * when this feature has not been enabled before.
 	 */
-	if (!dev->data->scattered_rx &&
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
 	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
 		return -EINVAL;
 
@@ -202,11 +194,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	else
 		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
 		return -EINVAL;
 
-	/* Update max frame size */
-	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
+	/* Update max_rx_pkt_len */
+	rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
 	nic->mtu = mtu;
 
 	for (i = 0; i < nic->sqs_count; i++)
@@ -1316,7 +1308,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	}
 
 	/* Mempool memory must be physically contiguous */
-	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
 		return -EINVAL;
 	}
@@ -1408,8 +1400,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	PMD_INIT_FUNC_TRACE();
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
 	/* Autonegotiation may be disabled */
 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
@@ -1418,7 +1408,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
 	dev_info->max_rx_queues =
 			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
 	dev_info->max_tx_queues =
@@ -1751,8 +1741,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	/* Setup MTU based on max_rx_pkt_len or default */
 	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
 		dev->data->dev_conf.rxmode.max_rx_pkt_len
-			- ETHER_HDR_LEN - ETHER_CRC_LEN
-		: ETHER_MTU;
+			- ETHER_HDR_LEN : ETHER_MTU;
 	if (nicvf_dev_set_mtu(dev, mtu)) {
 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
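
Note on the APIs this patch moves to (the sketches below are editorial illustrations, not part of the diff):

The link-status hunks replace the PMD's hand-rolled rte_atomic64_cmpset() store to dev->data->dev_link with the rte_eth_linkstatus_set() helper from rte_ethdev_driver.h, introduced in DPDK 18.05. A minimal sketch of the resulting eth_link_update callback pattern follows; struct my_hw, my_hw_link_up() and the fixed speed/duplex values are hypothetical stand-ins (nicvf derives them from nic->link_up, nic->duplex and nic->speed), not code from this patch.

	/* Sketch of the eth_link_update pattern adopted above, assuming the
	 * DPDK 18.05-era driver API (rte_ethdev_driver.h). struct my_hw and
	 * my_hw_link_up() are hypothetical device-private state accessors. */
	#include <stdbool.h>
	#include <string.h>
	#include <rte_ethdev_driver.h>

	struct my_hw {
		bool link_up;	/* latched from a link-change interrupt/mbox */
	};

	static bool
	my_hw_link_up(struct my_hw *hw)
	{
		return hw->link_up;
	}

	static int
	my_dev_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete __rte_unused)
	{
		struct my_hw *hw = dev->data->dev_private;
		struct rte_eth_link link;

		/* Build the new status; memset first so padding and unset
		 * fields compare cleanly inside the 64-bit atomic store. */
		memset(&link, 0, sizeof(link));
		link.link_status = my_hw_link_up(hw) ?
					ETH_LINK_UP : ETH_LINK_DOWN;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;	/* illustrative */
		link.link_speed = ETH_SPEED_NUM_10G;		/* illustrative */
		link.link_autoneg = ETH_LINK_AUTONEG;

		/* rte_eth_linkstatus_set() performs the atomic write that the
		 * removed nicvf_atomic_write_link_status() open-coded; in the
		 * 18.05 helper it returns 0 when the status changed and -1
		 * when it did not, which is the value an eth_link_update
		 * callback is expected to report. */
		return rte_eth_linkstatus_set(dev, &link);
	}

The MTU hunks fold the old "mtu + ETHER_HDR_LEN + ETHER_CRC_LEN" arithmetic into a single NIC_HW_L2_OVERHEAD constant defined elsewhere in the driver headers. Its definition is not shown in this diff; judging by the expression it replaces and the scatter check that still adds VLAN tag room, one plausible (hypothetical) form is:

	/* Hypothetical; the real definition lives in nicvf_ethdev.h. */
	#define NIC_HW_L2_OVERHEAD	(ETHER_HDR_LEN + ETHER_CRC_LEN + \
					 2 * VLAN_TAG_SIZE)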