diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 4ca2ff61ca..99fcd516bd 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -15,7 +15,6 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -24,7 +23,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -56,38 +55,27 @@ RTE_INIT(nicvf_init_log);
 static void
 nicvf_init_log(void)
 {
-	nicvf_logtype_mbox = rte_log_register("pmd.nicvf.mbox");
+	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
 	if (nicvf_logtype_mbox >= 0)
 		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
 
-	nicvf_logtype_init = rte_log_register("pmd.nicvf.init");
+	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
 	if (nicvf_logtype_init >= 0)
 		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
 
-	nicvf_logtype_driver = rte_log_register("pmd.nicvf.driver");
+	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
 	if (nicvf_logtype_driver >= 0)
 		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
 }
 
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
-			       struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+			 struct rte_eth_link *link)
 {
-	struct rte_eth_link *dst = &dev->data->dev_link;
-	struct rte_eth_link *src = link;
+	memset(link, 0, sizeof(*link));
 
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
+	link->link_status = nic->link_up ?
ETH_LINK_UP : ETH_LINK_DOWN; -static inline void -nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link) -{ - link->link_status = nic->link_up; - link->link_duplex = ETH_LINK_AUTONEG; if (nic->duplex == NICVF_HALF_DUPLEX) link->link_duplex = ETH_LINK_HALF_DUPLEX; else if (nic->duplex == NICVF_FULL_DUPLEX) @@ -101,12 +89,17 @@ nicvf_interrupt(void *arg) { struct rte_eth_dev *dev = arg; struct nicvf *nic = nicvf_pmd_priv(dev); + struct rte_eth_link link; if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) { - if (dev->data->dev_conf.intr_conf.lsc) - nicvf_set_eth_link_status(nic, &dev->data->dev_link); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, - NULL, NULL); + if (dev->data->dev_conf.intr_conf.lsc) { + nicvf_link_status_update(nic, &link); + rte_eth_linkstatus_set(dev, &link); + + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } } rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, @@ -153,25 +146,25 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) if (wait_to_complete) { /* rte_eth_link_get() might need to wait up to 9 seconds */ for (i = 0; i < MAX_CHECK_TIME; i++) { - memset(&link, 0, sizeof(link)); - nicvf_set_eth_link_status(nic, &link); - if (link.link_status) + nicvf_link_status_update(nic, &link); + if (link.link_status == ETH_LINK_UP) break; rte_delay_ms(CHECK_INTERVAL); } } else { - memset(&link, 0, sizeof(link)); - nicvf_set_eth_link_status(nic, &link); + nicvf_link_status_update(nic, &link); } - return nicvf_atomic_write_link_status(dev, &link); + + return rte_eth_linkstatus_set(dev, &link); } static int nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct nicvf *nic = nicvf_pmd_priv(dev); - uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD; size_t i; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; PMD_INIT_FUNC_TRACE(); @@ -187,7 +180,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Refuse mtu that requires the support of scattered packets * when this feature has not been enabled before. 
*/ - if (!dev->data->scattered_rx && + if (dev->data->dev_started && !dev->data->scattered_rx && (frame_size + 2 * VLAN_TAG_SIZE > buffsz)) return -EINVAL; @@ -197,15 +190,15 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; - if (nicvf_mbox_update_hw_max_frs(nic, frame_size)) + if (nicvf_mbox_update_hw_max_frs(nic, mtu)) return -EINVAL; - /* Update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size; + /* Update max_rx_pkt_len */ + rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN; nic->mtu = mtu; for (i = 0; i < nic->sqs_count; i++) @@ -896,7 +889,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; - if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) { + if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) { multiseg = true; break; } @@ -935,9 +928,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, const struct rte_eth_txconf *tx_conf) { uint16_t tx_free_thresh; - uint8_t is_single_pool; + bool is_single_pool; struct nicvf_txq *txq; struct nicvf *nic = nicvf_pmd_priv(dev); + uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -1000,11 +994,12 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, txq->nic = nic; txq->queue_id = qidx; txq->tx_free_thresh = tx_free_thresh; - txq->txq_flags = tx_conf->txq_flags; txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD; txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR; - is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT && - txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + txq->offloads = offloads; + + is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE); /* Choose optimum free threshold value for multipool case */ if (!is_single_pool) { @@ -1035,9 +1030,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, nicvf_tx_queue_reset(txq); - PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64, + PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p" + " phys=0x%" PRIx64 " offloads=0x%" PRIx64, nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc, - txq->phys); + txq->phys, txq->offloads); dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq; dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = @@ -1263,6 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, uint16_t rx_free_thresh; struct nicvf_rxq *rxq; struct nicvf *nic = nicvf_pmd_priv(dev); + uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -1283,7 +1280,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, } /* Mempool memory must be physically contiguous */ - if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) { + if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) { PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous"); return -EINVAL; } @@ -1356,9 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, nicvf_rx_queue_reset(rxq); - PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64, + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)" + " phy=0x%" PRIx64 " offloads=0x%" PRIx64, nicvf_netdev_qidx(nic, qidx), rxq, mp->name, 
nb_desc, - rte_mempool_avail_count(mp), rxq->phys); + rte_mempool_avail_count(mp), rxq->phys, offloads); dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = @@ -1374,8 +1373,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) PMD_INIT_FUNC_TRACE(); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - /* Autonegotiation may be disabled */ dev_info->speed_capa = ETH_LINK_SPEED_FIXED; dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M | @@ -1384,7 +1381,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_40G; dev_info->min_rx_bufsize = ETHER_MIN_MTU; - dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; + dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN; dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); dev_info->max_tx_queues = @@ -1392,13 +1389,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = 1; dev_info->max_vfs = pci_dev->max_vfs; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA; + dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA; + dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA; + dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA; dev_info->reta_size = nic->rss_info.rss_size; dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; @@ -1409,16 +1403,15 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }; dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, - .txq_flags = - ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOREFCOUNT | - ETH_TXQ_FLAGS_NOMULTMEMP | - ETH_TXQ_FLAGS_NOVLANOFFL | - ETH_TXQ_FLAGS_NOXSUMSCTP, + .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM, }; } @@ -1459,6 +1452,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz) struct rte_mbuf *mbuf; uint16_t rx_start, rx_end; uint16_t tx_start, tx_end; + bool vlan_strip; PMD_INIT_FUNC_TRACE(); @@ -1578,7 +1572,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz) nic->rbdr->tail, nb_rbdr_desc, nic->vf_id); /* Configure VLAN Strip */ - nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip); + vlan_strip = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP); + nicvf_vlan_hw_strip(nic, vlan_strip); /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data * to the 64bit memory address. @@ -1706,14 +1702,13 @@ nicvf_dev_start(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buffsz) dev->data->scattered_rx = 1; - if (rx_conf->enable_scatter) + if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0) dev->data->scattered_rx = 1; /* Setup MTU based on max_rx_pkt_len or default */ - mtu = dev->data->dev_conf.rxmode.jumbo_frame ? + mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ? 
dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN - : ETHER_MTU; + - ETHER_HDR_LEN : ETHER_MTU; if (nicvf_dev_set_mtu(dev, mtu)) { PMD_INIT_LOG(ERR, "Failed to set default mtu size"); @@ -1892,6 +1887,11 @@ nicvf_dev_configure(struct rte_eth_dev *dev) return -EINVAL; } + if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) { + PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); + rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + } + if (txmode->mq_mode) { PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported"); return -EINVAL; @@ -1903,36 +1903,11 @@ nicvf_dev_configure(struct rte_eth_dev *dev) return -EINVAL; } - if (!rxmode->hw_strip_crc) { - PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); - rxmode->hw_strip_crc = 1; - } - - if (rxmode->hw_ip_checksum) { - PMD_INIT_LOG(NOTICE, "Rxcksum not supported"); - rxmode->hw_ip_checksum = 0; - } - if (rxmode->split_hdr_size) { PMD_INIT_LOG(INFO, "Rxmode does not support split header"); return -EINVAL; } - if (rxmode->hw_vlan_filter) { - PMD_INIT_LOG(INFO, "VLAN filter not supported"); - return -EINVAL; - } - - if (rxmode->hw_vlan_extend) { - PMD_INIT_LOG(INFO, "VLAN extended not supported"); - return -EINVAL; - } - - if (rxmode->enable_lro) { - PMD_INIT_LOG(INFO, "LRO not supported"); - return -EINVAL; - } - if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported"); return -EINVAL;
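
Notes on the API migrations above:

The link-handling hunks drop the driver's hand-rolled rte_atomic64_cmpset() update in favour of rte_eth_linkstatus_set(), the helper added to rte_ethdev_driver.h in DPDK 18.05. A minimal sketch of the resulting link_update pattern, with a made-up my_hw_read_link() standing in for nicvf's PF-mailbox poll:

#include <string.h>
#include <rte_ethdev_driver.h>

/* Stub standing in for the device-specific link poll. */
static int
my_hw_read_link(struct rte_eth_dev *dev __rte_unused)
{
	return 1; /* pretend the link is up */
}

static int
my_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status  = my_hw_read_link(dev) ? ETH_LINK_UP : ETH_LINK_DOWN;
	link.link_duplex  = ETH_LINK_FULL_DUPLEX;
	link.link_speed   = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Atomically swaps dev->data->dev_link and returns 0 when the
	 * status changed, -1 when it did not -- the same value the
	 * link_update callback is expected to return. */
	return rte_eth_linkstatus_set(dev, &link);
}

The same helper is what lets nicvf_interrupt() publish a new link state safely before firing the RTE_ETH_EVENT_INTR_LSC callback.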
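
The MTU hunks rework the frame-size bookkeeping: nicvf_mbox_update_hw_max_frs() now receives the MTU itself rather than the computed frame size, and ethdev's max_rx_pkt_len is kept at mtu + ETHER_HDR_LEN. A sketch of the arithmetic; the NIC_HW_L2_OVERHEAD definition below is an assumption (the real one lives in nicvf_ethdev.h, which this diff does not show):

#include <stdint.h>
#include <rte_ether.h>	/* ETHER_HDR_LEN, ETHER_CRC_LEN, ETHER_MTU */

#define VLAN_TAG_SIZE 4	/* 802.1q tag, as in the driver headers */
/* Assumed: Ethernet header + CRC + two stacked VLAN tags. */
#define NIC_HW_L2_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * VLAN_TAG_SIZE)

/* What the hardware must accept for a given MTU: 1500 -> 1526. */
static inline uint32_t
mtu_to_hw_frame_size(uint16_t mtu)
{
	return (uint32_t)mtu + NIC_HW_L2_OVERHEAD;
}

/* What ethdev tracks in max_rx_pkt_len: 1500 -> 1514. */
static inline uint32_t
mtu_to_max_rx_pkt_len(uint16_t mtu)
{
	return (uint32_t)mtu + ETHER_HDR_LEN;
}

/* Inverse used by nicvf_dev_start() when jumbo frames are enabled. */
static inline uint16_t
max_rx_pkt_len_to_mtu(uint32_t max_rx_pkt_len)
{
	return (uint16_t)(max_rx_pkt_len - ETHER_HDR_LEN);
}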
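
On the Tx side, the removed ETH_TXQ_FLAGS_NOREFCOUNT/ETH_TXQ_FLAGS_NOMULTMEMP pair is now expressed as the single DEV_TX_OFFLOAD_MBUF_FAST_FREE flag, and queue setup ORs the per-queue offloads with the port-level txmode offloads before testing it. A hedged application-side sketch (port and queue parameters are placeholders):

#include <string.h>
#include <rte_ethdev.h>

static int
configure_tx_fast_free(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request the single-pool fast path only when the PMD reports it.
	 * The application thereby promises that every transmitted mbuf
	 * comes from one mempool and is not reference-counted. */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

The same request can also be made per queue through tx_conf->offloads; as the queue-setup hunk shows, the PMD combines both masks, so either level enables the single-pool free-threshold tuning.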