diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b93d45712e..fc334cf734 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -23,8 +23,8 @@
 #include <rte_dev.h>
 #include <rte_eal.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_interrupts.h>
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -44,31 +44,16 @@
 #include "nicvf_svf.h"
 #include "nicvf_logs.h"
 
-int nicvf_logtype_mbox;
-int nicvf_logtype_init;
-int nicvf_logtype_driver;
-
-static void nicvf_dev_stop(struct rte_eth_dev *dev);
+static int nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
 static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
 static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
-RTE_INIT(nicvf_init_log)
-{
-	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
-	if (nicvf_logtype_mbox >= 0)
-		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
-
-	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
-	if (nicvf_logtype_init >= 0)
-		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
-
-	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
-	if (nicvf_logtype_driver >= 0)
-		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_mbox, mbox, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE);
 
 static void
 nicvf_link_status_update(struct nicvf *nic,
@@ -76,14 +61,14 @@ nicvf_link_status_update(struct nicvf *nic,
 {
 	memset(link, 0, sizeof(*link));
 
-	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	if (nic->duplex == NICVF_HALF_DUPLEX)
-		link->link_duplex = ETH_LINK_HALF_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
-		link->link_duplex = ETH_LINK_FULL_DUPLEX;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_AUTONEG;
+	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 }
 
 static void
@@ -98,9 +83,9 @@ nicvf_interrupt(void *arg)
 			nicvf_link_status_update(nic, &link);
 			rte_eth_linkstatus_set(dev, &link);
 
-			_rte_eth_dev_callback_process(dev,
-						      RTE_ETH_EVENT_INTR_LSC,
-						      NULL);
+			rte_eth_dev_callback_process(dev,
+						     RTE_ETH_EVENT_INTR_LSC,
+						     NULL);
 		}
 	}
 
@@ -149,7 +134,7 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		/* rte_eth_link_get() might need to wait up to 9 seconds */
 		for (i = 0; i < MAX_CHECK_TIME; i++) {
 			nicvf_link_status_update(nic, &link);
-			if (link.link_status == ETH_LINK_UP)
+			if (link.link_status == RTE_ETH_LINK_UP)
 				break;
 			rte_delay_ms(CHECK_INTERVAL);
 		}
@@ -166,16 +151,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
 	size_t i;
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (frame_size > NIC_HW_MAX_FRS)
-		return -EINVAL;
-
-	if (frame_size < NIC_HW_MIN_FRS)
-		return -EINVAL;
-
 	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
 
 	/*
@@ -191,16 +169,9 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
 		return -EINVAL;
 
-	if (frame_size > RTE_ETHER_MAX_LEN)
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	else
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
 	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
 		return -EINVAL;
 
-	/* Update max_rx_pkt_len */
-	rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
 	nic->mtu = mtu;
 
 	for (i = 0; i < nic->sqs_count; i++)
@@ -419,35 +390,35 @@ nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
 {
 	uint64_t nic_rss = 0;
 
-	if (ethdev_rss & ETH_RSS_IPV4)
+	if (ethdev_rss & RTE_ETH_RSS_IPV4)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_IPV6)
+	if (ethdev_rss & RTE_ETH_RSS_IPV6)
 		nic_rss |= RSS_IP_ENA;
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
 
-	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
 
-	if (ethdev_rss & ETH_RSS_PORT)
+	if (ethdev_rss & RTE_ETH_RSS_PORT)
 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
-		if (ethdev_rss & ETH_RSS_VXLAN)
+		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
 			nic_rss |= RSS_TUN_VXLAN_ENA;
 
-		if (ethdev_rss & ETH_RSS_GENEVE)
+		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
 			nic_rss |= RSS_TUN_GENEVE_ENA;
 
-		if (ethdev_rss & ETH_RSS_NVGRE)
+		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
 			nic_rss |= RSS_TUN_NVGRE_ENA;
 	}
 
@@ -460,28 +431,28 @@ nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
 	uint64_t ethdev_rss = 0;
 
 	if (nic_rss & RSS_IP_ENA)
-		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
-				ETH_RSS_NONFRAG_IPV6_TCP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP);
 
 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
-		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
-				ETH_RSS_NONFRAG_IPV6_UDP);
+		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP);
 
 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
-		ethdev_rss |= ETH_RSS_PORT;
+		ethdev_rss |= RTE_ETH_RSS_PORT;
 
 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
 		if (nic_rss & RSS_TUN_VXLAN_ENA)
-			ethdev_rss |= ETH_RSS_VXLAN;
+			ethdev_rss |= RTE_ETH_RSS_VXLAN;
 
 		if (nic_rss & RSS_TUN_GENEVE_ENA)
-			ethdev_rss |= ETH_RSS_GENEVE;
+			ethdev_rss |= RTE_ETH_RSS_GENEVE;
 
 		if (nic_rss & RSS_TUN_NVGRE_ENA)
-			ethdev_rss |= ETH_RSS_NVGRE;
+			ethdev_rss |= RTE_ETH_RSS_NVGRE;
 	}
 
 	return ethdev_rss;
 }
@@ -496,9 +467,10 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 	int ret, i, j;
 
 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
-		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+		PMD_DRV_LOG(ERR,
+			    "The size of hash lookup table configured "
+			    "(%u) doesn't match the number hardware can supported "
+			    "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
 		return -EINVAL;
 	}
 
@@ -507,8 +479,8 @@ nicvf_dev_reta_query(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = tbl[j];
 	}
@@ -526,9 +498,9 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 	int ret, i, j;
 
 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
-		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			    "(%u) doesn't match the number hardware can supported "
+			    "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
 		return -EINVAL;
 	}
 
@@ -537,8 +509,8 @@ nicvf_dev_reta_update(struct rte_eth_dev *dev,
 		return ret;
 
 	/* Copy RETA table */
-	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				tbl[j] = reta_conf[i].reta[j];
 	}
@@ -569,8 +541,8 @@ nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	if (rss_conf->rss_key &&
 		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
-		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
-			rss_conf->rss_key_len);
+		PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
+			    rss_conf->rss_key_len);
 		return -EINVAL;
 	}
 
@@ -652,6 +624,7 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
 				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
 	if (rz == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+		rte_free(rbdr);
 		return -ENOMEM;
 	}
 
@@ -834,9 +807,9 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
 		    dev->data->nb_rx_queues,
 		    dev->data->dev_conf.lpbk_mode, rsshf);
 
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
 		ret = nicvf_rss_term(nic);
-	else if (dev->data->dev_conf.rxmode.mq_mode
-			== ETH_MQ_RX_RSS)
+	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
 	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -871,13 +844,12 @@ nicvf_configure_rss_reta(struct rte_eth_dev *dev)
 }
 
 static void
-nicvf_dev_tx_queue_release(void *sq)
+nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct nicvf_txq *txq;
+	struct nicvf_txq *txq = dev->data->tx_queues[qid];
 
 	PMD_INIT_FUNC_TRACE();
 
-	txq = (struct nicvf_txq *)sq;
 	if (txq) {
 		if (txq->txbuffs != NULL) {
 			nicvf_tx_queue_release_mbufs(txq);
@@ -885,6 +857,7 @@ nicvf_dev_tx_queue_release(void *sq)
 			txq->txbuffs = NULL;
 		}
 		rte_free(txq);
+		dev->data->tx_queues[qid] = NULL;
 	}
 }
 
@@ -897,7 +870,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -998,8 +971,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
 		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
 				nicvf_netdev_qidx(nic, qidx));
-		nicvf_dev_tx_queue_release(
-			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
 		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
 	}
 
@@ -1020,7 +992,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 	txq->offloads = offloads;
 
-	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1033,19 +1005,21 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
 	}
 
+	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+
 	/* Allocate software ring */
 	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
 				nb_desc * sizeof(struct rte_mbuf *),
 				RTE_CACHE_LINE_SIZE, nic->node);
 
 	if (txq->txbuffs == NULL) {
-		nicvf_dev_tx_queue_release(txq);
+		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
 		return -ENOMEM;
 	}
 
 	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
 		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
-		nicvf_dev_tx_queue_release(txq);
+		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
 		return -ENOMEM;
 	}
 
@@ -1056,7 +1030,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
 			txq->phys, txq->offloads);
 
-	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
@@ -1073,8 +1046,7 @@ nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
 	if (dev->rx_pkt_burst == NULL)
 		return;
 
-	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
-				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
+	while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
 		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
 					NICVF_MAX_RX_FREE_THRESH);
 		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
@@ -1174,11 +1146,11 @@ nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
 }
 
 static void
-nicvf_dev_rx_queue_release(void *rx_queue)
+nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
 	PMD_INIT_FUNC_TRACE();
 
-	rte_free(rx_queue);
+	rte_free(dev->data->rx_queues[qid]);
 }
 
 static int
@@ -1315,7 +1287,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	}
 
 	/* Mempool memory must be physically contiguous */
-	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
+	if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) {
 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
 		return -EINVAL;
 	}
@@ -1349,8 +1321,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
 		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
 				nicvf_netdev_qidx(nic, qidx));
-		nicvf_dev_rx_queue_release(
-			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+		nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
 		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
 	}
 
@@ -1378,12 +1349,14 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	else
 		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
 
+	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+
 	nicvf_rxq_mbuf_setup(rxq);
 
 	/* Alloc completion queue */
 	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
 		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
-		nicvf_dev_rx_queue_release(rxq);
+		nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
 		return -ENOMEM;
 	}
 
@@ -1395,7 +1368,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
 			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
-	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
 	return 0;
@@ -1410,11 +1382,11 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Autonegotiation may be disabled */
-	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
-	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
-			 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
-		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
 
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
@@ -1443,10 +1415,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM,
+		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 	};
 
 	return 0;
@@ -1610,8 +1582,8 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-		ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+	       RTE_ETH_VLAN_EXTEND_MASK;
 	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
@@ -1737,16 +1709,13 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Setup scatter mode if needed by jumbo */
-	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-					    2 * VLAN_TAG_SIZE > buffsz)
+	if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
-	/* Setup MTU based on max_rx_pkt_len or default */
-	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
-		dev->data->dev_conf.rxmode.max_rx_pkt_len
-			-  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
+	/* Setup MTU */
+	mtu = dev->data->mtu;
 
 	if (nicvf_dev_set_mtu(dev, mtu)) {
 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@@ -1780,6 +1749,7 @@ nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 
 	PMD_INIT_FUNC_TRACE();
+	dev->data->dev_started = 0;
 
 	/* Teardown secondary vf first */
 	for (i = 0; i < nic->sqs_count; i++) {
@@ -1803,12 +1773,14 @@ nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
 		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
 }
 
-static void
+static int
 nicvf_dev_stop(struct rte_eth_dev *dev)
 {
 	PMD_INIT_FUNC_TRACE();
 
 	nicvf_dev_stop_cleanup(dev, false);
+
+	return 0;
 }
 
 static void
@@ -1866,13 +1838,15 @@ nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
 	}
 }
 
-static void
+static int
 nicvf_dev_close(struct rte_eth_dev *dev)
 {
 	size_t i;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 
 	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
 	nicvf_dev_stop_cleanup(dev, true);
 	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
@@ -1883,6 +1857,10 @@ nicvf_dev_close(struct rte_eth_dev *dev)
 
 		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
 	}
+
+	rte_intr_instance_free(nic->intr_handle);
+
+	return 0;
 }
 
 static int
@@ -1920,6 +1898,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
 	if (!rte_eal_has_hugepages()) {
 		PMD_INIT_LOG(INFO, "Huge page is not configured");
 		return -EINVAL;
@@ -1930,8 +1911,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
@@ -1941,7 +1922,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
 		return -EINVAL;
 	}
@@ -1976,7 +1957,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
 		nic->offload_cksum = 1;
 
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
@@ -1985,6 +1966,37 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+nicvf_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	int rc, i;
+
+	rc = nicvf_mbox_set_link_up_down(nic, true);
+	if (rc)
+		goto done;
+
+	/* Start tx queues */
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		nicvf_dev_tx_queue_start(dev, i);
+
+done:
+	return rc;
+}
+
+static int
+nicvf_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	int i;
+
+	/* Stop tx queues */
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		nicvf_dev_tx_queue_stop(dev, i);
+
+	return nicvf_mbox_set_link_up_down(nic, false);
+}
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_configure            = nicvf_dev_configure,
@@ -2009,9 +2021,10 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
 	.rx_queue_release         = nicvf_dev_rx_queue_release,
-	.rx_queue_count           = nicvf_dev_rx_queue_count,
 	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
 	.tx_queue_release         = nicvf_dev_tx_queue_release,
+	.dev_set_link_up          = nicvf_dev_set_link_up,
+	.dev_set_link_down        = nicvf_dev_set_link_down,
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
@@ -2021,8 +2034,8 @@ nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	rxmode = &dev->data->dev_conf.rxmode;
-	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			nicvf_vlan_hw_strip(nic, true);
 		else
 			nicvf_vlan_hw_strip(nic, false);
@@ -2096,10 +2109,7 @@ static int
 nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
 {
 	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		nicvf_dev_close(dev);
-
+	nicvf_dev_close(dev);
 	return 0;
 }
 
 static int
@@ -2112,6 +2122,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &nicvf_eth_dev_ops;
+	eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -2129,6 +2140,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	nic->device_id = pci_dev->id.device_id;
 	nic->vendor_id = pci_dev->id.vendor_id;
@@ -2147,6 +2159,14 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 		goto fail;
 	}
 
+	/* Allocate interrupt instance */
+	nic->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+	if (nic->intr_handle == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate intr handle");
+		ret = -ENODEV;
+		goto fail;
+	}
+
 	nicvf_disable_all_interrupts(nic);
 
 	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
@@ -2217,7 +2237,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(ERR, "Failed to configure first skip");
 		goto malloc_fail;
 	}
-	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
+	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=" RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
 		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
 		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);