X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fiavf%2Fiavf_ethdev.c;h=f69c50df51871f8d02a9dcf731ac692216d8034c;hb=89214fe915b8afb118105a784740b6910e54ade5;hp=0111ce47aab34294f2915b452e726559b26afacd;hpb=281bd1aa39721a35c8fafe313a2ef82c2cfaf589;p=dpdk.git

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 0111ce47aa..f69c50df51 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,11 +25,6 @@
 #include
 #include
 
-#include "iavf_log.h"
-#include "base/iavf_prototype.h"
-#include "base/iavf_adminq_cmd.h"
-#include "base/iavf_type.h"
-
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
@@ -37,18 +32,18 @@ static int iavf_dev_configure(struct rte_eth_dev *dev);
 static int iavf_dev_start(struct rte_eth_dev *dev);
 static void iavf_dev_stop(struct rte_eth_dev *dev);
 static void iavf_dev_close(struct rte_eth_dev *dev);
-static void iavf_dev_info_get(struct rte_eth_dev *dev,
+static int iavf_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
-static void iavf_dev_stats_reset(struct rte_eth_dev *dev);
-static void iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
-				struct ether_addr *addr,
+				struct rte_ether_addr *addr,
				uint32_t index,
				uint32_t pool);
 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
@@ -67,7 +62,7 @@ static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
-					 struct ether_addr *mac_addr);
+					 struct rte_ether_addr *mac_addr);
 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
@@ -76,6 +71,16 @@ static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 int iavf_logtype_init;
 int iavf_logtype_driver;
 
+#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
+int iavf_logtype_rx;
+#endif
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+int iavf_logtype_tx;
+#endif
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
+int iavf_logtype_tx_free;
+#endif
+
 static const struct rte_pci_id pci_id_iavf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
@@ -131,16 +136,14 @@ iavf_dev_configure(struct rte_eth_dev *dev)
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 
	ad->rx_bulk_alloc_allowed = true;
-#ifdef RTE_LIBRTE_IAVF_INC_VECTOR
	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * vector Rx/Tx preconditions, it will be reset.
	 */
	ad->rx_vec_allowed = true;
	ad->tx_vec_allowed = true;
-#else
-	ad->rx_vec_allowed = false;
-	ad->tx_vec_allowed = false;
-#endif
+
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
	/* Vlan stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
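
Note: the iavf_dev_configure() hunk above turns on DEV_RX_OFFLOAD_RSS_HASH whenever the application requests RSS through rxmode.mq_mode, so the VF can deliver the computed hash with each received mbuf. A minimal sketch of how an application might consume that hash; the port/queue identifiers and the assumption that the port was configured with ETH_MQ_RX_RSS and started elsewhere are illustrative, not part of this patch.

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch only: assumes the port was configured with
 * rxmode.mq_mode = ETH_MQ_RX_RSS and already started.
 */
static void
print_rss_hashes(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *burst[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, queue_id, burst, 32);
	for (i = 0; i < nb; i++) {
		/* PKT_RX_RSS_HASH marks mbufs whose hash.rss field is valid */
		if (burst[i]->ol_flags & PKT_RX_RSS_HASH)
			printf("pkt %u: RSS hash 0x%08" PRIx32 "\n",
			       (unsigned int)i, burst[i]->hash.rss);
		rte_pktmbuf_free(burst[i]);
	}
}
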
@@ -225,23 +228,23 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (max_pkt_len <= ETHER_MAX_LEN ||
+		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
-				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN,
				    (uint32_t)IAVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
-		if (max_pkt_len < ETHER_MIN_LEN ||
-		    max_pkt_len > ETHER_MAX_LEN) {
+		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+		    max_pkt_len > RTE_ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
-				    (uint32_t)ETHER_MIN_LEN,
-				    (uint32_t)ETHER_MAX_LEN);
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
			return -EINVAL;
		}
	}
@@ -315,9 +318,10 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR supports, enable it */
			vf->msix_base = IAVF_RX_VEC_START;
-			IAVF_WRITE_REG(hw, IAVFINT_DYN_CTLN1(vf->msix_base - 1),
-				       IAVFINT_DYN_CTLN1_ITR_INDX_MASK |
-				       IAVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+			IAVF_WRITE_REG(hw,
+				       IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* If no WB_ON_ITR offload flags, need to set
			 * interrupt for descriptor write back.
@@ -327,12 +331,12 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			/* set ITR to max */
			interval = iavf_calc_itr_interval(
					IAVF_QUEUE_ITR_INTERVAL_MAX);
-			IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
-				       IAVFINT_DYN_CTL01_INTENA_MASK |
-				       (IAVF_ITR_INDEX_DEFAULT <<
-					IAVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
-				       (interval <<
-					IAVFINT_DYN_CTL01_INTERVAL_SHIFT));
+			IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+				       (IAVF_ITR_INDEX_DEFAULT <<
+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+				       (interval <<
+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
	IAVF_WRITE_FLUSH(hw);
	/* map all queues to the same interrupt */
@@ -412,12 +416,11 @@ iavf_dev_start(struct rte_eth_dev *dev)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
 
	PMD_INIT_FUNC_TRACE();
 
-	hw->adapter_stopped = 0;
+	adapter->stopped = 0;
 
	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
@@ -472,12 +475,11 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 {
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
 
	PMD_INIT_FUNC_TRACE();
 
-	if (hw->adapter_stopped == 1)
+	if (adapter->stopped == 1)
		return;
 
	iavf_stop_queues(dev);
@@ -492,10 +494,10 @@ iavf_dev_stop(struct rte_eth_dev *dev)
	/* remove all mac addrs */
	iavf_add_del_all_mac_addr(adapter, FALSE);
 
-	hw->adapter_stopped = 1;
+	adapter->stopped = 1;
 }
 
-static void
+static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -517,7 +519,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_VLAN_FILTER;
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -556,6 +559,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
		.nb_min = IAVF_MIN_RING_DESC,
		.nb_align = IAVF_ALIGN_RING_DESC,
	};
+
+	return 0;
 }
 
 static const uint32_t *
@@ -586,24 +591,33 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
	 * when receive LINK_CHANGE evnet from PF by Virtchnnl.
	 */
	switch (vf->link_speed) {
-	case VIRTCHNL_LINK_SPEED_100MB:
+	case 10:
+		new_link.link_speed = ETH_SPEED_NUM_10M;
+		break;
+	case 100:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
-	case VIRTCHNL_LINK_SPEED_1GB:
+	case 1000:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
-	case VIRTCHNL_LINK_SPEED_10GB:
+	case 10000:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
-	case VIRTCHNL_LINK_SPEED_20GB:
+	case 20000:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
-	case VIRTCHNL_LINK_SPEED_25GB:
+	case 25000:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
-	case VIRTCHNL_LINK_SPEED_40GB:
+	case 40000:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
+	case 50000:
+		new_link.link_speed = ETH_SPEED_NUM_50G;
+		break;
+	case 100000:
+		new_link.link_speed = ETH_SPEED_NUM_100G;
+		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
@@ -623,7 +637,7 @@ iavf_dev_link_update(struct rte_eth_dev *dev,
	return 0;
 }
 
-static void
+static int
 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
	struct iavf_adapter *adapter =
@@ -632,14 +646,18 @@ iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
	int ret;
 
	if (vf->promisc_unicast_enabled)
-		return;
+		return 0;
 
	ret = iavf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = TRUE;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
	struct iavf_adapter *adapter =
@@ -648,14 +666,18 @@ iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
	int ret;
 
	if (!vf->promisc_unicast_enabled)
-		return;
+		return 0;
 
	ret = iavf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = FALSE;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
	struct iavf_adapter *adapter =
@@ -664,14 +686,18 @@ iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
	int ret;
 
	if (vf->promisc_multicast_enabled)
-		return;
+		return 0;
 
	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
	if (!ret)
		vf->promisc_multicast_enabled = TRUE;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
	struct iavf_adapter *adapter =
@@ -680,15 +706,19 @@ iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
	int ret;
 
	if (!vf->promisc_multicast_enabled)
-		return;
+		return 0;
 
	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
	if (!ret)
		vf->promisc_multicast_enabled = FALSE;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
 static int
-iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
+iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
 {
@@ -697,7 +727,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int err;
 
-	if (is_zero_ether_addr(addr)) {
+	if (rte_is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}
@@ -719,7 +749,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct ether_addr *addr;
+	struct rte_ether_addr *addr;
	int err;
 
	addr = &dev->data->mac_addrs[index];
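
Note: with the promiscuous and allmulticast callbacks above converted from void to int, a failed virtchnl request (reported here as -EAGAIN) can now reach the application through the corresponding rte_eth_* calls. A hedged sketch of checking those return values, assuming a DPDK release in which the ethdev wrappers themselves return a status code; port_id is illustrative.

#include <rte_ethdev.h>
#include <rte_log.h>

/* Sketch only: rte_eth_promiscuous_enable()/rte_eth_allmulticast_enable()
 * are assumed to return int (0 on success, negative errno on failure).
 */
static int
enable_promisc_modes(uint16_t port_id)
{
	int ret;

	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0) {
		RTE_LOG(ERR, USER1, "port %u: promiscuous enable failed: %d\n",
			port_id, ret);
		return ret;
	}

	ret = rte_eth_allmulticast_enable(port_id);
	if (ret != 0)
		RTE_LOG(WARNING, USER1,
			"port %u: allmulticast enable failed: %d\n",
			port_id, ret);

	return ret;
}
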
@@ -908,7 +938,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
	int ret = 0;
 
-	if (mtu < ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
		return -EINVAL;
 
	/* mtu setting is forbidden if port is start */
@@ -917,7 +947,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
		return -EBUSY;
	}
 
-	if (frame_size > ETHER_MAX_LEN)
+	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
@@ -931,22 +961,22 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 static int
 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
-			 struct ether_addr *mac_addr)
+			 struct rte_ether_addr *mac_addr)
 {
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
-	struct ether_addr *perm_addr, *old_addr;
+	struct rte_ether_addr *perm_addr, *old_addr;
	int ret;
 
-	old_addr = (struct ether_addr *)hw->mac.addr;
-	perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+	old_addr = (struct rte_ether_addr *)hw->mac.addr;
+	perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
 
-	if (is_same_ether_addr(mac_addr, old_addr))
+	if (rte_is_same_ether_addr(mac_addr, old_addr))
		return 0;
 
	/* If the MAC address is configured by host, skip the setting */
-	if (is_valid_assigned_ether_addr(perm_addr))
+	if (rte_is_valid_assigned_ether_addr(perm_addr))
		return -EPERM;
 
	ret = iavf_add_del_eth_addr(adapter, old_addr, FALSE);
@@ -974,7 +1004,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
	if (ret)
		return -EIO;
 
-	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
	return 0;
 }
 
@@ -1032,12 +1062,13 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	if (ret == 0) {
		iavf_update_stats(vsi, pstats);
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
-						pstats->rx_broadcast;
+				pstats->rx_broadcast - pstats->rx_discards;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
						pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
+		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
@@ -1045,7 +1076,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	return -EIO;
 }
 
-static void
+static int
 iavf_dev_stats_reset(struct rte_eth_dev *dev)
 {
	int ret;
@@ -1057,10 +1088,13 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 
	/* read stat values to clear hardware registers */
	ret = iavf_query_stats(adapter, &pstats);
+	if (ret != 0)
+		return ret;
 
	/* set stats offset base on current values */
-	if (ret == 0)
-		vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset = *pstats;
+
+	return 0;
 }
 
 static int
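
Note: the iavf_dev_stats_get() hunk above stops counting rx_discards in ipackets (they remain in imissed) and removes the 4-byte Ethernet CRC (RTE_ETHER_CRC_LEN) per packet from ibytes, bringing the VF counters in line with other PMDs. A small sketch of reading the corrected counters; port_id is illustrative.

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

/* Sketch only: dumps the basic counters touched by the hunk above. */
static void
dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) != 0)
		return;

	/* ipackets no longer includes discarded packets (see imissed),
	 * and ibytes no longer includes the per-packet CRC.
	 */
	printf("port %u: rx %" PRIu64 " pkts, %" PRIu64 " bytes, missed %" PRIu64 "\n",
	       (unsigned int)port_id, st.ipackets, st.ibytes, st.imissed);
}
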
@@ -1075,21 +1109,22 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == IAVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
-		IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
-			       IAVFINT_DYN_CTL01_INTENA_MASK |
-			       IAVFINT_DYN_CTL01_CLEARPBA_MASK |
-			       IAVFINT_DYN_CTL01_ITR_INDX_MASK);
+		IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+			       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+			       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+			       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		IAVF_WRITE_REG(hw,
-			       IAVFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
-			       IAVFINT_DYN_CTLN1_INTENA_MASK |
-			       IAVFINT_DYN_CTL01_CLEARPBA_MASK |
-			       IAVFINT_DYN_CTLN1_ITR_INDX_MASK);
+			       IAVF_VFINT_DYN_CTLN1
+			       (msix_intr - IAVF_RX_VEC_START),
+			       IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+			       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+			       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
 
	IAVF_WRITE_FLUSH(hw);
 
-	rte_intr_enable(&pci_dev->intr_handle);
+	rte_intr_ack(&pci_dev->intr_handle);
 
	return 0;
 }
@@ -1108,7 +1143,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
	}
 
	IAVF_WRITE_REG(hw,
-		      IAVFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
+		      IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
		      0);
 
	IAVF_WRITE_FLUSH(hw);
@@ -1121,9 +1156,9 @@ iavf_check_vf_reset_done(struct iavf_hw *hw)
	int i, reset;
 
	for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
-		reset = IAVF_READ_REG(hw, IAVFGEN_RSTAT) &
-			IAVFGEN_RSTAT_VFR_STATE_MASK;
-		reset = reset >> IAVFGEN_RSTAT_VFR_STATE_SHIFT;
+		reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
+		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
@@ -1220,10 +1255,13 @@ static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
 {
	/* Enable admin queue interrupt trigger */
-	IAVF_WRITE_REG(hw, IAVFINT_ICR0_ENA1, IAVFINT_ICR0_ENA1_ADMINQ_MASK);
+	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
+		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
 
-	IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01, IAVFINT_DYN_CTL01_INTENA_MASK |
-		IAVFINT_DYN_CTL01_CLEARPBA_MASK | IAVFINT_DYN_CTL01_ITR_INDX_MASK);
+	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
 
	IAVF_WRITE_FLUSH(hw);
 }
@@ -1232,9 +1270,9 @@ static inline void
 iavf_disable_irq0(struct iavf_hw *hw)
 {
	/* Disable all interrupt types */
-	IAVF_WRITE_REG(hw, IAVFINT_ICR0_ENA1, 0);
-	IAVF_WRITE_REG(hw, IAVFINT_DYN_CTL01,
-		       IAVFINT_DYN_CTL01_ITR_INDX_MASK);
+	IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
+	IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	IAVF_WRITE_FLUSH(hw);
 }
 
@@ -1288,6 +1326,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;
+	adapter->stopped = 1;
 
	if (iavf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
@@ -1296,21 +1335,20 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
-					"iavf_mac",
-					ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX,
-					0);
+		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
-			     ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
+			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
-	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
-		eth_random_addr(hw->mac.addr);
-	ether_addr_copy((struct ether_addr *)hw->mac.addr,
+	if (!rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)hw->mac.addr))
+		rte_eth_random_addr(hw->mac.addr);
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);
 
	/* register callback func to eal lib */
@@ -1349,7 +1387,6 @@ static int
 iavf_dev_uninit(struct rte_eth_dev *dev)
 {
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;
@@ -1357,8 +1394,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
-	if (hw->adapter_stopped == 0)
-		iavf_dev_close(dev);
+	iavf_dev_close(dev);
 
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
@@ -1394,8 +1430,7 @@ static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
 /* Adaptive virtual function driver struct */
 static struct rte_pci_driver rte_iavf_pmd = {
	.id_table = pci_id_iavf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_iavf_pci_probe,
	.remove = eth_iavf_pci_remove,
 };
@@ -1411,82 +1446,22 @@ RTE_INIT(iavf_init_log)
	iavf_logtype_driver = rte_log_register("pmd.net.iavf.driver");
	if (iavf_logtype_driver >= 0)
		rte_log_set_level(iavf_logtype_driver, RTE_LOG_NOTICE);
-}
-
-/* memory func for base code */
-enum iavf_status_code
-iavf_allocate_dma_mem_d(__rte_unused struct iavf_hw *hw,
-			struct iavf_dma_mem *mem,
-			u64 size,
-			u32 alignment)
-{
-	const struct rte_memzone *mz = NULL;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-
-	if (!mem)
-		return IAVF_ERR_PARAM;
-
-	snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
-	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
-			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
-	if (!mz)
-		return IAVF_ERR_NO_MEMORY;
-
-	mem->size = size;
-	mem->va = mz->addr;
-	mem->pa = mz->phys_addr;
-	mem->zone = (const void *)mz;
-	PMD_DRV_LOG(DEBUG,
-		    "memzone %s allocated with physical address: %"PRIu64,
-		    mz->name, mem->pa);
-
-	return IAVF_SUCCESS;
-}
-
-enum iavf_status_code
-iavf_free_dma_mem_d(__rte_unused struct iavf_hw *hw,
-		    struct iavf_dma_mem *mem)
-{
-	if (!mem)
-		return IAVF_ERR_PARAM;
-
-	PMD_DRV_LOG(DEBUG,
-		    "memzone %s to be freed with physical address: %"PRIu64,
-		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
-	rte_memzone_free((const struct rte_memzone *)mem->zone);
-	mem->zone = NULL;
-	mem->va = NULL;
-	mem->pa = (u64)0;
-
-	return IAVF_SUCCESS;
-}
-
-enum iavf_status_code
-iavf_allocate_virt_mem_d(__rte_unused struct iavf_hw *hw,
-			 struct iavf_virt_mem *mem,
-			 u32 size)
-{
-	if (!mem)
-		return IAVF_ERR_PARAM;
-
-	mem->size = size;
-	mem->va = rte_zmalloc("iavf", size, 0);
-
-	if (mem->va)
-		return IAVF_SUCCESS;
-	else
-		return IAVF_ERR_NO_MEMORY;
-}
-
-enum iavf_status_code
-iavf_free_virt_mem_d(__rte_unused struct iavf_hw *hw,
-		     struct iavf_virt_mem *mem)
-{
-	if (!mem)
-		return IAVF_ERR_PARAM;
+#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
+	iavf_logtype_rx = rte_log_register("pmd.net.iavf.rx");
+	if (iavf_logtype_rx >= 0)
+		rte_log_set_level(iavf_logtype_rx, RTE_LOG_DEBUG);
+#endif
 
-	rte_free(mem->va);
-	mem->va = NULL;
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	iavf_logtype_tx = rte_log_register("pmd.net.iavf.tx");
+	if (iavf_logtype_tx >= 0)
+		rte_log_set_level(iavf_logtype_tx, RTE_LOG_DEBUG);
+#endif
 
-	return IAVF_SUCCESS;
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
+	iavf_logtype_tx_free = rte_log_register("pmd.net.iavf.tx_free");
+	if (iavf_logtype_tx_free >= 0)
+		rte_log_set_level(iavf_logtype_tx_free, RTE_LOG_DEBUG);
+#endif
 }
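
Note: the RTE_INIT hunk above registers the optional data-path log types pmd.net.iavf.rx, pmd.net.iavf.tx and pmd.net.iavf.tx_free only when the matching RTE_LIBRTE_IAVF_DEBUG_* build flags are set, defaulting them to DEBUG. A hedged sketch of adjusting those levels from application code with the generic EAL log API; the pattern string simply mirrors the names registered above.

#include <rte_log.h>

/* Sketch only: the rx/tx/tx_free types exist only in builds with the
 * RTE_LIBRTE_IAVF_DEBUG_* flags enabled.
 */
static void
tune_iavf_log_levels(void)
{
	int lt;

	/* Raise every registered iavf log type to DEBUG. */
	rte_log_set_level_pattern("pmd.net.iavf.*", RTE_LOG_DEBUG);

	/* Or look up a single type by name and set it individually. */
	lt = rte_log_register("pmd.net.iavf.driver");
	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_NOTICE);
}

If the EAL command line is preferred, the same effect should be achievable with an option along the lines of --log-level=pmd.net.iavf.*:debug.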