X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k_ethdev.c;h=a69c990ec805428e5dfa4d3dacaa3fe24d5967e4;hb=b1f91174a55cf03a149949c336c378faca71860d;hp=9d8b016ae5d76fbc83f886bad5a0161f3bdc01e3;hpb=f28dd9f43b2df232b2a8f88a0ee3ee7163efbe01;p=dpdk.git

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 9d8b016ae5..a69c990ec8 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -63,6 +63,10 @@ static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
 	const u8 *mac, bool add);
+static void
+fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);
+static void fm10k_tx_queue_release(void *queue);
+static void fm10k_rx_queue_release(void *queue);
 
 static void
 fm10k_mbx_initlock(struct fm10k_hw *hw)
@@ -178,7 +182,7 @@ rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
 	/* Wait 100us at most */
 	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
 		rte_delay_us(1);
-		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
 		if (!(reg & FM10K_RXQCTL_ENABLE))
 			break;
 	}
@@ -201,7 +205,6 @@ tx_queue_reset(struct fm10k_tx_queue *q)
 	q->next_free = 0;
 	q->nb_used = 0;
 	q->nb_free = q->nb_desc - 1;
-	q->free_trigger = q->nb_free - q->free_thresh;
 	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
 	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
 }
@@ -268,7 +271,7 @@ tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
 	/* Wait 100us at most */
 	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
 		rte_delay_us(1);
-		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
 		if (!(reg & FM10K_TXDCTL_ENABLE))
 			break;
 	}
@@ -328,7 +331,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
+	for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
 		if (j == dev->data->nb_rx_queues)
 			j = 0;
 		reta = (reta << CHAR_BIT) | j;
@@ -783,20 +786,18 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 		diag = fm10k_dev_tx_queue_start(dev, i);
 		if (diag != 0) {
 			int j;
+			for (j = 0; j < i; ++j)
+				tx_queue_clean(dev->data->tx_queues[j]);
 			for (j = 0; j < dev->data->nb_rx_queues; ++j)
 				rx_queue_clean(dev->data->rx_queues[j]);
 			return diag;
 		}
 	}
 
-	if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID) {
-		/* Update default vlan */
+	/* Update default vlan */
+	if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID)
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
-		/* Add default mac/vlan filter to PF/Switch manager */
-		fm10k_MAC_filter_set(dev, hw->mac.addr, true);
-	}
-
 	return 0;
 }
 
@@ -807,11 +808,31 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		fm10k_dev_tx_queue_stop(dev, i);
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			fm10k_dev_tx_queue_stop(dev, i);
+
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			fm10k_dev_rx_queue_stop(dev, i);
+}
+
+static void
+fm10k_dev_queue_release(struct rte_eth_dev *dev)
+{
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->data->tx_queues) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			fm10k_tx_queue_release(dev->data->tx_queues[i]);
+	}
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		fm10k_dev_rx_queue_stop(dev, i);
+	if (dev->data->rx_queues) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			fm10k_rx_queue_release(dev->data->rx_queues[i]);
+	}
 }
 
 static void
@@ -821,9 +842,12 @@ fm10k_dev_close(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	fm10k_MACVLAN_remove_all(dev);
+
 	/* Stop mailbox service first */
 	fm10k_close_mbx_service(hw);
 	fm10k_dev_stop(dev);
+	fm10k_dev_queue_release(dev);
 	fm10k_stop_hw(hw);
 }
 
@@ -905,10 +929,17 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_vfs = dev->pci_dev->max_vfs;
 	dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
 		DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa = 0;
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -948,12 +979,6 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 
-	/* @todo - add support for the VF */
-	if (hw->mac.type != fm10k_mac_pf) {
-		PMD_INIT_LOG(ERR, "VLAN filter not available on VF");
-		return -ENOTSUP;
-	}
-
 	if (vlan_id > ETH_VLAN_ID_MAX) {
 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
 		return (-EINVAL);
@@ -1010,6 +1035,27 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	return 0;
 }
 
+static void
+fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
+{
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+			PMD_INIT_LOG(ERR, "VLAN stripping is "
+					"always on in fm10k");
+	}
+
+	if (mask & ETH_VLAN_EXTEND_MASK) {
+		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+			PMD_INIT_LOG(ERR, "VLAN QinQ is not "
+					"supported in fm10k");
+	}
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+			PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
+	}
+}
+
 /* Add/Remove a MAC address, and update filters */
 static void
 fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
@@ -1021,12 +1067,6 @@ fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
 	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 
-	/* @todo - add support for the VF */
-	if (hw->mac.type != fm10k_mac_pf) {
-		PMD_INIT_LOG(ERR, "MAC filter not available on VF");
-		return;
-	}
-
 	i = 0;
 	for (j = 0; j < FM10K_VFTA_SIZE; j++) {
 		if (macvlan->vfta[j]) {
@@ -1076,6 +1116,25 @@ fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 		FALSE);
 }
 
+/* Remove all VLAN and MAC address table entries */
+static void
+fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
+{
+	uint32_t j, k;
+	struct fm10k_macvlan_filter_info *macvlan;
+
+	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+	for (j = 0; j < FM10K_VFTA_SIZE; j++) {
+		if (macvlan->vfta[j]) {
+			for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
+				if (macvlan->vfta[j] & (1 << k))
+					fm10k_vlan_filter_set(dev,
+						j * FM10K_UINT32_BIT_SIZE + k, false);
+			}
+		}
+	}
+}
+
 static inline int
 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
 {
@@ -1268,7 +1327,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		return (-ENOMEM);
 	}
 	q->hw_ring = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
 	q->hw_ring_phys_addr = mz->phys_addr;
+#endif
 
 	dev->data->rx_queues[queue_id] = q;
 	return 0;
@@ -1414,7 +1477,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		return (-ENOMEM);
 	}
 	q->hw_ring = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+	q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
 	q->hw_ring_phys_addr = mz->phys_addr;
+#endif
 
 	/*
 	 * allocate memory for the RS bit tracker. Enough slots to hold the
@@ -1646,6 +1713,36 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
 	FM10K_WRITE_FLUSH(hw);
 }
 
+static void
+fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+	int_map |= 0;
+
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
+	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
+
+	/* Disable misc causes */
+	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
+				FM10K_EIMR_DISABLE(THI_FAULT) |
+				FM10K_EIMR_DISABLE(FUM_FAULT) |
+				FM10K_EIMR_DISABLE(MAILBOX) |
+				FM10K_EIMR_DISABLE(SWITCHREADY) |
+				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
+				FM10K_EIMR_DISABLE(SRAMERROR) |
+				FM10K_EIMR_DISABLE(VFLR));
+
+	/* Disable ITR 0 */
+	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
+	FM10K_WRITE_FLUSH(hw);
+}
+
 static void
 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
 {
@@ -1664,6 +1761,22 @@ fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
 	FM10K_WRITE_FLUSH(hw);
 }
 
+static void
+fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+	int_map |= 0;
+
+	/* Only INT 0 available, other 15 are reserved. */
+	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+	/* Disable ITR 0 */
+	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
+	FM10K_WRITE_FLUSH(hw);
+}
+
 static int
 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 {
@@ -1672,7 +1785,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 	const char *estr = "Unknown error";
 
 	/* Process PCA fault */
-	if (eicr & FM10K_EIMR_PCA_FAULT) {
+	if (eicr & FM10K_EICR_PCA_FAULT) {
 		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
 		if (err)
 			goto error;
@@ -1700,7 +1813,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 	}
 
 	/* Process THI fault */
-	if (eicr & FM10K_EIMR_THI_FAULT) {
+	if (eicr & FM10K_EICR_THI_FAULT) {
 		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
 		if (err)
 			goto error;
@@ -1718,7 +1831,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 	}
 
 	/* Process FUM fault */
-	if (eicr & FM10K_EIMR_FUM_FAULT) {
+	if (eicr & FM10K_EICR_FUM_FAULT) {
 		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
 		if (err)
 			goto error;
@@ -1755,8 +1868,6 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 			fault.address, fault.specinfo);
 	}
 
-	if (estr)
-		return 0;
 	return 0;
 error:
 	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
@@ -1927,6 +2038,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
 	.link_update = fm10k_link_update,
 	.dev_infos_get = fm10k_dev_infos_get,
 	.vlan_filter_set = fm10k_vlan_filter_set,
+	.vlan_offload_set = fm10k_vlan_offload_set,
 	.mac_addr_add = fm10k_macaddr_add,
 	.mac_addr_remove = fm10k_macaddr_remove,
 	.rx_queue_start = fm10k_dev_rx_queue_start,
@@ -2101,13 +2213,6 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
 	/* Enable port first */
 	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
 
-	/*
-	 * Add default mac. glort is assigned by SM for PF, while is
-	 * unused for VF. PF will assign correct glort for VF.
-	 */
-	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
-		0, 1, 0);
-
 	/* Set unicast mode by default. App can change to other mode in other
 	 * API func.
 	 */
@@ -2116,6 +2221,56 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
 
 	fm10k_mbx_unlock(hw);
 
+	/* Add default mac address */
+	fm10k_MAC_filter_set(dev, hw->mac.addr, true);
+
+	return 0;
+}
+
+static int
+eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
+{
+	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* only uninitialize in the primary process */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	/* safe to close dev here */
+	fm10k_dev_close(dev);
+
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
+	/* disable uio/vfio intr */
+	rte_intr_disable(&(dev->pci_dev->intr_handle));
+
+	/*PF/VF has different interrupt handling mechanism */
+	if (hw->mac.type == fm10k_mac_pf) {
+		/* disable interrupt */
+		fm10k_dev_disable_intr_pf(dev);
+
+		/* unregister callback func to eal lib */
+		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+			fm10k_dev_interrupt_handler_pf, (void *)dev);
+	} else {
+		/* disable interrupt */
+		fm10k_dev_disable_intr_vf(dev);
+
+		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);
+	}
+
+	/* free mac memory */
+	if (dev->data->mac_addrs) {
+		rte_free(dev->data->mac_addrs);
+		dev->data->mac_addrs = NULL;
+	}
+
+	memset(hw, 0, sizeof(*hw));
 	return 0;
 }
 
@@ -2135,9 +2290,10 @@ static struct eth_driver rte_pmd_fm10k = {
 	.pci_drv = {
 		.name = "rte_pmd_fm10k",
 		.id_table = pci_id_fm10k_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
 	},
 	.eth_dev_init = eth_fm10k_dev_init,
+	.eth_dev_uninit = eth_fm10k_dev_uninit,
 	.dev_private_size = sizeof(struct fm10k_adapter),
 };
 