X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev_vf.c;h=b1736e73ab9ad07e3e75a9004da0c78329cff4f9;hb=958edf6627d55e5ffe27731f4e1030e7e718458f;hp=2fecc8427bf8c299970adb1454a8c026a7e2f84a;hpb=2790c64647250c18db3c7cd4fca92a64533042fd;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 2fecc8427b..b1736e73ab 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -9,6 +9,8 @@
 #include
 #include
 #include
+#include <linux/pci_regs.h>
+
 #include
 #include
 #include
@@ -24,6 +26,7 @@
 #include
 #include
 #include
+#include
 
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
@@ -31,6 +34,7 @@
 #include "hns3_regs.h"
 #include "hns3_intr.h"
 #include "hns3_dcb.h"
+#include "hns3_mp.h"
 
 #define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
 #define HNS3VF_SERVICE_INTERVAL		1000000 /* us */
@@ -55,6 +59,81 @@ static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
 
+/* set PCI bus mastering */
+static void
+hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
+{
+	uint16_t reg;
+
+	rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
+
+	if (op)
+		/* set the master bit */
+		reg |= PCI_COMMAND_MASTER;
+	else
+		reg &= ~(PCI_COMMAND_MASTER);
+
+	rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
+}
+
+/**
+ * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static int
+hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
+{
+#define MAX_PCIE_CAPABILITY 48
+	uint16_t status;
+	uint8_t pos;
+	uint8_t id;
+	int ttl;
+
+	rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
+	if (!(status & PCI_STATUS_CAP_LIST))
+		return 0;
+
+	ttl = MAX_PCIE_CAPABILITY;
+	rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
+	while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
+		rte_pci_read_config(device, &id, sizeof(id),
+				    (pos + PCI_CAP_LIST_ID));
+
+		if (id == 0xFF)
+			break;
+
+		if (id == cap)
+			return (int)pos;
+
+		rte_pci_read_config(device, &pos, sizeof(pos),
+				    (pos + PCI_CAP_LIST_NEXT));
+	}
+	return 0;
+}
+
+static int
+hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
+{
+	uint16_t control;
+	int pos;
+
+	pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
+	if (pos) {
+		rte_pci_read_config(device, &control, sizeof(control),
+				    (pos + PCI_MSIX_FLAGS));
+		if (op)
+			control |= PCI_MSIX_FLAGS_ENABLE;
+		else
+			control &= ~PCI_MSIX_FLAGS_ENABLE;
+		rte_pci_write_config(device, &control, sizeof(control),
+				     (pos + PCI_MSIX_FLAGS));
+		return 0;
+	}
+	return -1;
+}
+
 static int
 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		    __attribute__ ((unused)) uint32_t idx,
@@ -1017,6 +1096,14 @@ err_init_hardware:
 	return ret;
 }
 
+static int
+hns3vf_clear_vport_list(struct hns3_hw *hw)
+{
+	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
+				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
+				 NULL, 0);
+}
+
 static int
 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 {
@@ -1068,6 +1155,12 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
 	rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */
 
+	ret = hns3vf_clear_vport_list(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
+		goto err_get_config;
+	}
+
 	ret = hns3vf_init_hardware(hns);
 	if (ret)
 		goto err_get_config;
@@ -1141,6 +1234,11 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
 	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	/* Prevent crashes when queues are still in use. */
+	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -1148,6 +1246,7 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
+	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
 	rte_spinlock_unlock(&hw->lock);
 }
 
@@ -1157,6 +1256,9 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	if (hw->adapter_state == HNS3_NIC_STARTED)
 		hns3vf_dev_stop(eth_dev);
 
@@ -1164,7 +1266,6 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 	hns3_reset_abort(hns);
 	hw->adapter_state = HNS3_NIC_CLOSED;
 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
-	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
 	hns3vf_configure_all_mc_mac_addr(hns, true);
 	hns3vf_remove_all_vlan_table(hns);
 	hns3vf_uninit_vf(eth_dev);
@@ -1172,6 +1273,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 	rte_free(hw->reset.wait_data);
 	rte_free(eth_dev->process_private);
 	eth_dev->process_private = NULL;
+	hns3_mp_uninit_primary();
 	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
 }
 
@@ -1184,8 +1286,6 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 	struct rte_eth_link new_link;
 
-	hns3vf_request_link_info(hw);
-
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
 	case ETH_SPEED_NUM_10M:
@@ -1249,6 +1349,9 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
 	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
+	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
+			  eth_dev);
 
 	return 0;
 }
@@ -1297,9 +1400,30 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
 	struct timeval tv;
 
-	if (wait_data->result == HNS3_WAIT_SUCCESS)
-		return 0;
-	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+	if (wait_data->result == HNS3_WAIT_SUCCESS) {
+		/*
+		 * After vf reset is ready, the PF may not have completed
+		 * the reset processing. The vf sending mbox to PF may fail
+		 * during the pf reset, so it is better to add extra delay.
+		 */
+		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
+		    hw->reset.level == HNS3_FLR_RESET)
+			return 0;
+		/* Reset retry process, no need to add extra delay. */
+		if (hw->reset.attempts)
+			return 0;
+		if (wait_data->check_completion == NULL)
+			return 0;
+
+		wait_data->check_completion = NULL;
+		wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
+		wait_data->count = 1;
+		wait_data->result = HNS3_WAIT_REQUEST;
+		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
+				  wait_data);
+		hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
+		return -EAGAIN;
+	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
 		gettimeofday(&tv, NULL);
 		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
 			  tv.tv_sec, tv.tv_usec);
@@ -1344,6 +1468,10 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 
 	hw->mac.link_status = ETH_LINK_DOWN;
 	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hw->adapter_state == HNS3_NIC_STARTED ||
@@ -1373,6 +1501,7 @@ hns3vf_start_service(struct hns3_adapter *hns)
 	eth_dev = &rte_eth_devices[hw->data->port_id];
 	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
 	hns3vf_service_handler(eth_dev);
 
 	return 0;
 }
@@ -1457,6 +1586,11 @@ hns3vf_reset_service(void *param)
 		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
+		if (reset_level == HNS3_NONE_RESET) {
+			hns3_err(hw, "No reset level is set, try global reset");
+			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+		}
 	}
 	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
 
@@ -1482,14 +1616,35 @@ hns3vf_reset_service(void *param)
 static int
 hns3vf_reinit_dev(struct hns3_adapter *hns)
 {
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
+	if (hw->reset.level == HNS3_VF_FULL_RESET) {
+		rte_intr_disable(&pci_dev->intr_handle);
+		hns3vf_set_bus_master(pci_dev, true);
+	}
+
 	/* Firmware command initialize */
 	ret = hns3_cmd_init(hw);
 	if (ret) {
 		hns3_err(hw, "Failed to init cmd: %d", ret);
-		return ret;
+		goto err_cmd_init;
+	}
+
+	if (hw->reset.level == HNS3_VF_FULL_RESET) {
+		/*
+		 * UIO enables msix by writing the pcie configuration space
+		 * vfio_pci enables msix in rte_intr_enable.
+		 */
+		if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
+		    pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
+			if (hns3vf_enable_msix(pci_dev, true))
+				hns3_err(hw, "Failed to enable msix");
+		}
+
+		rte_intr_enable(&pci_dev->intr_handle);
 	}
 
 	ret = hns3_reset_all_queues(hns);
@@ -1506,6 +1661,8 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)
 	return 0;
 
+err_cmd_init:
+	hns3vf_set_bus_master(pci_dev, false);
 err_init:
 	hns3_cmd_uninit(hw);
 	return ret;
 }
@@ -1578,8 +1735,13 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		hns3_mp_init_secondary();
+		hw->secondary_cnt++;
 		return 0;
+	}
+
+	hns3_mp_init_primary();
 
 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
 	hns->is_vf = true;
@@ -1627,8 +1789,6 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 	}
 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
 			  eth_dev);
-	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
-			  eth_dev);
 	return 0;
 
 err_rte_zmalloc: