X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev.c;h=fa0af847ad2e59c0d2c70a9a06ac24e6dc8c08d0;hb=109e4dd1bd7a5f7e0400be974f2b9e18d10cb3a6;hp=acbb5265f466b95268019c1df6cf1286346bb9d9;hpb=2790c64647250c18db3c7cd4fca92a64533042fd;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index acbb5265f4..fa0af847ad 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -29,6 +29,7 @@
 #include "hns3_intr.h"
 #include "hns3_regs.h"
 #include "hns3_dcb.h"
+#include "hns3_mp.h"
 
 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1
@@ -76,6 +77,7 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
+static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
 
 static void
 hns3_pf_disable_irq0(struct hns3_hw *hw)
@@ -217,6 +219,8 @@ hns3_interrupt_handler(void *param)
 		hns3_schedule_reset(hns);
 	} else if (event_cause == HNS3_VECTOR0_EVENT_RST)
 		hns3_schedule_reset(hns);
+	else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
+		hns3_dev_handle_mbx_msg(hw);
 	else
 		hns3_err(hw, "Received unknown event");
 
@@ -281,6 +285,11 @@ hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
 	struct hns3_hw *hw = &hns->hw;
 	struct hns3_pf *pf = &hns->pf;
 
+	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
+		if (vlan_entry->vlan_id == vlan_id)
+			return;
+	}
+
 	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
 	if (vlan_entry == NULL) {
 		hns3_err(hw, "Failed to malloc hns3 vlan table");
@@ -2015,6 +2024,40 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 	return hns3_check_mq_mode(dev);
 }
 
+static int
+hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+			   bool mmap, uint16_t queue_id)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_cmd_desc desc;
+	struct hns3_ctrl_vector_chain_cmd *req =
+		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
+	enum hns3_cmd_status status;
+	enum hns3_opcode_type op;
+	uint16_t tqp_type_and_id = 0;
+
+	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+	hns3_cmd_setup_basic_desc(&desc, op, false);
+	req->int_vector_id = vector_id;
+
+	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+		       HNS3_RING_TYPE_RX);
+	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+		       HNS3_RING_GL_RX);
+	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+
+	req->int_cause_num = 1;
+	status = hns3_cmd_send(hw, &desc, 1);
+	if (status) {
+		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
+			 queue_id, vector_id, status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int
 hns3_dev_configure(struct rte_eth_dev *dev)
 {
@@ -2262,6 +2305,11 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct hns3_mac *mac = &hw->mac;
 	struct rte_eth_link new_link;
 
+	if (!hns3_is_reset_pending(hns)) {
+		hns3_update_speed_duplex(eth_dev);
+		hns3_update_link_status(hw);
+	}
+
 	memset(&new_link, 0, sizeof(new_link));
 	switch (mac->link_speed) {
 	case ETH_SPEED_NUM_10M:
@@ -3766,14 +3814,16 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
 	return !!link_status;
 }
 
-static void
+void
 hns3_update_link_status(struct hns3_hw *hw)
 {
 	int state;
 
 	state = hns3_get_mac_link_status(hw);
-	if (state != hw->mac.link_status)
+	if (state != hw->mac.link_status) {
 		hw->mac.link_status = state;
+		hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
+	}
 }
 
 static void
@@ -4014,15 +4064,83 @@ err_config_mac_mode:
 }
 
 static int
-hns3_dev_start(struct rte_eth_dev *eth_dev)
+hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t intr_vector;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
 	int ret;
 
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	/* disable uio/vfio intr/eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if (rte_intr_cap_multiple(intr_handle) ||
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = dev->data->nb_rx_queues;
+		/* creates event fd for each intr vector when MSIX is used */
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -EINVAL;
+	}
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			ret = -ENOMEM;
+			goto alloc_intr_vec_error;
+		}
+	}
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			if (ret)
+				goto bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	rte_intr_enable(intr_handle);
+	return 0;
+
+bind_vector_error:
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	return ret;
+alloc_intr_vec_error:
+	rte_intr_efd_disable(intr_handle);
+	return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
 	PMD_INIT_FUNC_TRACE();
 	if (rte_atomic16_read(&hw->reset.resetting))
 		return -EBUSY;
+
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 
@@ -4035,7 +4153,12 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
+
+	ret = hns3_map_rx_interrupt(dev);
+	if (ret)
+		return ret;
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
 
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
@@ -4063,15 +4186,52 @@ hns3_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3_dev_stop(struct rte_eth_dev *eth_dev)
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(dev);
+	/* Prevent crashes when queues are still in use. */
+	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -4080,6 +4240,7 @@ hns3_dev_stop(struct rte_eth_dev *eth_dev)
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	rte_spinlock_unlock(&hw->lock);
+	hns3_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -4088,6 +4249,12 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		rte_free(eth_dev->process_private);
+		eth_dev->process_private = NULL;
+		return;
+	}
+
 	if (hw->adapter_state == HNS3_NIC_STARTED)
 		hns3_dev_stop(eth_dev);
 
@@ -4104,6 +4271,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
 	rte_free(hw->reset.wait_data);
 	rte_free(eth_dev->process_private);
 	eth_dev->process_private = NULL;
+	hns3_mp_uninit_primary();
 	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
 }
 
@@ -4558,6 +4726,10 @@ hns3_stop_service(struct hns3_adapter *hns)
 	hw->mac.link_status = ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(eth_dev);
+	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
@@ -4590,6 +4762,7 @@ hns3_start_service(struct hns3_adapter *hns)
 	hns3_set_rst_done(hw);
 	eth_dev = &rte_eth_devices[hw->data->port_id];
 	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
 	hns3_service_handler(eth_dev);
 	return 0;
 }
@@ -4661,6 +4834,11 @@ hns3_reset_service(void *param)
 		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+		if (reset_level == HNS3_NONE_RESET) {
+			hns3_err(hw, "No reset level is set, try IMP reset");
+			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+		}
 	}
 	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
 
@@ -4719,6 +4897,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.tx_queue_setup         = hns3_tx_queue_setup,
 	.rx_queue_release       = hns3_dev_rx_queue_release,
 	.tx_queue_release       = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
 	.dev_configure          = hns3_dev_configure,
 	.flow_ctrl_get          = hns3_flow_ctrl_get,
 	.flow_ctrl_set          = hns3_flow_ctrl_set,
@@ -4776,9 +4956,13 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3_eth_dev_ops;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		hns3_mp_init_secondary();
+		hw->secondary_cnt++;
 		return 0;
+	}
 
+	hns3_mp_init_primary();
 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
 
 	if (device_id == HNS3_DEV_ID_25GE_RDMA ||