X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev_vf.c;h=61aad8c41592ea98360beb8ed719a7242603d66f;hb=171875d0678c5ee8f8ce01eb887aa94294611462;hp=244397e0e550adadd43a1374fc99f448db408ac8;hpb=496608a079ccc339900f67077d88fd7bcbf56d04;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 244397e0e5..61aad8c415 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -90,8 +90,9 @@ static int i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
				  uint16_t vlan_id, int on);
 static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void i40evf_dev_close(struct rte_eth_dev *dev);
-static int i40evf_dev_reset(struct rte_eth_dev *dev);
+static int i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_reset(struct rte_eth_dev *dev);
+static int i40evf_check_vf_reset_done(struct rte_eth_dev *dev);
 static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static int i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static int i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
@@ -198,12 +199,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
-	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
-	.rx_descriptor_status = i40e_dev_rx_descriptor_status,
-	.tx_descriptor_status = i40e_dev_tx_descriptor_status,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
-	.rx_queue_count = i40e_dev_rx_queue_count,
	.rxq_info_get = i40e_rxq_info_get,
	.txq_info_get = i40e_txq_info_get,
	.mac_addr_add = i40evf_add_mac_addr,
@@ -468,7 +465,8 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
		       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
		       VIRTCHNL_VF_OFFLOAD_RSS_REG |
		       VIRTCHNL_VF_OFFLOAD_VLAN |
-		       VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+		       VIRTCHNL_VF_OFFLOAD_RX_POLLING |
+		       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
		args.in_args = (uint8_t *)&caps;
		args.in_args_size = sizeof(caps);
	} else {
@@ -519,10 +517,19 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
 
	err = i40evf_execute_vf_cmd(dev, &args);
 
-	if (err)
+	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command "
			    "CONFIG_PROMISCUOUS_MODE");
-	return err;
+
+		if (err == I40E_NOT_SUPPORTED)
+			return -ENOTSUP;
+
+		return -EAGAIN;
+	}
+
+	vf->promisc_unicast_enabled = enable_unicast;
+	vf->promisc_multicast_enabled = enable_multicast;
+	return 0;
 }
 
 static int
@@ -789,7 +796,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
-			return -1;
		}
	}
 
@@ -797,7 +803,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
-			return -1;
		}
	}
 
@@ -1083,12 +1088,28 @@ i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
	args.out_size = I40E_AQ_BUF_SZ;
 
	rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
+
	err = i40evf_execute_vf_cmd(dev, &args);
-	if (err)
+
+	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, i40evf_dev_alarm_handler, dev);
+
+	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+		return err;
+	}
+
+	/* The PF will issue a reset to the VF when change the number of
+	 * queues. The PF will set I40E_VFGEN_RSTAT to COMPLETE first, then
+	 * wait 10ms and set it to ACTIVE. In this duration, vf may not catch
+	 * the moment that COMPLETE is set. So, for vf, we'll try to wait a
+	 * long time.
+	 */
+	rte_delay_ms(100);
+
+	err = i40evf_check_vf_reset_done(dev);
+	if (err)
+		PMD_DRV_LOG(ERR, "VF is still resetting");
 
-	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
-			  i40evf_dev_alarm_handler, dev);
	return err;
 }
 
@@ -1309,10 +1330,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
	vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
	/* Store the MAC address configured by host, or generate random one */
-	if (rte_is_valid_assigned_ether_addr(
+	if (!rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)hw->mac.addr))
-		vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
-	else
		rte_eth_random_addr(hw->mac.addr); /* Generate a random one */
 
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1359,13 +1378,55 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
	switch (pf_msg->event) {
	case VIRTCHNL_EVENT_RESET_IMPENDING:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-					      NULL);
+		rte_eth_dev_callback_process(dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
		break;
	case VIRTCHNL_EVENT_LINK_CHANGE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
-		vf->link_up = pf_msg->event_data.link_event.link_status;
-		vf->link_speed = pf_msg->event_data.link_event.link_speed;
+
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+			vf->link_up =
+				pf_msg->event_data.link_event_adv.link_status;
+
+			switch (pf_msg->event_data.link_event_adv.link_speed) {
+			case ETH_SPEED_NUM_100M:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
+				break;
+			case ETH_SPEED_NUM_1G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
+				break;
+			case ETH_SPEED_NUM_2_5G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_2_5GB;
+				break;
+			case ETH_SPEED_NUM_5G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_5GB;
+				break;
+			case ETH_SPEED_NUM_10G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
+				break;
+			case ETH_SPEED_NUM_20G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
+				break;
+			case ETH_SPEED_NUM_25G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
+				break;
+			case ETH_SPEED_NUM_40G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
+				break;
+			default:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_UNKNOWN;
+				break;
+			}
+		} else {
+			vf->link_up =
+				pf_msg->event_data.link_event.link_status;
+			vf->link_speed =
+				pf_msg->event_data.link_event.link_speed;
+		}
+
+		i40evf_dev_link_update(dev, 0);
+		rte_eth_dev_callback_process(dev,
+				RTE_ETH_EVENT_INTR_LSC, NULL);
		break;
	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
@@ -1496,6 +1557,10 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 
	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
+	eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+	eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
+	eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
 
@@ -1518,7 +1583,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-	hw->adapter_stopped = 0;
+	hw->adapter_stopped = 1;
	hw->adapter_closed = 0;
 
	/* Pass the information to the rte_eth_dev_close() that it should also
@@ -1587,7 +1652,7 @@ static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_i40evf_pmd = {
	.id_table = pci_id_i40evf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40evf_pci_probe,
	.remove = eth_i40evf_pci_remove,
 };
@@ -1613,8 +1678,24 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;
 
+	dev->data->dev_conf.intr_conf.lsc =
+		!!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC);
+
	if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
-		int ret = 0;
+		struct i40e_hw *hw;
+		int ret;
+
+		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+			PMD_DRV_LOG(ERR,
+				    "For secondary processes, change queue pairs is not supported!");
+			return -ENOTSUP;
+		}
+
+		hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		if (!hw->adapter_stopped) {
+			PMD_DRV_LOG(ERR, "Device must be stopped first!");
+			return -EBUSY;
+		}
 
		PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
			    vf->vsi_res->num_queue_pairs, num_queue_pairs);
@@ -2167,15 +2248,15 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
-		new_link.link_speed = ETH_SPEED_NUM_NONE;
+		if (vf->link_up)
+			new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+		else
+			new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}
	/* full duplex only */
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up &&
-			new_link.link_speed != ETH_SPEED_NUM_NONE
-			? ETH_LINK_UP
-			: ETH_LINK_DOWN;
+	new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
	new_link.link_autoneg =
		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
 
@@ -2186,68 +2267,32 @@ static int
 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	int ret;
 
-	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
-	if (ret == 0)
-		vf->promisc_unicast_enabled = TRUE;
-	else if (ret == I40E_NOT_SUPPORTED)
-		ret = -ENOTSUP;
-	else
-		ret = -EAGAIN;
-
-	return ret;
+	return i40evf_config_promisc(dev, true, vf->promisc_multicast_enabled);
 }
 
 static int
 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	int ret;
 
-	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
-	if (ret == 0)
-		vf->promisc_unicast_enabled = FALSE;
-	else if (ret == I40E_NOT_SUPPORTED)
-		ret = -ENOTSUP;
-	else
-		ret = -EAGAIN;
-
-	return ret;
+	return i40evf_config_promisc(dev, false, vf->promisc_multicast_enabled);
 }
 
 static int
 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	int ret;
 
-	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
-	if (ret == 0)
-		vf->promisc_multicast_enabled = TRUE;
-	else if (ret == I40E_NOT_SUPPORTED)
-		ret = -ENOTSUP;
-	else
-		ret = -EAGAIN;
-
-	return ret;
+	return i40evf_config_promisc(dev, vf->promisc_unicast_enabled, true);
 }
 
 static int
 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	int ret;
 
-	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
-	if (ret == 0)
-		vf->promisc_multicast_enabled = FALSE;
-	else if (ret == I40E_NOT_SUPPORTED)
-		ret = -ENOTSUP;
-	else
-		ret = -EAGAIN;
-
-	return ret;
+	return i40evf_config_promisc(dev, vf->promisc_unicast_enabled, false);
 }
 
 static int
@@ -2356,7 +2401,7 @@ i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	return ret;
 }
 
-static void
+static int
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2369,8 +2414,9 @@ i40evf_dev_close(struct rte_eth_dev *dev)
	 * it is a workaround solution when work with kernel driver
	 * and it is not the normal way
	 */
-	i40evf_dev_promiscuous_disable(dev);
-	i40evf_dev_allmulticast_disable(dev);
+	if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
+		i40evf_config_promisc(dev, false, false);
+
	rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
 
	i40evf_reset_vf(dev);
@@ -2387,6 +2433,7 @@ i40evf_dev_close(struct rte_eth_dev *dev)
	vf->aq_resp = NULL;
 
	hw->adapter_closed = 1;
+	return 0;
 }
 
 /*
@@ -2780,7 +2827,6 @@ static int
 i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr)
 {
-	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
@@ -2788,9 +2834,6 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
		return -EINVAL;
	}
 
-	if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
-		return -EPERM;
-
	i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
 
	if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)