X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev_vf.c;h=244397e0e550adadd43a1374fc99f448db408ac8;hb=62d2083dd78edf628dc03b46ced95e3c4a4b3c4b;hp=63dbe14c358d2211e264d1e036d747b3bdf9cd5b;hpb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 63dbe14c35..244397e0e5 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -75,8 +75,8 @@ enum i40evf_aq_result {
 static int i40evf_dev_configure(struct rte_eth_dev *dev);
 static int i40evf_dev_start(struct rte_eth_dev *dev);
 static void i40evf_dev_stop(struct rte_eth_dev *dev);
-static void i40evf_dev_info_get(struct rte_eth_dev *dev,
-				struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_info_get(struct rte_eth_dev *dev,
+			       struct rte_eth_dev_info *dev_info);
 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
 				  int wait_to_complete);
 static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
@@ -86,16 +86,16 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
 static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
 				       struct rte_eth_xstat_name *xstats_names,
 				       unsigned limit);
-static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
+static int i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
 				  uint16_t vlan_id, int on);
 static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void i40evf_dev_close(struct rte_eth_dev *dev);
 static int i40evf_dev_reset(struct rte_eth_dev *dev);
-static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int i40evf_init_vlan(struct rte_eth_dev *dev);
 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
 				     uint16_t rx_queue_id);
@@ -140,6 +140,8 @@ static int
 i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addr_set,
 			uint32_t nb_mc_addr);
+static void
+i40evf_dev_alarm_handler(void *param);
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -213,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
 	.mtu_set = i40evf_dev_mtu_set,
 	.mac_addr_set = i40evf_set_default_mac_addr,
+	.tx_done_cleanup = i40e_tx_done_cleanup,
 };
 
 /*
@@ -573,7 +576,7 @@
 {
 	txq_info->vsi_id = vsi_id;
 	txq_info->queue_id = queue_id;
-	if (queue_id < nb_txq) {
+	if (queue_id < nb_txq && txq) {
 		txq_info->ring_len = txq->nb_tx_desc;
 		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
 	}
@@ -590,7 +593,7 @@
 	rxq_info->vsi_id = vsi_id;
 	rxq_info->queue_id = queue_id;
 	rxq_info->max_pkt_size = max_pkt_size;
-	if (queue_id < nb_rxq) {
+	if (queue_id < nb_rxq && rxq) {
 		rxq_info->ring_len = rxq->nb_rx_desc;
 		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
 		rxq_info->databuffer_size =
@@ -623,10 +626,11 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 
 	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
 		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
-			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+			vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+			txq ? txq[i] : NULL);
 		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
 			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
-			vf->max_pkt_len, rxq[i]);
+			vf->max_pkt_len, rxq ? rxq[i] : NULL);
 	}
 	memset(&args, 0, sizeof(args));
 	args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
@@ -647,44 +651,69 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct vf_cmd_info args;
-	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
-			sizeof(struct virtchnl_vector_map)];
+	uint8_t *cmd_buffer = NULL;
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	uint32_t vector_id;
-	int i, err;
+	uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i;
+	uint16_t rxq_map[vf->vf_res->max_vectors];
+	int err;
 
+	memset(rxq_map, 0, sizeof(rxq_map));
 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_allow_others(intr_handle))
-		vector_id = I40E_RX_VEC_START;
-	else
-		vector_id = I40E_MISC_VEC_ID;
+	    rte_intr_allow_others(intr_handle)) {
+		msix_base = I40E_RX_VEC_START;
+		/* For interrupt mode, available vector id is from 1. */
+		max_vectors = vf->vf_res->max_vectors - 1;
+		nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd);
+
+		vec = msix_base;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[vec] |= 1 << i;
+			intr_handle->intr_vec[i] = vec++;
+			if (vec >= vf->vf_res->max_vectors)
+				vec = msix_base;
+		}
+	} else {
+		msix_base = I40E_MISC_VEC_ID;
+		nb_msix = 1;
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[msix_base] |= 1 << i;
+			if (rte_intr_dp_is_en(intr_handle))
+				intr_handle->intr_vec[i] = msix_base;
+		}
+	}
+
+	cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) +
+			sizeof(struct virtchnl_vector_map) * nb_msix;
+	cmd_buffer = rte_zmalloc("i40e", cmd_buffer_size, 0);
+	if (!cmd_buffer) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return I40E_ERR_NO_MEMORY;
+	}
 
 	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = 1;
-	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
-	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
-	/* Alway use default dynamic MSIX interrupt */
-	map_info->vecmap[0].vector_id = vector_id;
-	/* Don't map any tx queue */
-	map_info->vecmap[0].txq_map = 0;
-	map_info->vecmap[0].rxq_map = 0;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		map_info->vecmap[0].rxq_map |= 1 << i;
-		if (rte_intr_dp_is_en(intr_handle))
-			intr_handle->intr_vec[i] = vector_id;
+	map_info->num_vectors = nb_msix;
+	for (i = 0; i < nb_msix; i++) {
+		map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+		map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
+		map_info->vecmap[i].vector_id = msix_base + i;
+		map_info->vecmap[i].txq_map = 0;
+		map_info->vecmap[i].rxq_map = rxq_map[msix_base + i];
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
 	args.in_args = (u8 *)cmd_buffer;
-	args.in_args_size = sizeof(cmd_buffer);
+	args.in_args_size = cmd_buffer_size;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
 
+	rte_free(cmd_buffer);
+
 	return err;
 }
@@ -947,7 +976,7 @@ i40evf_update_stats(struct i40e_vsi *vsi,
 	i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
 }
 
-static void
+static int
 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	int ret;
@@ -960,6 +989,8 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
 	/* set stats offset base on current values */
 	if (ret == 0)
 		vf->vsi.eth_stats_offset = *pstats;
+
+	return ret;
 }
 
 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1050,10 +1081,14 @@ i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 	args.in_args_size = sizeof(vfres);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = I40E_AQ_BUF_SZ;
+
+	rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
 
+	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+			  i40evf_dev_alarm_handler, dev);
 	return err;
 }
 
@@ -1303,17 +1338,12 @@ err:
 static int
 i40evf_uninit_vf(struct rte_eth_dev *dev)
 {
-	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->adapter_closed == 0)
 		i40evf_dev_close(dev);
-	rte_free(vf->vf_res);
-	vf->vf_res = NULL;
-	rte_free(vf->aq_resp);
-	vf->aq_resp = NULL;
 
 	return 0;
 }
@@ -1404,7 +1434,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
 			}
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
 				    aq_opc);
 			break;
 		}
@@ -1479,7 +1509,6 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 	i40e_set_default_ptype_table(eth_dev);
-	i40e_set_default_pctype_table(eth_dev);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
 	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1492,11 +1521,17 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	hw->adapter_stopped = 0;
 	hw->adapter_closed = 0;
 
+	/* Pass the information to the rte_eth_dev_close() that it should also
+	 * release the private port resources.
+	 */
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
 	if(i40evf_init_vf(eth_dev) != 0) {
 		PMD_INIT_LOG(ERR, "Init vf failed");
 		return -1;
 	}
 
+	i40e_set_default_pctype_table(eth_dev);
 	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
 			  i40evf_dev_alarm_handler, eth_dev);
 
@@ -1527,10 +1562,6 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
 	if (i40evf_uninit_vf(eth_dev) != 0) {
 		PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
 		return -1;
 	}
@@ -1556,7 +1587,7 @@ static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_i40evf_pmd = {
 	.id_table = pci_id_i40evf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 	.probe = eth_i40evf_pci_probe,
 	.remove = eth_i40evf_pci_remove,
 };
@@ -2136,80 +2167,90 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 		new_link.link_speed = ETH_SPEED_NUM_40G;
 		break;
 	default:
-		new_link.link_speed = ETH_SPEED_NUM_100M;
+		new_link.link_speed = ETH_SPEED_NUM_NONE;
 		break;
 	}
 	/* full duplex only */
 	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_status = vf->link_up ? ETH_LINK_UP :
-			       ETH_LINK_DOWN;
+	new_link.link_status = vf->link_up &&
+			       new_link.link_speed != ETH_SPEED_NUM_NONE
+			       ? ETH_LINK_UP
+			       : ETH_LINK_DOWN;
 	new_link.link_autoneg =
 		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
 
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
 
-static void
+static int
 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	int ret;
 
-	/* If enabled, just return */
-	if (vf->promisc_unicast_enabled)
-		return;
-
 	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
 	if (ret == 0)
 		vf->promisc_unicast_enabled = TRUE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	int ret;
 
-	/* If disabled, just return */
-	if (!vf->promisc_unicast_enabled)
-		return;
-
 	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
 	if (ret == 0)
 		vf->promisc_unicast_enabled = FALSE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	int ret;
 
-	/* If enabled, just return */
-	if (vf->promisc_multicast_enabled)
-		return;
-
 	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
 	if (ret == 0)
 		vf->promisc_multicast_enabled = TRUE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	int ret;
 
-	/* If enabled, just return */
-	if (!vf->promisc_multicast_enabled)
-		return;
-
 	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
 	if (ret == 0)
 		vf->promisc_multicast_enabled = FALSE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
+	else
+		ret = -EAGAIN;
+
+	return ret;
 }
 
-static void
+static int
 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2285,6 +2326,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_min = I40E_MIN_RING_DESC,
 		.nb_align = I40E_ALIGN_RING_DESC,
 	};
+
+	return 0;
 }
 
 static int
@@ -2317,6 +2360,7 @@ static void
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	i40evf_dev_stop(dev);
 	i40e_dev_free_queues(dev);
@@ -2332,6 +2376,16 @@ i40evf_dev_close(struct rte_eth_dev *dev)
 	i40evf_reset_vf(dev);
 	i40e_shutdown_adminq(hw);
 	i40evf_disable_irq0(hw);
+
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
+	rte_free(vf->vf_res);
+	vf->vf_res = NULL;
+	rte_free(vf->aq_resp);
+	vf->aq_resp = NULL;
+
 	hw->adapter_closed = 1;
 }
@@ -2591,7 +2645,10 @@ i40evf_config_rss(struct i40e_vf *vf)
 	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
 	struct rte_eth_rss_conf rss_conf;
 	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+	uint32_t rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
 	uint16_t num;
+	uint8_t *lut_info;
+	int ret;
 
 	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
 		i40evf_disable_rss(vf);
@@ -2601,12 +2658,29 @@ i40evf_config_rss(struct i40e_vf *vf)
 
 	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
 	/* Fill out the look up table */
-	for (i = 0, j = 0; i < nb_q; i++, j++) {
-		if (j >= num)
-			j = 0;
-		lut = (lut << 8) | j;
-		if ((i & 3) == 3)
-			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+	if (!(vf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) {
+		for (i = 0, j = 0; i < nb_q; i++, j++) {
+			if (j >= num)
+				j = 0;
+			lut = (lut << 8) | j;
+			if ((i & 3) == 3)
+				I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+		}
+	} else {
+		lut_info = rte_zmalloc("i40e_rss_lut", rss_lut_size, 0);
+		if (!lut_info) {
+			PMD_DRV_LOG(ERR, "No memory can be allocated");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < rss_lut_size; i++)
+			lut_info[i] = i % vf->num_queue_pairs;
+
+		ret = i40evf_set_rss_lut(&vf->vsi, lut_info,
+					 rss_lut_size);
+		rte_free(lut_info);
+		if (ret)
+			return ret;
 	}
 
 	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
@@ -2808,3 +2882,9 @@ i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
 
 	return 0;
 }
+
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+	return is_device_supported(dev, &rte_i40evf_pmd);
+}
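
Caller-side sketch (illustration only, not part of the patch): two of the changes above are visible to applications — the promiscuous-mode ops now return int, surfaced through rte_eth_promiscuous_enable(), and the new .tx_done_cleanup op is reachable via rte_eth_tx_done_cleanup(). The port/queue ids and the free_cnt of 32 are assumptions; the port is assumed to be an already-configured, started i40evf port.

#include <stdio.h>
#include <rte_ethdev.h>

static int
vf_promisc_and_reclaim(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* With this patch the VF op returns int, so errors such as
	 * -ENOTSUP or -EAGAIN propagate to the caller instead of
	 * being silently dropped.
	 */
	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0) {
		fprintf(stderr, "port %u: promiscuous enable failed: %d\n",
			port_id, ret);
		return ret;
	}

	/* Dispatches to the newly registered .tx_done_cleanup op
	 * (i40e_tx_done_cleanup); frees up to 32 already-transmitted
	 * mbufs from the Tx ring. Returns the number freed, or a
	 * negative errno on failure.
	 */
	ret = rte_eth_tx_done_cleanup(port_id, queue_id, 32);
	if (ret < 0) {
		fprintf(stderr, "port %u: tx_done_cleanup failed: %d\n",
			port_id, ret);
		return ret;
	}

	return 0;
}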