X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev_vf.c;h=eca716a6a871091ba43917faa6cfee6030fa2f96;hb=2fc1d6da882563ab80786d69b6d7c9d0e4ce860a;hp=bec993390c1273fd80bb38bfc0ee45be973a5f88;hpb=6a6cf5f88b4ad3e89b5b7769354892c58bafc4e7;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index bec993390c..eca716a6a8 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
 	.mtu_set = i40evf_dev_mtu_set,
 	.mac_addr_set = i40evf_set_default_mac_addr,
+	.tx_done_cleanup = i40e_tx_done_cleanup,
 };
 
 /*
@@ -650,51 +651,69 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct vf_cmd_info args;
-	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
-		sizeof(struct virtchnl_vector_map) * dev->data->nb_rx_queues];
+	uint8_t *cmd_buffer = NULL;
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	uint32_t vector_id;
-	int i, err;
-	uint16_t nb_msix;
+	uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i;
+	uint16_t rxq_map[vf->vf_res->max_vectors];
+	int err;
 
+	memset(rxq_map, 0, sizeof(rxq_map));
 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_allow_others(intr_handle))
-		vector_id = I40E_RX_VEC_START;
-	else
-		vector_id = I40E_MISC_VEC_ID;
+	    rte_intr_allow_others(intr_handle)) {
+		msix_base = I40E_RX_VEC_START;
+		/* For interrupt mode, available vector id is from 1. */
+		max_vectors = vf->vf_res->max_vectors - 1;
+		nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd);
+
+		vec = msix_base;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[vec] |= 1 << i;
+			intr_handle->intr_vec[i] = vec++;
+			if (vec >= vf->vf_res->max_vectors)
+				vec = msix_base;
+		}
+	} else {
+		msix_base = I40E_MISC_VEC_ID;
+		nb_msix = 1;
 
-	nb_msix = RTE_MIN(vf->vf_res->max_vectors,
-			intr_handle->nb_efd);
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[msix_base] |= 1 << i;
+			if (rte_intr_dp_is_en(intr_handle))
+				intr_handle->intr_vec[i] = msix_base;
+		}
+	}
+
+	cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) +
+			sizeof(struct virtchnl_vector_map) * nb_msix;
+	cmd_buffer = rte_zmalloc("i40e", cmd_buffer_size, 0);
+	if (!cmd_buffer) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return I40E_ERR_NO_MEMORY;
+	}
 
 	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = dev->data->nb_rx_queues;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+	map_info->num_vectors = nb_msix;
+	for (i = 0; i < nb_msix; i++) {
 		map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
 		map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
-		/* Always use default dynamic MSIX interrupt */
-		map_info->vecmap[i].vector_id = vector_id;
-		/* Don't map any tx queue */
+		map_info->vecmap[i].vector_id = msix_base + i;
 		map_info->vecmap[i].txq_map = 0;
-		map_info->vecmap[i].rxq_map = 1 << i;
-		if (rte_intr_dp_is_en(intr_handle))
-			intr_handle->intr_vec[i] = vector_id;
-		if (vector_id > I40E_MISC_VEC_ID)
-			vector_id++;
-		if (vector_id > nb_msix)
-			vector_id = I40E_RX_VEC_START;
+		map_info->vecmap[i].rxq_map = rxq_map[msix_base + i];
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
 	args.in_args = (u8 *)cmd_buffer;
-	args.in_args_size = sizeof(cmd_buffer);
+	args.in_args_size = cmd_buffer_size;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
 
+	rte_free(cmd_buffer);
+
 	return err;
 }
 
@@ -770,7 +789,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
 			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
-			return -1;
 		}
 	}
 
@@ -778,7 +796,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
 			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
-			return -1;
 		}
 	}
 
@@ -1290,10 +1307,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
 	vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
 	/* Store the MAC address configured by host, or generate random one */
-	if (rte_is_valid_assigned_ether_addr(
+	if (!rte_is_valid_assigned_ether_addr(
 			(struct rte_ether_addr *)hw->mac.addr))
-		vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
-	else
 		rte_eth_random_addr(hw->mac.addr); /* Generate a random one */
 
 	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -2172,6 +2187,8 @@ i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
 	if (ret == 0)
 		vf->promisc_unicast_enabled = TRUE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
 	else
 		ret = -EAGAIN;
 
@@ -2187,6 +2204,8 @@ i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
 	if (ret == 0)
 		vf->promisc_unicast_enabled = FALSE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
 	else
 		ret = -EAGAIN;
 
@@ -2202,6 +2221,8 @@ i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
 	if (ret == 0)
 		vf->promisc_multicast_enabled = TRUE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
 	else
 		ret = -EAGAIN;
 
@@ -2217,6 +2238,8 @@ i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
 	if (ret == 0)
 		vf->promisc_multicast_enabled = FALSE;
+	else if (ret == I40E_NOT_SUPPORTED)
+		ret = -ENOTSUP;
 	else
 		ret = -EAGAIN;
 
@@ -2753,7 +2776,6 @@ static int
 i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
 			    struct rte_ether_addr *mac_addr)
 {
-	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
@@ -2761,9 +2783,6 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
-		return -EPERM;
-
 	i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
 
 	if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)