X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev_vf.c;h=2d5a9b55d87b495697f9d5f2bae0df583d37ec2f;hb=6ced3dd72f5f7fe8f0d717edd0cee10f45a1eb84;hp=cb34023e9ed7308fc92848312477578dfaec49be;hpb=67f0380766573ed2378da87e02b5b893dc1ccc9e;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index cb34023e9e..2d5a9b55d8 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -108,7 +108,7 @@ static void i40evf_dev_stop(struct rte_eth_dev *dev);
 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
-                                  __rte_unused int wait_to_complete);
+                                  int wait_to_complete);
 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_stats *stats);
 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
@@ -136,10 +136,10 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
                                      uint16_t tx_queue_id);
 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
                                     uint16_t tx_queue_id);
-static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
-                                struct ether_addr *addr,
-                                uint32_t index,
-                                uint32_t pool);
+static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
+                               struct ether_addr *addr,
+                               uint32_t index,
+                               uint32_t pool);
 static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -159,7 +159,7 @@ static int i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
 static int i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
-static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
+static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
                                    uint8_t *msg,
                                    uint16_t msglen);
 
@@ -727,7 +727,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
                 sizeof(struct i40e_virtchnl_vector_map)];
         struct i40e_virtchnl_irq_map_info *map_info;
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
         uint32_t vector_id;
         int i, err;
@@ -855,7 +855,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
         return 0;
 }
 
-static void
+static int
 i40evf_add_mac_addr(struct rte_eth_dev *dev,
                     struct ether_addr *addr,
                     __rte_unused uint32_t index,
@@ -873,7 +873,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
                             addr->addr_bytes[0], addr->addr_bytes[1],
                             addr->addr_bytes[2], addr->addr_bytes[3],
                             addr->addr_bytes[4], addr->addr_bytes[5]);
-                return;
+                return I40E_ERR_INVALID_MAC_ADDR;
         }
 
         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
@@ -892,7 +892,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
                 PMD_DRV_LOG(ERR, "fail to execute command "
                             "OP_ADD_ETHER_ADDRESS");
 
-        return;
+        return err;
 }
 
 static void
@@ -1318,9 +1318,8 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
 }
 
 static void
-i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
-                       uint8_t *msg,
-                       __rte_unused uint16_t msglen)
+i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
+                       __rte_unused uint16_t msglen)
 {
         struct i40e_virtchnl_pf_event *pf_msg =
                         (struct i40e_virtchnl_pf_event *)msg;
@@ -1460,7 +1459,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 {
         struct i40e_hw *hw
                 = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
         PMD_INIT_FUNC_TRACE();
 
@@ -1569,7 +1568,7 @@ static struct rte_pci_driver rte_i40evf_pmd = {
 
 RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
 
 static int
 i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -1884,7 +1883,7 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
         if (!rte_intr_allow_others(intr_handle)) {
@@ -1917,7 +1916,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
         if (!rte_intr_allow_others(intr_handle)) {
@@ -1944,7 +1943,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
 static int
 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         uint16_t interval =
@@ -1979,7 +1978,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         uint16_t msix_intr;
@@ -2064,7 +2063,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
         uint32_t intr_vector = 0;
 
@@ -2130,7 +2129,7 @@ err_queue:
 static void
 i40evf_dev_stop(struct rte_eth_dev *dev)
 {
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
         PMD_INIT_FUNC_TRACE();
@@ -2261,7 +2260,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
         memset(dev_info, 0, sizeof(*dev_info));
-        dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
         dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2330,7 +2329,7 @@ static void
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-        struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
         i40evf_dev_stop(dev);
@@ -2687,8 +2686,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct rte_eth_dev_data *dev_data = vf->dev_data;
-        uint32_t frame_size = mtu + ETHER_HDR_LEN
-                              + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+        uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
         int ret = 0;
 
         /* check if mtu is within the allowed range */
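
Note on the last hunk: the open-coded frame-size arithmetic (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE) is replaced by the single I40E_ETH_OVERHEAD macro. The standalone sketch below only illustrates that arithmetic; the numeric values and the assumption that the consolidated macro also budgets for a second VLAN tag (QinQ) are illustrative, not taken from this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Standard Ethernet sizes; macro names mirror those referenced in the patch. */
    #define ETHER_HDR_LEN      14
    #define ETHER_CRC_LEN      4
    #define I40E_VLAN_TAG_SIZE 4
    /* Assumption: the consolidated overhead macro covers a second VLAN tag (QinQ). */
    #define I40E_ETH_OVERHEAD  (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)

    int main(void)
    {
            uint16_t mtu = 1500;

            /* Expression removed by the patch vs. the new single-macro form. */
            uint32_t old_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
            uint32_t new_frame = mtu + I40E_ETH_OVERHEAD;

            printf("old frame size: %u, new frame size: %u\n", old_frame, new_frame);
            return 0;
    }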