From: Konstantin Ananyev
Date: Tue, 6 May 2014 14:31:12 +0000 (+0100)
Subject: ixgbevf: fix jumbo frame
X-Git-Tag: spdx-start~10791
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=88fccb7a05c64cbb10f2c82d61fafc68f2f2150d;p=dpdk.git

ixgbevf: fix jumbo frame

When the latest Linux ixgbe PF is used together with a DPDK VF in a DPDK
application, jumbo frames are not received.
Also, if the Linux ixgbe PF has its MTU set to 1500 (the default), then
normal-sized packets can be received by the DPDK VF; however, if the
Linux PF has an MTU > 1500, then the DPDK VF receives no packets at all
(neither normal nor jumbo).

With ixgbe_mbox_api_10, ixgbe simply did not allow setting a VF MTU
greater than 1514 for the 82599. With ixgbe_mbox_api_11 it does, but now,
if the PF uses jumbo frames, it simply disables RX for all VFs.
So, to work with a PF that uses jumbo frames, at startup each VF has to:
1. negotiate mbox_api_11 with the PF;
2. send the PF a SET_LPE message with the desired MTU.
Note that if the PF already uses an MTU bigger than the one requested by
the VF, the PF will not take any action.

Signed-off-by: Konstantin Ananyev
Acked-by: Ivan Boule
---

diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 4608595437..6b454a5b4b 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -2077,6 +2077,11 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	/* setup MTU */
+	e1000_rlpml_set_vf(hw,
+		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		VLAN_TAG_SIZE));
+
 	/* Configure and enable each RX queue. */
 	rctl_bsize = 0;
 	dev->rx_pkt_burst = eth_igb_recv_pkts;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index c9b5fe4132..a50bce4cb2 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -809,19 +809,30 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	return 0;
 }
 
-static void ixgbevf_get_queue_num(struct ixgbe_hw *hw)
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 {
-	/* Traffic classes are not supported by now */
-	unsigned int tcs, tc;
+	int32_t i;
 
-	/*
-	 * Must let PF know we are at mailbox API version 1.1.
-	 * Otherwise PF won't answer properly.
-	 * In case that PF fails to provide Rx/Tx queue number,
-	 * max_tx_queues and max_rx_queues remain to be 1.
-	 */
-	if (!ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11))
-		ixgbevf_get_queues(hw, &tcs, &tc);
+	/* start with highest supported, proceed down */
+	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+		ixgbe_mbox_api_11,
+		ixgbe_mbox_api_10,
+	};
+
+	for (i = 0;
+			i != RTE_DIM(sup_ver) &&
+			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+			i++)
+		;
 }
 
 /*
@@ -831,9 +842,11 @@ static int
 eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int diag;
+	uint32_t tc, tcs;
+	struct rte_pci_device *pci_dev;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct ixgbe_vfta * shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
@@ -892,8 +905,11 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return (diag);
 	}
 
+	/* negotiate mailbox API version to use with the PF. */
+	ixgbevf_negotiate_api(hw);
+
 	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
-	ixgbevf_get_queue_num(hw);
+	ixgbevf_get_queues(hw, &tcs, &tc);
 
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
@@ -2519,6 +2535,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
 	hw->mac.ops.reset_hw(hw);
 
+	/* negotiate mailbox API version to use with the PF. */
+	ixgbevf_negotiate_api(hw);
+
 	ixgbevf_dev_tx_init(dev);
 
 	/* This can fail when allocating mbufs for descriptor rings */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 37d02aae05..303144b451 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3594,6 +3594,10 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	/* setup MTU */
+	ixgbevf_rlpml_set_vf(hw,
+		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
 	/* Setup RX queues */
 	dev->rx_pkt_burst = ixgbe_recv_pkts;
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
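
Usage note (illustrative, not part of the patch): with this fix in place, an
application only needs to request jumbo frames through the normal port
configuration; the VF PMD then negotiates mbox API 1.1 on its own and sends
the SET_LPE message with max_rx_pkt_len to the PF when RX is initialized at
device start. Below is a minimal sketch against the rte_ethdev API of this
period; the function name request_vf_jumbo, the single queue pair and the
9000-byte frame size are arbitrary assumptions for illustration.

#include <string.h>
#include <rte_ethdev.h>

static int
request_vf_jumbo(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* ask the PMD for jumbo frames; on an ixgbevf port this value is
	 * forwarded to the PF via the SET_LPE mailbox message at RX init */
	conf.rxmode.jumbo_frame = 1;
	conf.rxmode.max_rx_pkt_len = 9000;	/* assumed maximum frame size */

	/* one RX and one TX queue; queue setup and rte_eth_dev_start()
	 * follow as usual */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}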