From: Intel Date: Wed, 19 Dec 2012 23:00:00 +0000 (+0100) Subject: e1000: support EM devices (also known as e1000/e1000e) X-Git-Tag: spdx-start~11351 X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=805803445a02;p=dpdk.git e1000: support EM devices (also known as e1000/e1000e) Signed-off-by: Intel --- diff --git a/config/defconfig_i686-default-linuxapp-gcc b/config/defconfig_i686-default-linuxapp-gcc index 0523670c0f..8bfb668e24 100644 --- a/config/defconfig_i686-default-linuxapp-gcc +++ b/config/defconfig_i686-default-linuxapp-gcc @@ -130,8 +130,9 @@ CONFIG_RTE_LIBRTE_IEEE1588=n CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 # -# Compile burst-oriented IGB PMD driver +# Compile burst-oriented IGB & EM PMD drivers # +CONFIG_RTE_LIBRTE_EM_PMD=y CONFIG_RTE_LIBRTE_IGB_PMD=y CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n diff --git a/config/defconfig_i686-default-linuxapp-icc b/config/defconfig_i686-default-linuxapp-icc index c1c364adcf..6f209b0120 100644 --- a/config/defconfig_i686-default-linuxapp-icc +++ b/config/defconfig_i686-default-linuxapp-icc @@ -130,8 +130,9 @@ CONFIG_RTE_LIBRTE_IEEE1588=n CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 # -# Compile burst-oriented IGB PMD driver +# Compile burst-oriented IGB & EM PMD drivers # +CONFIG_RTE_LIBRTE_EM_PMD=y CONFIG_RTE_LIBRTE_IGB_PMD=y CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n diff --git a/config/defconfig_x86_64-default-linuxapp-gcc b/config/defconfig_x86_64-default-linuxapp-gcc index acd2f94fbc..04997cdf3b 100644 --- a/config/defconfig_x86_64-default-linuxapp-gcc +++ b/config/defconfig_x86_64-default-linuxapp-gcc @@ -130,8 +130,9 @@ CONFIG_RTE_LIBRTE_IEEE1588=n CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 # -# Compile burst-oriented IGB PMD driver +# Compile burst-oriented IGB & EM PMD drivers # +CONFIG_RTE_LIBRTE_EM_PMD=y CONFIG_RTE_LIBRTE_IGB_PMD=y CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n diff --git a/config/defconfig_x86_64-default-linuxapp-icc b/config/defconfig_x86_64-default-linuxapp-icc index 7fd1195795..e628280015 100644 --- a/config/defconfig_x86_64-default-linuxapp-icc +++ b/config/defconfig_x86_64-default-linuxapp-icc @@ -130,8 +130,9 @@ CONFIG_RTE_LIBRTE_IEEE1588=n CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 # -# Compile burst-oriented IGB PMD driver +# Compile burst-oriented IGB & EM PMD drivers # +CONFIG_RTE_LIBRTE_EM_PMD=y CONFIG_RTE_LIBRTE_IGB_PMD=y CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n diff --git a/lib/librte_pmd_e1000/Makefile b/lib/librte_pmd_e1000/Makefile index 890b13ecef..df08fba25c 100644 --- a/lib/librte_pmd_e1000/Makefile +++ b/lib/librte_pmd_e1000/Makefile @@ -64,6 +64,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether diff --git a/lib/librte_pmd_e1000/e1000_ethdev.h b/lib/librte_pmd_e1000/e1000_ethdev.h index 3f33034a0e..0ebd7cc353 100644 --- a/lib/librte_pmd_e1000/e1000_ethdev.h +++ b/lib/librte_pmd_e1000/e1000_ethdev.h @@ -117,4 +117,35 @@ int eth_igbvf_rx_init(struct rte_eth_dev *dev); void eth_igbvf_tx_init(struct rte_eth_dev *dev); +/* + * RX/TX EM function prototypes + */ +void eth_em_tx_queue_release(void *txq); +void eth_em_rx_queue_release(void *rxq); + +void 
em_dev_clear_queues(struct rte_eth_dev *dev); + +int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_em_rx_init(struct rte_eth_dev *dev); + +void eth_em_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + + #endif /* _E1000_ETHDEV_H_ */ diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c new file mode 100644 index 0000000000..ea994b07d2 --- /dev/null +++ b/lib/librte_pmd_e1000/em_ethdev.c @@ -0,0 +1,1411 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "e1000/e1000_api.h" +#include "e1000_ethdev.h" + +#define EM_EIAC 0x000DC + +#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y)) + + +static int eth_em_configure(struct rte_eth_dev *dev); +static int eth_em_start(struct rte_eth_dev *dev); +static void eth_em_stop(struct rte_eth_dev *dev); +static void eth_em_close(struct rte_eth_dev *dev); +static void eth_em_promiscuous_enable(struct rte_eth_dev *dev); +static void eth_em_promiscuous_disable(struct rte_eth_dev *dev); +static void eth_em_allmulticast_enable(struct rte_eth_dev *dev); +static void eth_em_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_em_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void eth_em_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static void eth_em_stats_reset(struct rte_eth_dev *dev); +static void eth_em_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_em_interrupt_setup(struct rte_eth_dev *dev); +static int eth_em_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_em_interrupt_action(struct rte_eth_dev *dev); +static void eth_em_interrupt_handler(struct rte_intr_handle *handle, + void *param); + +static int em_hw_init(struct e1000_hw *hw); +static int em_hardware_init(struct e1000_hw *hw); +static void em_hw_control_acquire(struct e1000_hw *hw); +static void em_hw_control_release(struct e1000_hw *hw); +static void em_init_manageability(struct e1000_hw *hw); +static void em_release_manageability(struct e1000_hw *hw); + +static int eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev); + +/* +static void eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +*/ +static int eth_em_led_on(struct rte_eth_dev *dev); +static int eth_em_led_off(struct rte_eth_dev *dev); + +static void em_intr_disable(struct e1000_hw *hw); +static int em_get_rx_buffer_size(struct e1000_hw *hw); +static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); + +#define EM_FC_PAUSE_TIME 0x0680 +#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +static enum e1000_fc_mode em_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static struct rte_pci_id pci_id_em_map[] = { + +#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{.device_id = 0}, +}; + +static struct eth_dev_ops eth_em_ops = { + .dev_configure = eth_em_configure, + .dev_start = eth_em_start, + .dev_stop = eth_em_stop, + .dev_close = eth_em_close, + .promiscuous_enable = eth_em_promiscuous_enable, + .promiscuous_disable = eth_em_promiscuous_disable, + .allmulticast_enable = eth_em_allmulticast_enable, + 
.allmulticast_disable = eth_em_allmulticast_disable, + .link_update = eth_em_link_update, + .stats_get = eth_em_stats_get, + .stats_reset = eth_em_stats_reset, + .dev_infos_get = eth_em_infos_get, + .vlan_filter_set = eth_em_vlan_filter_set, + .vlan_offload_set = eth_em_vlan_offload_set, + .rx_queue_setup = eth_em_rx_queue_setup, + .rx_queue_release = eth_em_rx_queue_release, + .tx_queue_setup = eth_em_tx_queue_setup, + .tx_queue_release = eth_em_tx_queue_release, + .dev_led_on = eth_em_led_on, + .dev_led_off = eth_em_led_off, + .flow_ctrl_set = eth_em_flow_ctrl_set, + .mac_addr_add = eth_em_rar_set, + .mac_addr_remove = eth_em_rar_clear, +}; + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static int +eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + + pci_dev = eth_dev->pci_dev; + eth_dev->dev_ops = ð_em_ops; + eth_dev->rx_pkt_burst = (eth_rx_burst_t)ð_em_recv_pkts; + eth_dev->tx_pkt_burst = (eth_tx_burst_t)ð_em_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. 
Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = + (eth_rx_burst_t)ð_em_recv_scattered_pkts; + return 0; + } + + hw->hw_addr = (void *)pci_dev->mem_resource.addr; + hw->device_id = pci_dev->id.device_id; + + /* For ICH8 support we'll need to map the flash memory BAR */ + + if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS || + em_hw_init(hw) != 0) { + PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: " + "failed to init HW", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + return -(ENODEV); + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN * + hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " + "store MAC addresses", + ETHER_ADDR_LEN * hw->mac.rar_entry_count); + return -(ENOMEM); + } + + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.addr, + eth_dev->data->mac_addrs); + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&(pci_dev->intr_handle), + eth_em_interrupt_handler, (void *)eth_dev); + + return (0); +} + +static struct eth_driver rte_em_pmd = { + { + .name = "rte_em_pmd", + .id_table = pci_id_em_map, + .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO, + }, + .eth_dev_init = eth_em_dev_init, + .dev_private_size = sizeof(struct e1000_adapter), +}; + +int +rte_em_pmd_init(void) +{ + rte_eth_driver_register(&rte_em_pmd); + return 0; +} + +static int +em_hw_init(struct e1000_hw *hw) +{ + int diag; + + diag = hw->mac.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "MAC Initialization Error\n"); + return diag; + } + diag = hw->nvm.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "NVM Initialization Error\n"); + return diag; + } + diag = hw->phy.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "PHY Initialization Error\n"); + return diag; + } + (void) e1000_get_bus_info(hw); + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + + e1000_init_script_state_82541(hw, TRUE); + e1000_set_tbi_compatibility_82543(hw, TRUE); + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = e1000_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. + */ + e1000_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. 
+ */ + diag = e1000_validate_nvm_checksum(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + goto error; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + diag = e1000_read_mac_addr(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + goto error; + } + + /* Now initialize the hardware */ + diag = em_hardware_init(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + goto error; + } + + hw->mac.get_link_status = 1; + + /* Indicate SOL/IDER usage */ + diag = e1000_check_reset_block(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "PHY reset is blocked due to " + "SOL/IDER session"); + } + return (0); + +error: + em_hw_control_release(hw); + return (diag); +} + +static int +eth_em_configure(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + PMD_INIT_LOG(DEBUG, ">>"); + + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + + PMD_INIT_LOG(DEBUG, "<<"); + return (0); +} + +static void +em_set_pba(struct e1000_hw *hw) +{ + uint32_t pba; + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. + * Devices before the 82547 had a Packet Buffer of 64K. + * After the 82547 the buffer was reduced to 40K. + */ + switch (hw->mac.type) { + case e1000_82547: + case e1000_82547_rev_2: + /* 82547: Total Packet Buffer is 40K */ + pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ + break; + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + break; + case e1000_82573: /* 82573: Total Packet Buffer is 32K */ + pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ + break; + case e1000_82574: + case e1000_82583: + pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ + break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; + case e1000_ich9lan: + case e1000_ich10lan: + pba = E1000_PBA_10K; + break; + case e1000_pchlan: + case e1000_pch2lan: + pba = E1000_PBA_26K; + break; + default: + pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ + } + + E1000_WRITE_REG(hw, E1000_PBA, pba); +} + +static int +eth_em_start(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret, mask; + + PMD_INIT_LOG(DEBUG, ">>"); + + eth_em_stop(dev); + + e1000_power_up_phy(hw); + + /* Set default PBA value */ + em_set_pba(hw); + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* + * With the 82571 adapter, RAR[0] may be overwritten + * when the other port is reset, we make a duplicate + * in RAR[14] for that eventuality, this assures + * the interface continues to function. 
+ */ + if (hw->mac.type == e1000_82571) { + e1000_set_laa_state_82571(hw, TRUE); + e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1); + } + + /* Initialize the hardware */ + if (em_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return (-EIO); + } + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN); + + /* Configure for OS presence */ + em_init_manageability(hw); + + eth_em_tx_init(dev); + + ret = eth_em_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + em_dev_clear_queues(dev); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + eth_em_vlan_offload_set(dev, mask); + + /* Set Interrupt Throttling Rate to maximum allowed value. */ + E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX); + + /* Setup link speed and duplex */ + switch (dev->data->dev_conf.link_speed) { + case ETH_LINK_SPEED_AUTONEG: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_10: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_10_SPEED; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_10_HALF; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_10_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_100: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_100_SPEED; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_100_HALF; + else if (dev->data->dev_conf.link_duplex == + ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_100_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_1000: + if ((dev->data->dev_conf.link_duplex == + ETH_LINK_AUTONEG_DUPLEX) || + (dev->data->dev_conf.link_duplex == + ETH_LINK_FULL_DUPLEX)) + hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_10000: + default: + goto error_invalid_config; + } + e1000_setup_link(hw); + + /* check if lsc interrupt feature is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) { + ret = eth_em_interrupt_setup(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to setup interrupts"); + em_dev_clear_queues(dev); + return ret; + } + } + + PMD_INIT_LOG(DEBUG, "<<"); + + return (0); + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port " + "%u\n", dev->data->dev_conf.link_speed, + dev->data->dev_conf.link_duplex, dev->data->port_id); + em_dev_clear_queues(dev); + return (-EINVAL); +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. 
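
A note on the speed/duplex switch in eth_em_start() above: the values come straight from the application-supplied rte_eth_conf. As a minimal sketch (the field values here are an illustrative choice, not defaults taken from this patch), forcing a port to 100 Mb/s full duplex would be expressed as:

    /* Illustrative application-side configuration only. eth_em_start()
     * translates this pair into hw->phy.autoneg_advertised =
     * ADVERTISE_100_FULL before calling e1000_setup_link(); any
     * combination not handled by the switch falls through to
     * error_invalid_config. */
    static const struct rte_eth_conf port_conf = {
        .link_speed  = ETH_LINK_SPEED_100,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
    };
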
+ * + **********************************************************************/ +static void +eth_em_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + em_intr_disable(hw); + e1000_reset_hw(hw); + if (hw->mac.type >= e1000_82544) + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Power down the phy. Needed to make the link go down */ + e1000_power_down_phy(hw); + + em_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_write_link_status(dev, &link); +} + +static void +eth_em_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + eth_em_stop(dev); + e1000_phy_hw_reset(hw); + em_release_manageability(hw); + em_hw_control_release(hw); +} + +static int +em_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + + rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10); + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +em_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Issue a global reset */ + e1000_reset_hw(hw); + + /* Let the firmware know the OS is in control */ + em_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
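
To make the watermark comment above concrete: assuming a device whose PBA leaves 40 KB for the receive buffer (an illustrative figure; em_set_pba() picks a different split per MAC type) and with ETHER_MAX_LEN being the usual 1518-byte maximum frame, the assignments that follow work out to:

    /* Worked example, assumed 40 KB Rx packet buffer:
     *   rx_buf_size = (PBA & 0xFFFF) << 10                 = 40960 bytes
     *   high_water  = 40960 - PMD_ROUNDUP(2 * 1518, 1024)  = 40960 - 3072 = 37888
     *   low_water   = 37888 - 1500                         = 36388
     * i.e. XOFF is sent once less than two full-size frames of space
     * remain, and XON resumes after roughly one frame has drained. */
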
+ */ + rx_buf_size = em_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024); + hw->fc.low_water = hw->fc.high_water - 1500; + + if (hw->mac.type == e1000_80003es2lan) + hw->fc.pause_time = UINT16_MAX; + else + hw->fc.pause_time = EM_FC_PAUSE_TIME; + + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if (em_fc_setting <= e1000_fc_full) + hw->fc.requested_mode = em_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Workaround: no TX flow ctrl for PCH */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + + /* Override - settings for PCH2LAN, ya its magic :) */ + if (hw->mac.type == e1000_pch2lan) { + hw->fc.high_water = 0x5C20; + hw->fc.low_water = 0x5048; + hw->fc.pause_time = 0x0650; + hw->fc.refresh_time = 0x0400; + } + + diag = e1000_init_hw(hw); + if (diag < 0) + return (diag); + e1000_check_for_link(hw); + return (0); +} + +/* This function is based on em_update_stats_counters() in e1000/if_em.c */ +static void +eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int pause_frames; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + + /* + * For watchdog management we need to know if we have been + * paused during the last interval, so capture that here. + */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* + * For the 64-bit byte counters the low dword must be read first. + * Both registers clear on the read of the high dword. 
+ */ + + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tor += E1000_READ_REG(hw, E1000_TORH); + stats->tot += E1000_READ_REG(hw, E1000_TOTH); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + if (hw->mac.type >= e1000_82571) { + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + } + + if (hw->mac.type >= e1000_82543) { + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + } + + if (rte_stats == NULL) + return; + + /* Rx Errors */ + rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc + + stats->ruc + stats->roc + stats->mpc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; +} + +static void +eth_em_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_em_stats_get(dev, NULL); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); +} + +static uint32_t +em_get_max_pktlen(const struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + case e1000_ich9lan: + case e1000_ich10lan: + case e1000_pch2lan: + case e1000_82574: + case e1000_80003es2lan: /* 9K Jumbo Frame size */ + return (0x2412); + case e1000_pchlan: + return (0x1000); + /* Adapters that do not support jumbo frames */ + case e1000_82583: + case e1000_ich8lan: + return (ETHER_MAX_LEN); + default: + return (MAX_JUMBO_FRAME_SIZE); + } +} + +static void +eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. 
*/ + dev_info->max_rx_pktlen = em_get_max_pktlen(hw); + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + + /* + * Starting with 631xESB hw supports 2 TX/RX queues per port. + * Unfortunately, all these nics have just one TX context. + * So we have a few choices for TX: + * - Use just one TX queue. + * - Allow cksum offload only for one TX queue. + * - Don't allow TX cksum offload at all. + * For now, option #1 was chosen. + * To use the second RX queue we have to use extended RX descriptor + * (Multiple Receive Queues are mutually exclusive with UDP + * fragmentation and are not supported when a legacy receive + * descriptor format is used). + * Which means separate RX routines - as legacy nics (82540, 82545) + * don't support extended RXD. + * To avoid it we support just one RX queue for now (no RSS). + */ + + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + default: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_read_link_status(dev, &link); + old = link; + + /* Now we check if a transition has happened */ + if (link_check && (link.link_status == 0)) { + hw->mac.ops.get_link_up_info(hw, &link.link_speed, + &link.link_duplex); + link.link_status = 1; + } else if (!link_check && (link.link_status == 1)) { + link.link_speed = 0; + link.link_duplex = 0; + link.link_status = 0; + } + rte_em_dev_atomic_write_link_status(dev, &link); + + /* not changed */ + if (old.link_status == link.link_status) + return -1; + + /* changed */ + return 0; +} + +/* + * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. For AMT version type f/w + * this means that the network i/f is open. + */ +static void +em_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware know the driver has taken over */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); + + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT versions of the + * f/w this means that the network i/f is closed. 
+ */ +static void +em_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware taken over control of h/w */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. + */ +static void +em_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +/* + * Give control back to hardware management + * controller if there is one. + */ +static void +em_release_manageability(struct e1000_hw *hw) +{ + uint32_t manc; + + if (e1000_enable_mng_pass_thru(hw)) { + manc = E1000_READ_REG(hw, E1000_MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +eth_em_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static int +eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA 
copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +em_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* Filter Table Disable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + +static void +em_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* restore vfta from local copy */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +em_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + +} + +static void +em_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + if(mask & ETH_VLAN_STRIP_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + em_vlan_hw_strip_enable(dev); + else + em_vlan_hw_strip_disable(dev); + } + + if(mask & ETH_VLAN_FILTER_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + em_vlan_hw_filter_enable(dev); + else + em_vlan_hw_filter_disable(dev); + } +} + +static void +em_intr_disable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMC, ~0); +} + +/** + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_setup(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC); + rte_intr_enable(&(dev->pci_dev->intr_handle)); + return (0); +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
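
For orientation, the interrupt plumbing above only matters when the application opts in: eth_em_start() checks dev_conf.intr_conf.lsc before calling eth_em_interrupt_setup(), and eth_em_interrupt_handler() below fans events out through _rte_eth_dev_callback_process(). A hedged sketch of the consuming side follows; the callback name and port number are made up, and the exact rte_eth_dev_cb_fn parameter types are assumed from the ethdev API of this period.

    /* Illustrative only; not part of this patch. */
    static void
    lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
    {
        struct rte_eth_link link;

        rte_eth_link_get(port_id, &link);   /* refresh the cached status */
    }

    /* at init time, before rte_eth_dev_start(): */
    port_conf.intr_conf.lsc = 1;            /* consumed by eth_em_start() */
    rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL);
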
+ */ +static int +eth_em_interrupt_action(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t tctl, rctl; + struct rte_eth_link link; + int ret; + + if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE)) + return -1; + + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_em_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, + " Port %d: Link Up - speed %u Mbps - %s\n", + dev->data->port_id, (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down\n", + dev->data->port_id); + } + PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (link.link_status) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_em_interrupt_handler(struct rte_intr_handle *handle, void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_em_interrupt_get_status(dev); + eth_em_interrupt_action(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +static int +eth_em_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP); +} + +static int +eth_em_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (e1000_led_off(hw) == E1000_SUCCESS ? 
0 : -ENOTSUP); +} + +static int +eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rx_buf_size = em_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water); + return (-EINVAL); + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err); + return (-EIO); +} + +static void +eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + e1000_rar_set(hw, mac_addr->addr_bytes, index); +} + +static void +eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c new file mode 100644 index 0000000000..26a387c6a4 --- /dev/null +++ b/lib/librte_pmd_e1000/em_rxtx.c @@ -0,0 +1,1775 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "e1000/e1000_api.h" +#include "e1000_ethdev.h" + +#define E1000_TXD_VLAN_SHIFT 16 + +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0); + return (m); +} + +#define RTE_MBUF_DATA_DMA_ADDR(mb) \ + (uint64_t) ((mb)->buf_physaddr + \ + (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr)) + +#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \ + (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct em_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct em_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct em_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + struct em_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ +}; + +/** + * Hardware context number + */ +enum { + EM_CTX_0 = 0, /**< CTX0 */ + EM_CTX_NUM = 1, /**< CTX NUM */ +}; + +/** + * Structure to check if new context need be built + */ +struct em_ctx_info { + uint16_t flags; /**< ol_flags related to context build. 
*/ + uint32_t cmp_mask; /**< compare mask */ + union rte_vlan_macip hdrlen; /**< L2 and L3 header lengths */ +}; + +/** + * Structure associated with each TX queue. + */ +struct em_tx_queue { + volatile struct e1000_data_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + uint16_t tx_free_thresh;/**< minimum TX before freeing. */ + /**< Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t queue_id; /**< TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + struct em_ctx_info ctx_cache; + /**< Hardware context history.*/ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_em_prefetch(p) rte_prefetch0(p) +#else +#define rte_em_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif /* DEFAULT_TX_FREE_THRESH */ + +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif /* DEFAULT_TX_RS_THRESH */ + + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + * Populates TX context descriptor. + */ +static inline void +em_set_xmit_ctx(struct em_tx_queue* txq, + volatile struct e1000_context_desc *ctx_txd, + uint16_t flags, + union rte_vlan_macip hdrlen) +{ + uint32_t cmp_mask, cmd_len; + uint16_t ipcse, l2len; + struct e1000_context_desc ctx; + + cmp_mask = 0; + cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C; + + l2len = hdrlen.f.l2_len; + ipcse = l2len + hdrlen.f.l3_len; + + /* setup IPCS* fields */ + ctx.lower_setup.ip_fields.ipcss = l2len; + ctx.lower_setup.ip_fields.ipcso = l2len + + offsetof(struct ipv4_hdr, hdr_checksum); + + /* + * When doing checksum or TCP segmentation with IPv6 headers, + * IPCSE field should be set to 0. 
+ */ + if (flags & PKT_TX_IP_CKSUM) { + ctx.lower_setup.ip_fields.ipcse = rte_cpu_to_le_16(ipcse - 1); + cmd_len |= E1000_TXD_CMD_IP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + } else { + ctx.lower_setup.ip_fields.ipcse = 0; + } + + /* setup TUCS* fields */ + ctx.upper_setup.tcp_fields.tucss = ipcse; + ctx.upper_setup.tcp_fields.tucse = 0; + + switch (flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = ipcse + + offsetof(struct udp_hdr, dgram_cksum); + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_TCP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = ipcse + + offsetof(struct tcp_hdr, cksum); + cmd_len |= E1000_TXD_CMD_TCP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + default: + ctx.upper_setup.tcp_fields.tucso = 0; + } + + ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len); + ctx.tcp_seg_setup.data = 0; + + *ctx_txd = ctx; + + txq->ctx_cache.flags = flags; + txq->ctx_cache.cmp_mask = cmp_mask; + txq->ctx_cache.hdrlen = hdrlen; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_ctx_update(struct em_tx_queue *txq, uint16_t flags, + union rte_vlan_macip hdrlen) +{ + /* If match with the current context */ + if (likely (txq->ctx_cache.flags == flags && + ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) & + txq->ctx_cache.cmp_mask) == 0)) + return (EM_CTX_0); + + /* Mismatch */ + return (EM_CTX_NUM); +} + +/* Reset transmit descriptors after they have been used */ +static inline int +em_xmit_cleanup(struct em_tx_queue *txq) +{ + struct em_tx_entry *sw_ring = txq->sw_ring; + volatile struct e1000_data_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh; + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = desc_to_clean_to - nb_tx_desc; + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD)) + { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned; + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. 
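
A quick numeric trace of the wrap-around logic in em_xmit_cleanup(), using assumed values (a 256-entry ring with the DEFAULT_TX_RS_THRESH of 32 defined earlier):

    /* Assumed: nb_tx_desc = 256, tx_rs_thresh = 32, last_desc_cleaned = 240.
     *   desc_to_clean_to = 240 + 32 = 272  ->  272 - 256 = 16  (wrapped)
     *   (then remapped through sw_ring[16].last_id to the final descriptor
     *    of whatever packet owns that slot)
     *   nb_tx_to_clean   = (256 - 240) + 16 = 32
     * If that descriptor's DD status bit is not yet set, nothing is
     * cleaned and the caller simply retries on a later invocation. */
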
+ */ + txr[desc_to_clean_to].upper.fields.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free += nb_tx_to_clean; + + /* No Error */ + return (0); +} + +static inline uint32_t +tx_desc_cksum_flags_to_upper(uint16_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8}; + static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + return (tmp); +} + +uint16_t +eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct em_tx_queue *txq; + struct em_tx_entry *sw_ring; + struct em_tx_entry *txe, *txn; + volatile struct e1000_data_desc *txr; + volatile struct e1000_data_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t popts_spec; + uint32_t cmd_type_len; + uint16_t slen; + uint16_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint16_t tx_ol_req; + uint32_t ctx; + uint32_t new_ctx; + union rte_vlan_macip hdrlen; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Determine if the descriptor ring needs to be cleaned. */ + if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) { + em_xmit_cleanup(txq); + } + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + + /* If hardware offload required */ + tx_ol_req = ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK); + if (tx_ol_req) { + hdrlen = tx_pkt->pkt.vlan_macip; + /* If new context to be built or reuse the exist ctx. */ + ctx = what_ctx_update(txq, tx_ol_req, hdrlen); + + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == EM_CTX_NUM); + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = tx_pkt->pkt.nb_segs + new_ctx; + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u\n", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) tx_pkt->pkt.pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. 
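
To illustrate the descriptor accounting just above with assumed numbers (the packet shape and ring position are made up for the example):

    /* Assumed: a 3-segment packet that needs a fresh offload context,
     * submitted on a 1024-descriptor ring with tx_id currently at 1022:
     *   nb_used = pkt.nb_segs + new_ctx = 3 + 1 = 4
     *   tx_last = 1022 + 4 - 1 = 1025  ->  1025 - 1024 = 1  (circular wrap)
     * so the context descriptor occupies slot 1022 and the three data
     * descriptors slots 1023, 0 and 1, with EOP (and possibly RS) set
     * only on the final one, tx_last = 1. */
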
+ * nb_used better be less than or equal to txq->tx_rs_thresh + */ + while (unlikely (nb_used > txq->nb_tx_free)) { + PMD_TX_FREE_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (em_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return (0); + goto end_of_tx; + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - E1000_TXD_DTYP_DATA + * - E1000_TXD_DTYP_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_POPTS_IXSM + * - E1000_TXD_POPTS_TXSM + * + * The following bits must be set in the last Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_CMD_VLE + * - E1000_TXD_CMD_IFCS + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_IFCS; + popts_spec = 0; + + /* Set VLAN Tag offload fields. */ + if (ol_flags & PKT_TX_VLAN_PKT) { + cmd_type_len |= E1000_TXD_CMD_VLE; + popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci << + E1000_TXD_VLAN_SHIFT; + } + + if (tx_ol_req) { + /* + * Setup the TX Context Descriptor if required + */ + if (new_ctx) { + volatile struct e1000_context_desc *ctx_txd; + + ctx_txd = (volatile struct e1000_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + em_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + hdrlen); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags); + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. 
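+			 * Each segment of the packet gets its own data
+			 * descriptor, carrying the segment DMA address and
+			 * length together with the command bits computed
+			 * above.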
+ */ + slen = m_seg->pkt.data_len; + buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); + + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen); + txd->upper.data = rte_cpu_to_le_32(popts_spec); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->pkt.next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= E1000_TXD_CMD_EOP; + txq->nb_tx_used += nb_used; + txq->nb_tx_free -= nb_used; + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= E1000_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + txd->lower.data |= rte_cpu_to_le_32(cmd_type_len); + } +end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return (nb_tx); +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +static inline uint16_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint16_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? + PKT_RX_VLAN_PKT : 0; + + return pkt_flags; +} + +static inline uint16_t +rx_desc_error_to_pkt_flags(uint32_t rx_error) +{ + uint16_t pkt_flags = 0; + + if (rx_error & E1000_RXD_ERR_IPE) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + if (rx_error & E1000_RXD_ERR_TCPE) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + return (pkt_flags); +} + +uint16_t +eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma_addr; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. 
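+		 *      (The port's rx_mbuf_alloc_failed counter is also
+		 *       incremented in that case.)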
+		 *
+		 * This policy does not drop the packet received in the RX
+		 * descriptor for which the allocation of a new mbuf failed.
+		 * Thus, it allows that packet to be later retrieved if
+		 * mbufs have been freed in the meantime.
+		 * As a side effect, holding RX descriptors instead of
+		 * systematically giving them back to the NIC may lead to
+		 * RX ring exhaustion situations.
+		 * However, the NIC can gracefully prevent such situations
+		 * from happening by sending specific "back-pressure" flow
+		 * control frames to its peer(s).
+		 */
+		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+			"status=0x%x pkt_len=%u\n",
+			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			(unsigned) rx_id, (unsigned) status,
+			(unsigned) rte_le_to_cpu_16(rxd.length));
+
+		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		if (nmb == NULL) {
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				"queue_id=%u\n",
+				(unsigned) rxq->port_id,
+				(unsigned) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		nb_hold++;
+		rxe = &sw_ring[rx_id];
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc)
+			rx_id = 0;
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_em_prefetch(sw_ring[rx_id].mbuf);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 8 pointers
+		 * to mbufs.
+		 */
+		if ((rx_id & 0x3) == 0) {
+			rte_em_prefetch(&rx_ring[rx_id]);
+			rte_em_prefetch(&sw_ring[rx_id]);
+		}
+
+		/* Rearm RXD: attach new mbuf and reset status to zero. */
+
+		rxm = rxe->mbuf;
+		rxe->mbuf = nmb;
+		dma_addr =
+			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		rxdp->buffer_addr = dma_addr;
+		rxdp->status = 0;
+
+		/*
+		 * Initialize the returned mbuf.
+		 * 1) setup generic mbuf fields:
+		 *    - number of segments,
+		 *    - next segment,
+		 *    - packet length,
+		 *    - RX port identifier.
+		 * 2) integrate hardware offload data, if any:
+		 *    - RSS flag & hash,
+		 *    - IP checksum flag,
+		 *    - VLAN TCI, if any,
+		 *    - error flags.
+		 */
+		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
+				rxq->crc_len);
+		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch(rxm->pkt.data);
+		rxm->pkt.nb_segs = 1;
+		rxm->pkt.next = NULL;
+		rxm->pkt.pkt_len = pkt_len;
+		rxm->pkt.data_len = pkt_len;
+		rxm->pkt.in_port = rxq->port_id;
+
+		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
+		rxm->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors);
+
+		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rx++] = rxm;
+	}
+	rxq->rx_tail = rx_id;
+
+	/*
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+	 * register.
+	 * Update the RDT with the value of the last processed RX descriptor
+	 * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+	 * hardware point of view...
+	 */
+	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+	if (nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+			"nb_hold=%u nb_rx=%u\n",
+			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			(unsigned) rx_id, (unsigned) nb_hold,
+			(unsigned) nb_rx);
+		rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return (nb_rx); +} + +uint16_t +eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct em_rx_queue *rxq; + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u " + "status=0x%x data_len=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) status, + (unsigned) rte_le_to_cpu_16(rxd.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_em_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_em_prefetch(&rx_ring[rx_id]); + rte_em_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. 
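+		 * The descriptor is handed back to the hardware with its
+		 * status field cleared, so it can be reused for a new frame.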
+ */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rxdp->buffer_addr = dma; + rxdp->status = 0; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.length); + rxm->pkt.data_len = data_len; + rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt.pkt_len = data_len; + first_seg->pkt.nb_segs = 1; + } else { + first_seg->pkt.pkt_len += data_len; + first_seg->pkt.nb_segs++; + last_seg->pkt.next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (status & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. + */ + rxm->pkt.next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt.pkt_len -= ETHER_CRC_LEN; + if (data_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->pkt.nb_segs--; + last_seg->pkt.data_len = (uint16_t) + (last_seg->pkt.data_len - + (ETHER_CRC_LEN - data_len)); + last_seg->pkt.next = NULL; + } else + rxm->pkt.data_len = + (uint16_t) (data_len - ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - IP checksum flag, + * - error flags. + */ + first_seg->pkt.in_port = rxq->port_id; + + first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); + first_seg->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch(first_seg->pkt.data); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... 
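+	 * (This is why rx_id is stepped back by one descriptor before it is
+	 * written to the RDT register below.)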
+ */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return (nb_rx); +} + +/* + * Rings setup and release. + * + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. + * This will also optimize cache line size effect. + * H/W supports up to cache line size 128. + */ +#define EM_ALIGN 128 + +/* + * Maximum number of Ring Descriptors. + * + * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ +#define EM_MIN_RING_DESC 32 +#define EM_MAX_RING_DESC 4096 + +#define EM_MAX_BUF_SIZE 16384 +#define EM_RCTL_FLXBUF_STEP 1024 + +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + const struct rte_memzone *mz; + char z_name[RTE_MEMZONE_NAMESIZE]; + + rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, ring_name, dev->data->port_id, + queue_id); + + if ((mz = rte_memzone_lookup(z_name)) != 0) + return (mz); + + return rte_memzone_reserve(z_name, (uint64_t) ring_size, socket_id, 0); +} + +static void +em_tx_queue_release_mbufs(struct em_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i != txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_tx_queue_release(struct em_tx_queue *txq) +{ + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +eth_em_tx_queue_release(void *txq) +{ + em_tx_queue_release(txq); +} + +/* (Re)set dynamic em_tx_queue fields to defaults */ +static void +em_reset_tx_queue(struct em_tx_queue *txq) +{ + uint16_t i, nb_desc, prev; + static const struct e1000_data_desc txd_init = { + .upper.fields = {.status = E1000_TXD_STAT_DD}, + }; + + nb_desc = txq->nb_tx_desc; + + /* Initialize ring entries */ + + prev = (uint16_t) (nb_desc - 1); + + for (i = 0; i < nb_desc; i++) { + txq->tx_ring[i] = txd_init; + txq->sw_ring[i].mbuf = NULL; + txq->sw_ring[i].last_id = i; + txq->sw_ring[prev].next_id = i; + prev = i; + } + + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->nb_tx_free = (uint16_t)(nb_desc - 1); + txq->last_desc_cleaned = (uint16_t)(nb_desc - 1); + txq->nb_tx_used = 0; + txq->tx_tail = 0; + + memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); +} + +int +eth_em_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct em_tx_queue *txq; + struct e1000_hw *hw; + uint32_t tsize; + uint16_t tx_rs_thresh, tx_free_thresh; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of EM_ALIGN. 
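+	 * With 16-byte data descriptors this amounts to a multiple of eight
+	 * descriptors within the [EM_MIN_RING_DESC, EM_MAX_RING_DESC] range.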
+	 */
+	if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
+			(nb_desc > EM_MAX_RING_DESC) ||
+			(nb_desc < EM_MIN_RING_DESC)) {
+		return -(EINVAL);
+	}
+
+	tx_free_thresh = tx_conf->tx_free_thresh;
+	if (tx_free_thresh == 0)
+		tx_free_thresh = RTE_MIN(nb_desc / 4, DEFAULT_TX_FREE_THRESH);
+
+	tx_rs_thresh = tx_conf->tx_rs_thresh;
+	if (tx_rs_thresh == 0)
+		tx_rs_thresh = RTE_MIN(tx_free_thresh, DEFAULT_TX_RS_THRESH);
+
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		RTE_LOG(ERR, PMD,
+			"tx_free_thresh must be less than the "
+			"number of TX descriptors minus 3. "
+			"(tx_free_thresh=%u port=%d queue=%d)\n",
+			tx_free_thresh, dev->data->port_id, queue_idx);
+		return -(EINVAL);
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		RTE_LOG(ERR, PMD,
+			"tx_rs_thresh must be less than or equal to "
+			"tx_free_thresh. "
+			"(tx_free_thresh=%u tx_rs_thresh=%u "
+			"port=%d queue=%d)\n",
+			tx_free_thresh, tx_rs_thresh, dev->data->port_id,
+			queue_idx);
+		return -(EINVAL);
+	}
+
+	/*
+	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
+	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+	 * by the NIC and all descriptors are written back after the NIC
+	 * accumulates WTHRESH descriptors.
+	 */
+	if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
+		RTE_LOG(ERR, PMD,
+			"TX WTHRESH must be set to 0 if "
+			"tx_rs_thresh is greater than 1. "
+			"(tx_rs_thresh=%u port=%d queue=%d)\n",
+			tx_rs_thresh, dev->data->port_id, queue_idx);
+		return -(EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		em_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/*
+	 * Allocate TX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+			socket_id)) == NULL)
+		return (-ENOMEM);
+
+	/* Allocate the tx queue data structure. 
*/ + if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq), + CACHE_LINE_SIZE)) == NULL) + return (-ENOMEM); + + /* Allocate software ring */ + if ((txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(txq->sw_ring[0]) * nb_desc, + CACHE_LINE_SIZE)) == NULL) { + em_tx_queue_release(txq); + return (-ENOMEM); + } + + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_rs_thresh = tx_rs_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx)); + txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr; + txq->tx_ring = (struct e1000_data_desc *) tz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + em_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + return (0); +} + +static void +em_rx_queue_release_mbufs(struct em_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i != rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_rx_queue_release(struct em_rx_queue *rxq) +{ + if (rxq != NULL) { + em_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +void +eth_em_rx_queue_release(void *rxq) +{ + em_rx_queue_release(rxq); +} + +/* Reset dynamic em_rx_queue fields back to defaults */ +static void +em_reset_rx_queue(struct em_rx_queue *rxq) +{ + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +eth_em_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct em_rx_queue *rxq; + struct e1000_hw *hw; + uint32_t rsize; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of EM_ALIGN. + */ + if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 || + (nb_desc > EM_MAX_RING_DESC) || + (nb_desc < EM_MIN_RING_DESC)) { + return (-EINVAL); + } + + /* + * EM devices don't support drop_en functionality + */ + if (rx_conf->rx_drop_en) { + RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n"); + return (-EINVAL); + } + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->rx_queues[queue_idx] != NULL) { + em_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate RX ring for max possible mumber of hardware descriptors. */ + rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC; + if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize, + socket_id)) == NULL) + return (-ENOMEM); + + /* Allocate the RX queue data structure. */ + if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq), + CACHE_LINE_SIZE)) == NULL) + return (-ENOMEM); + + /* Allocate software ring. 
*/ + if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof (rxq->sw_ring[0]) * nb_desc, + CACHE_LINE_SIZE)) == NULL) { + em_rx_queue_release(rxq); + return (-ENOMEM); + } + + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? + 0 : ETHER_CRC_LEN); + + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); + rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr; + rxq->rx_ring = (struct e1000_rx_desc *) rz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + em_reset_rx_queue(rxq); + + return (0); +} + +void +em_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct em_tx_queue *txq; + struct em_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + em_reset_tx_queue(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + em_rx_queue_release_mbufs(rxq); + em_reset_rx_queue(rxq); + } + } +} + +/* + * Takes as input/output parameter RX buffer size. + * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register. + */ +static uint32_t +em_rctl_bsize(enum e1000_mac_type hwtyp, uint32_t *bufsz) +{ + /* + * For BSIZE & BSEX all configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + static const struct { + uint32_t bufsz; + uint32_t rctl; + } bufsz_to_rctl[] = { + {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)}, + {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)}, + {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)}, + {2048, E1000_RCTL_SZ_2048}, + {1024, E1000_RCTL_SZ_1024}, + {512, E1000_RCTL_SZ_512}, + {256, E1000_RCTL_SZ_256}, + }; + + int i; + uint32_t rctl_bsize; + + rctl_bsize = *bufsz; + + /* + * Starting from 82571 it is possible to specify RX buffer size + * by RCTL.FLXBUF. When this field is different from zero, the + * RX buffer size = RCTL.FLXBUF * 1K + * (e.g. t is possible to specify RX buffer size 1,2,...,15KB). + * It is working ok on real HW, but by some reason doesn't work + * on VMware emulated 82574L. + * So for now, always use BSIZE/BSEX to setup RX buffer size. 
+ * If you don't plan to use it on VMware emulated 82574L and + * would like to specify RX buffer size in 1K granularity, + * uncomment the following lines: + * *************************************************************** + * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 && + * rctl_bsize >= EM_RCTL_FLXBUF_STEP) { + * rctl_bsize /= EM_RCTL_FLXBUF_STEP; + * *bufsz = rctl_bsize; + * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT & + * E1000_RCTL_FLXBUF_MASK); + * } + * *************************************************************** + */ + + for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]); + i++) { + if (rctl_bsize >= bufsz_to_rctl[i].bufsz) { + *bufsz = bufsz_to_rctl[i].bufsz; + return (bufsz_to_rctl[i].rctl); + } + } + + /* Should never happen. */ + return (-EINVAL); +} + +static int +em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) +{ + struct em_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + static const struct e1000_rx_desc rxd_init = { + .buffer_addr = 0, + }; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile struct e1000_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu\n", rxq->queue_id); + em_rx_queue_release(rxq); + return (-ENOMEM); + } + + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); + + /* Clear HW ring memory */ + rxq->rx_ring[i] = rxd_init; + + rxd = &rxq->rx_ring[i]; + rxd->buffer_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ +int +eth_em_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_rx_queue *rxq; + uint32_t rctl; + uint32_t rfctl; + uint32_t rxcsum; + uint32_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + + /* Disable extended descriptor type. */ + rfctl &= ~E1000_RFCTL_EXTEN; + /* Disable accelerated acknowledge */ + if (hw->mac.type == e1000_82574) + rfctl |= E1000_RFCTL_ACK_DIS; + + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* + * XXX TEMPORARY WORKAROUND: on some systems with 82573 + * long latencies are observed, like Lenovo X60. This + * change eliminates the problem, but since having positive + * values in RDTR is a known source of problems on other + * platforms another solution is being sought. + */ + if (hw->mac.type == e1000_82573) + E1000_WRITE_REG(hw, E1000_RDTR, 0x20); + + dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts; + + /* Determine RX bufsize. */ + rctl_bsize = EM_MAX_BUF_SIZE; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct rte_pktmbuf_pool_private *mbp_priv; + uint32_t buf_size; + + rxq = dev->data->rx_queues[i]; + mbp_priv = rte_mempool_get_priv(rxq->mb_pool); + buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + rctl_bsize = RTE_MIN(rctl_bsize, buf_size); + } + + rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize); + + /* Configure and enable each RX queue. 
*/ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and setup queue */ + ret = em_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + rxq->crc_len = + (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? + 0 : ETHER_CRC_LEN); + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(*rxq->rx_ring)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + rxdctl &= 0xFE000000; + rxdctl |= rxq->pthresh & 0x3F; + rxdctl |= (rxq->hthresh & 0x3F) << 8; + rxdctl |= (rxq->wthresh & 0x3F) << 16; + rxdctl |= E1000_RXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + + /* + * Due to EM devices not having any sort of hardware + * limit for packet length, jumbo frame of any size + * can be accepted, thus we have to enable scattered + * rx if jumbo frames are enabled (or if buffer size + * is too small to accomodate non-jumbo packets) + * to avoid splitting packets that don't fit into + * one buffer. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame || + rctl_bsize < ETHER_MAX_LEN) { + dev->rx_pkt_burst = + (eth_rx_burst_t)eth_em_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* No MRQ or RSS support for now */ + + /* Set early receive threshold on appropriate hw */ + if ((hw->mac.type == e1000_ich9lan || + hw->mac.type == e1000_pch2lan || + hw->mac.type == e1000_ich10lan) && + dev->data->dev_conf.rxmode.jumbo_frame == 1) { + u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); + E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); + } + + if (hw->mac.type == e1000_pch2lan) { + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); + else + e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); + } + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.hw_strip_crc) + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + else + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + /* Legacy descriptor type. */ + rctl &= ~E1000_RCTL_DTYP_MASK; + + /* + * Configure support of jumbo frames, if any. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + rctl |= E1000_RCTL_LPE; + else + rctl &= ~E1000_RCTL_LPE; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. 
+ * + **********************************************************************/ +void +eth_em_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(*txq->tx_ring)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + /* + * bit 22 is reserved, on some models should always be 0, + * on others - always 1. + */ + txdctl &= E1000_TXDCTL_COUNT_DESC; + txdctl |= txq->pthresh & 0x3F; + txdctl |= (txq->hthresh & 0x3F) << 8; + txdctl |= (txq->wthresh & 0x3F) << 16; + txdctl |= E1000_TXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + diff --git a/mk/rte.vars.mk b/mk/rte.vars.mk index c45d4c00ae..2c8fcff891 100644 --- a/mk/rte.vars.mk +++ b/mk/rte.vars.mk @@ -80,6 +80,9 @@ include $(RTE_SDK)/mk/rte.extvars.mk endif CONFIG_RTE_LIBRTE_E1000_PMD = $(CONFIG_RTE_LIBRTE_IGB_PMD) +ifneq ($(CONFIG_RTE_LIBRTE_E1000_PMD),y) + CONFIG_RTE_LIBRTE_E1000_PMD = $(CONFIG_RTE_LIBRTE_EM_PMD) +endif ifeq ($(RTE_ARCH),) $(error RTE_ARCH is not defined)
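The sketch below (not part of the patch itself) illustrates how the new entry
points are reached: the EM PMD registers eth_em_recv_pkts() or
eth_em_recv_scattered_pkts() and eth_em_xmit_pkts() as the port's burst
handlers, so applications keep calling the generic rte_eth_rx_burst() and
rte_eth_tx_burst(). It assumes EAL initialization, port/queue configuration
and rte_eth_dev_start() have already been done; BURST_SIZE and
em_forward_loop() are made-up names for this example only, and the API shown
is the ethdev API of this DPDK generation.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* illustrative burst size */

/*
 * Forward packets from RX queue 0 to TX queue 0 of one EM port.
 * rte_eth_rx_burst() ends up in eth_em_recv_pkts() (or the scattered
 * variant when jumbo frames or small buffers are configured), while
 * rte_eth_tx_burst() ends up in eth_em_xmit_pkts().
 */
static void
em_forward_loop(uint8_t port_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

		/* Free whatever the TX ring could not accept. */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}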