X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_ethdev.c;h=8850957902a984197252793d3de56379beae81d2;hb=bdad90d12ec8eea8c9552880d715f10b0af93cc6;hp=a0c3b4dc50a7df8fed0f54e6f2997a0d935030d3;hpb=4c00cfdc0ea225f2518a35db928ad1ab02b2a724;p=dpdk.git

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a0c3b4dc50..8850957902 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include 
@@ -40,16 +11,14 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -75,7 +44,7 @@ static int eth_em_link_update(struct rte_eth_dev *dev,
 static int eth_em_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *rte_stats);
 static void eth_em_stats_reset(struct rte_eth_dev *dev);
-static void eth_em_infos_get(struct rte_eth_dev *dev,
+static int eth_em_infos_get(struct rte_eth_dev *dev,
 				struct rte_eth_dev_info *dev_info);
 static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
 				struct rte_eth_fc_conf *fc_conf);
@@ -120,12 +89,15 @@ static int eth_em_led_on(struct rte_eth_dev *dev);
 static int eth_em_led_off(struct rte_eth_dev *dev);
 
 static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr,
+			uint32_t index, uint32_t pool);
 static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+					 struct rte_ether_addr *addr);
 
 static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-				   struct ether_addr *mc_addr_set,
+				   struct rte_ether_addr *mc_addr_set,
 				   uint32_t nb_mc_addr);
 
 #define EM_FC_PAUSE_TIME 0x0680
@@ -161,6 +133,7 @@ static const struct rte_pci_id pci_id_em_map[] = {
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
@@ -215,6 +188,7 @@ static const struct eth_dev_ops eth_em_ops = {
 	.dev_led_off          = eth_em_led_off,
 	.flow_ctrl_get        = eth_em_flow_ctrl_get,
 	.flow_ctrl_set        = eth_em_flow_ctrl_set,
+	.mac_addr_set         = eth_em_default_mac_addr_set,
 	.mac_addr_add         = eth_em_rar_set,
 	.mac_addr_remove      = eth_em_rar_clear,
 	.set_mc_addr_list     = eth_em_set_mc_addr_list,
@@ -222,57 +196,6 @@ static const struct eth_dev_ops eth_em_ops = {
 	.txq_info_get = em_txq_info_get,
 };
 
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
 
 /**
  * eth_em_dev_is_ich8 - Check for ICH8 device
@@ -286,6 +209,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
 	DEBUGFUNC("eth_em_dev_is_ich8");
 
 	switch (hw->device_id) {
+	case E1000_DEV_ID_PCH2_LV_LM:
 	case E1000_DEV_ID_PCH_LPT_I217_LM:
 	case E1000_DEV_ID_PCH_LPT_I217_V:
 	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
@@ -360,17 +284,17 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
 			hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
 			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 		eth_dev->data->mac_addrs);
 
 	/* initialize the vfta */
@@ -406,9 +330,6 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
 
-	rte_free(eth_dev->data->mac_addrs);
-	eth_dev->data->mac_addrs = NULL;
-
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
 	rte_intr_callback_unregister(intr_handle,
@@ -431,8 +352,7 @@ static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_em_pmd = {
 	.id_table = pci_id_em_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = eth_em_pci_probe,
 	.remove = eth_em_pci_remove,
 };
@@ -530,6 +450,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
@@ -586,6 +507,30 @@ em_set_pba(struct e1000_hw *hw)
 	E1000_WRITE_REG(hw, E1000_PBA, pba);
 }
 
+static void
+eth_em_rxtx_control(struct rte_eth_dev *dev,
+		    bool enable)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t tctl, rctl;
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	if (enable) {
+		/* enable Tx/Rx */
+		tctl |= E1000_TCTL_EN;
+		rctl |= E1000_RCTL_EN;
+	} else {
+		/* disable Tx/Rx */
+		tctl &= ~E1000_TCTL_EN;
+		rctl &= ~E1000_RCTL_EN;
+	}
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(hw);
+}
+
 static int
 eth_em_start(struct rte_eth_dev *dev)
 {
@@ -630,7 +575,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		return -EIO;
 	}
 
-	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+	E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);
 
 	/* Configure for OS presence */
 	em_init_manageability(hw);
@@ -759,6 +704,9 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	adapter->stopped = 0;
 
+	eth_em_rxtx_control(dev, true);
+	eth_em_link_update(dev, 0);
+
 	PMD_INIT_LOG(DEBUG, "<<");
 
 	return 0;
@@ -784,10 +732,16 @@ eth_em_stop(struct rte_eth_dev *dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
+	eth_em_rxtx_control(dev, false);
 	em_rxq_intr_disable(hw);
 	em_lsc_intr_disable(hw);
 
 	e1000_reset_hw(hw);
+
+	/* Flush desc rings for i219 */
+	if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
+		em_flush_desc_rings(dev);
+
 	if (hw->mac.type >= e1000_82544)
 		E1000_WRITE_REG(hw, E1000_WUC, 0);
 
@@ -798,7 +752,7 @@ eth_em_stop(struct rte_eth_dev *dev)
 
 	/* clear the recorded link status */
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_write_link_status(dev, &link);
+	rte_eth_linkstatus_set(dev, &link);
 
 	if (!rte_intr_allow_others(intr_handle))
 		/* resume to the default handler */
@@ -871,7 +825,8 @@ em_hardware_init(struct e1000_hw *hw)
 	 */
 	rx_buf_size = em_get_rx_buffer_size(hw);
 
-	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+	hw->fc.high_water = rx_buf_size -
+		PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
 	hw->fc.low_water = hw->fc.high_water - 1500;
 
 	if (hw->mac.type == e1000_80003es2lan)
@@ -1050,7 +1005,7 @@ eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	em_rxq_intr_enable(hw);
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -1065,9 +1020,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
 	return 0;
 }
 
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -1085,31 +1042,20 @@ em_get_max_pktlen(const struct e1000_hw *hw)
 		return 0x1000;
 	/* Adapters that do not support jumbo frames */
 	case e1000_ich8lan:
-		return ETHER_MAX_LEN;
+		return RTE_ETHER_MAX_LEN;
 	default:
 		return MAX_JUMBO_FRAME_SIZE;
 	}
 }
 
-static void
+static int
 eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1131,6 +1077,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
 
+	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
+
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
 		.nb_min = E1000_MIN_RING_DESC,
@@ -1148,6 +1101,14 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
 			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
 			ETH_LINK_SPEED_1G;
+
+	/* Preferred queue parameters */
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_txportconf.ring_size = 256;
+	dev_info->default_rxportconf.ring_size = 256;
+
+	return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1156,7 +1117,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
 	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_link link, old;
+	struct rte_eth_link link;
 	int link_check, count;
 
 	link_check = 0;
@@ -1191,8 +1152,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
 	}
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
-	old = link;
 
 	/* Now we check if a transition has happened */
 	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
@@ -1206,19 +1165,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
 				ETH_LINK_SPEED_FIXED);
 	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
-		link.link_speed = 0;
+		link.link_speed = ETH_SPEED_NUM_NONE;
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_SPEED_FIXED;
+		link.link_autoneg = ETH_LINK_FIXED;
 	}
-	rte_em_dev_atomic_write_link_status(dev, &link);
-
-	/* not changed */
-	if (old.link_status == link.link_status)
-		return -1;
-
-	/* changed */
-	return 0;
+
+	return rte_eth_linkstatus_set(dev, &link);
 }
 
 /*
@@ -1456,15 +1409,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1493,7 +1449,8 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
 	/* clear interrupt */
 	E1000_READ_REG(hw, E1000_ICR);
 	regval = E1000_READ_REG(hw, E1000_IMS);
-	E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+	E1000_WRITE_REG(hw, E1000_IMS,
+			regval | E1000_ICR_LSC | E1000_ICR_OTHER);
 	return 0;
 }
 
@@ -1543,7 +1500,7 @@ em_rxq_intr_enable(struct e1000_hw *hw)
 static void
 em_lsc_intr_disable(struct e1000_hw *hw)
 {
-	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
 	E1000_WRITE_FLUSH(hw);
 }
 
@@ -1610,7 +1567,6 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	uint32_t tctl, rctl;
 	struct rte_eth_link link;
 	int ret;
 
@@ -1618,7 +1574,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 		return -1;
 
 	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	/* set get_link_status to check register later */
 	hw->mac.get_link_status = 1;
@@ -1628,8 +1584,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (ret < 0)
 		return 0;
 
-	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
+	rte_eth_linkstatus_get(dev, &link);
+
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			dev->data->port_id, link.link_speed,
@@ -1642,21 +1598,6 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 		pci_dev->addr.domain, pci_dev->addr.bus,
 		pci_dev->addr.devid, pci_dev->addr.function);
 
-	tctl = E1000_READ_REG(hw, E1000_TCTL);
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
-	if (link.link_status) {
-		/* enable Tx/Rx */
-		tctl |= E1000_TCTL_EN;
-		rctl |= E1000_RCTL_EN;
-	} else {
-		/* disable Tx/Rx */
-		tctl &= ~E1000_TCTL_EN;
-		rctl &= ~E1000_RCTL_EN;
-	}
-	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-	E1000_WRITE_FLUSH(hw);
-
 	return 0;
 }
 
@@ -1678,7 +1619,7 @@ eth_em_interrupt_handler(void *param)
 
 	eth_em_interrupt_get_status(dev);
 	eth_em_interrupt_action(dev, dev->intr_handle);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 static int
@@ -1763,7 +1704,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
-	max_high_water = rx_buf_size - ETHER_MAX_LEN;
+	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -1802,7 +1743,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 }
 
 static int
-eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		uint32_t index, __rte_unused uint32_t pool)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1813,7 +1754,7 @@ eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
-	uint8_t addr[ETHER_ADDR_LEN];
+	uint8_t addr[RTE_ETHER_ADDR_LEN];
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	memset(addr, 0, sizeof(addr));
@@ -1821,6 +1762,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 	e1000_rar_set(hw, addr, index);
 }
 
+static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *addr)
+{
+	eth_em_rar_clear(dev, 0);
+
+	return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
 static int
 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -1828,12 +1778,17 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct e1000_hw *hw;
 	uint32_t frame_size;
 	uint32_t rctl;
+	int ret;
+
+	ret = eth_em_infos_get(dev, &dev_info);
+	if (ret != 0)
+		return ret;
 
-	eth_em_infos_get(dev, &dev_info);
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+		VLAN_TAG_SIZE;
 
 	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
 		return -EINVAL;
 
 	/* refuse mtu that requires the support of scattered packets when this
@@ -1846,11 +1801,13 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 
 	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+	if (frame_size > RTE_ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1862,7 +1819,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 static int
 eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-			 struct ether_addr *mc_addr_set,
+			 struct rte_ether_addr *mc_addr_set,
 			 uint32_t nb_mc_addr)
 {
 	struct e1000_hw *hw;
@@ -1875,3 +1832,9 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
+{
+	e1000_igb_init_log();
+}