X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_ethdev.c;h=902b1cdca07463d7b60c2722af1b26fe1b11ea42;hb=fae4b8c47c25286e5d87cde55ff16d1bed298e15;hp=77ef13490690745f2a4afc3181e07be50817ca11;hpb=a2efa4f66094c1fa78ac956bb065925ca263082a;p=dpdk.git

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 77ef134906..902b1cdca0 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -11,16 +11,14 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -37,16 +35,16 @@ static int eth_em_configure(struct rte_eth_dev *dev);
 static int eth_em_start(struct rte_eth_dev *dev);
 static void eth_em_stop(struct rte_eth_dev *dev);
 static void eth_em_close(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
 static int eth_em_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
 static int eth_em_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *rte_stats);
-static void eth_em_stats_reset(struct rte_eth_dev *dev);
-static void eth_em_infos_get(struct rte_eth_dev *dev,
+static int eth_em_stats_reset(struct rte_eth_dev *dev);
+static int eth_em_infos_get(struct rte_eth_dev *dev,
 				struct rte_eth_dev_info *dev_info);
 static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
 				struct rte_eth_fc_conf *fc_conf);
@@ -91,12 +89,15 @@ static int eth_em_led_on(struct rte_eth_dev *dev);
 static int eth_em_led_off(struct rte_eth_dev *dev);
 
 static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr,
+			uint32_t index, uint32_t pool);
 static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+					struct rte_ether_addr *addr);
 
 static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-				   struct ether_addr *mc_addr_set,
+				   struct rte_ether_addr *mc_addr_set,
 				   uint32_t nb_mc_addr);
 
 #define EM_FC_PAUSE_TIME 0x0680
@@ -105,9 +106,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 
 static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
 
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
 /*
  * The set of PCI devices this driver supports
  */
@@ -135,6 +133,7 @@ static const struct rte_pci_id pci_id_em_map[] = {
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
 	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
@@ -189,6 +188,7 @@ static const struct eth_dev_ops eth_em_ops = {
 	.dev_led_off          = eth_em_led_off,
 	.flow_ctrl_get        = eth_em_flow_ctrl_get,
 	.flow_ctrl_set        = eth_em_flow_ctrl_set,
+	.mac_addr_set         = eth_em_default_mac_addr_set,
 	.mac_addr_add         = eth_em_rar_set,
 	.mac_addr_remove      = eth_em_rar_clear,
 	.set_mc_addr_list     = eth_em_set_mc_addr_list,
@@ -196,57 +196,6 @@ static const struct eth_dev_ops eth_em_ops = {
 	.txq_info_get         = em_txq_info_get,
 };
 
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
 
 /**
  * eth_em_dev_is_ich8 - Check for ICH8 device
@@ -260,6 +209,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
 	DEBUGFUNC("eth_em_dev_is_ich8");
 
 	switch (hw->device_id) {
+	case E1000_DEV_ID_PCH2_LV_LM:
 	case E1000_DEV_ID_PCH_LPT_I217_LM:
 	case E1000_DEV_ID_PCH_LPT_I217_V:
 	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
@@ -334,19 +284,24 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
 			hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
 			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 		eth_dev->data->mac_addrs);
 
+	/* Pass the information to the rte_eth_dev_close() that it should also
+	 * release the private port resources.
+	 */
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
 	/* initialize the vfta */
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -363,30 +318,12 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct e1000_adapter *adapter =
-		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-
 	PMD_INIT_FUNC_TRACE();
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
-
-	if (adapter->stopped == 0)
-		eth_em_close(eth_dev);
-
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	rte_free(eth_dev->data->mac_addrs);
-	eth_dev->data->mac_addrs = NULL;
+		return 0;
 
-	/* disable uio intr before callback unregister */
-	rte_intr_disable(intr_handle);
-	rte_intr_callback_unregister(intr_handle,
-		eth_em_interrupt_handler, eth_dev);
+	eth_em_close(eth_dev);
 
 	return 0;
 }
@@ -405,8 +342,7 @@ static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_em_pmd = {
 	.id_table = pci_id_em_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = eth_em_pci_probe,
 	.remove = eth_em_pci_remove,
 };
@@ -504,6 +440,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
@@ -628,7 +565,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		return -EIO;
 	}
 
-	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+	E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);
 
 	/* Configure for OS presence */
 	em_init_manageability(hw);
@@ -790,6 +727,11 @@ eth_em_stop(struct rte_eth_dev *dev)
 	em_lsc_intr_disable(hw);
 
 	e1000_reset_hw(hw);
+
+	/* Flush desc rings for i219 */
+	if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
+		em_flush_desc_rings(dev);
+
 	if (hw->mac.type >= e1000_82544)
 		E1000_WRITE_REG(hw, E1000_WUC, 0);
 
@@ -800,7 +742,7 @@ eth_em_stop(struct rte_eth_dev *dev)
 
 	/* clear the recorded link status */
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_write_link_status(dev, &link);
+	rte_eth_linkstatus_set(dev, &link);
 
 	if (!rte_intr_allow_others(intr_handle))
 		/* resume to the default handler */
@@ -822,6 +764,8 @@ eth_em_close(struct rte_eth_dev *dev)
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_adapter *adapter =
 		E1000_DEV_PRIVATE(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	eth_em_stop(dev);
 	adapter->stopped = 1;
@@ -829,6 +773,15 @@ eth_em_close(struct rte_eth_dev *dev)
 	e1000_phy_hw_reset(hw);
 	em_release_manageability(hw);
 	em_hw_control_release(hw);
+
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+		eth_em_interrupt_handler, dev);
 }
 
 static int
@@ -873,7 +826,8 @@ em_hardware_init(struct e1000_hw *hw)
 	 */
	rx_buf_size = em_get_rx_buffer_size(hw);
 
-	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+	hw->fc.high_water = rx_buf_size -
+		PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
 	hw->fc.low_water = hw->fc.high_water - 1500;
 
 	if (hw->mac.type == e1000_80003es2lan)
@@ -1031,7 +985,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 	return 0;
 }
 
-static void
+static int
 eth_em_stats_reset(struct rte_eth_dev *dev)
 {
 	struct e1000_hw_stats *hw_stats =
@@ -1042,6 +996,8 @@ eth_em_stats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
 }
 
 static int
@@ -1052,7 +1008,7 @@ eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	em_rxq_intr_enable(hw);
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -1067,9 +1023,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
 	return 0;
 }
 
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -1087,31 +1045,20 @@ em_get_max_pktlen(const struct e1000_hw *hw)
 		return 0x1000;
 	/* Adapters that do not support jumbo frames */
 	case e1000_ich8lan:
-		return ETHER_MAX_LEN;
+		return RTE_ETHER_MAX_LEN;
 	default:
 		return MAX_JUMBO_FRAME_SIZE;
 	}
 }
 
-static void
+static int
 eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1133,6 +1080,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
 
+	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+			dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+			dev_info->tx_queue_offload_capa;
+
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
 		.nb_min = E1000_MIN_RING_DESC,
@@ -1150,6 +1104,14 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
 			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
 			ETH_LINK_SPEED_1G;
+
+	/* Preferred queue parameters */
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_txportconf.ring_size = 256;
+	dev_info->default_rxportconf.ring_size = 256;
+
+	return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1158,10 +1120,10 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_link link, old;
-	int link_check, count;
+	struct rte_eth_link link;
+	int link_up, count;
 
-	link_check = 0;
+	link_up = 0;
 	hw->mac.get_link_status = 1;
 
 	/* possible wait-to-complete in up to 9 seconds */
@@ -1171,33 +1133,31 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		case e1000_media_type_copper:
 			/* Do the work to read phy */
 			e1000_check_for_link(hw);
-			link_check = !hw->mac.get_link_status;
+			link_up = !hw->mac.get_link_status;
 			break;
 
 		case e1000_media_type_fiber:
 			e1000_check_for_link(hw);
-			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+			link_up = (E1000_READ_REG(hw, E1000_STATUS) &
 					E1000_STATUS_LU);
 			break;
 
 		case e1000_media_type_internal_serdes:
 			e1000_check_for_link(hw);
-			link_check = hw->mac.serdes_has_link;
+			link_up = hw->mac.serdes_has_link;
 			break;
 
 		default:
 			break;
 		}
-		if (link_check || wait_to_complete == 0)
+		if (link_up || wait_to_complete == 0)
 			break;
 		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
 	}
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
-	old = link;
 
 	/* Now we check if a transition has happened */
-	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
+	if (link_up) {
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
@@ -1207,20 +1167,14 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		link.link_status = ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
 				ETH_LINK_SPEED_FIXED);
-	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
-		link.link_speed = 0;
+	} else {
+		link.link_speed = ETH_SPEED_NUM_NONE;
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		link.link_status = ETH_LINK_DOWN;
 		link.link_autoneg = ETH_LINK_FIXED;
 	}
-	rte_em_dev_atomic_write_link_status(dev, &link);
-
-	/* not changed */
-	if (old.link_status == link.link_status)
-		return -1;
-
-	/* changed */
-	return 0;
+
+	return rte_eth_linkstatus_set(dev, &link);
 }
 
 /*
@@ -1312,7 +1266,7 @@ em_release_manageability(struct e1000_hw *hw)
 	}
 }
 
-static void
+static int
 eth_em_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1322,9 +1276,11 @@ eth_em_promiscuous_enable(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1338,9 +1294,11 @@ eth_em_promiscuous_disable(struct rte_eth_dev *dev)
 	else
 		rctl &= (~E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1350,9 +1308,11 @@ eth_em_allmulticast_enable(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl |= E1000_RCTL_MPE;
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1360,10 +1320,12 @@ eth_em_allmulticast_disable(struct rte_eth_dev *dev)
 	uint32_t rctl;
 
 	if (dev->data->promiscuous == 1)
-		return; /* must remain in all_multicast mode */
+		return 0; /* must remain in all_multicast mode */
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl &= (~E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
 static int
@@ -1458,15 +1420,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1495,7 +1460,8 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
 	/* clear interrupt */
 	E1000_READ_REG(hw, E1000_ICR);
 	regval = E1000_READ_REG(hw, E1000_IMS);
-	E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+	E1000_WRITE_REG(hw, E1000_IMS,
+		regval | E1000_ICR_LSC | E1000_ICR_OTHER);
 
 	return 0;
 }
@@ -1545,7 +1511,7 @@ em_rxq_intr_enable(struct e1000_hw *hw)
 
 static void
 em_lsc_intr_disable(struct e1000_hw *hw)
 {
-	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
 	E1000_WRITE_FLUSH(hw);
 }
@@ -1619,7 +1585,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 		return -1;
 
 	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	/* set get_link_status to check register later */
 	hw->mac.get_link_status = 1;
@@ -1629,8 +1595,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (ret < 0)
 		return 0;
 
-	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
+	rte_eth_linkstatus_get(dev, &link);
+
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			dev->data->port_id, link.link_speed,
@@ -1639,7 +1605,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
 	}
-	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
 		pci_dev->addr.domain, pci_dev->addr.bus,
 		pci_dev->addr.devid, pci_dev->addr.function);
 
@@ -1749,7 +1715,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
-	max_high_water = rx_buf_size - ETHER_MAX_LEN;
+	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -1788,7 +1754,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 }
 
 static int
-eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		uint32_t index, __rte_unused uint32_t pool)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1799,7 +1765,7 @@ eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
-	uint8_t addr[ETHER_ADDR_LEN];
+	uint8_t addr[RTE_ETHER_ADDR_LEN];
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	memset(addr, 0, sizeof(addr));
@@ -1807,6 +1773,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 	e1000_rar_set(hw, addr, index);
 }
 
+static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+			    struct rte_ether_addr *addr)
+{
+	eth_em_rar_clear(dev, 0);
+
+	return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
 static int
 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -1814,12 +1789,17 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct e1000_hw *hw;
 	uint32_t frame_size;
 	uint32_t rctl;
+	int ret;
 
-	eth_em_infos_get(dev, &dev_info);
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	ret = eth_em_infos_get(dev, &dev_info);
+	if (ret != 0)
+		return ret;
+
+	frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+		VLAN_TAG_SIZE;
 
 	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
 		return -EINVAL;
 
 	/* refuse mtu that requires the support of scattered packets when this
@@ -1832,11 +1812,13 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 
 	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+	if (frame_size > RTE_ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1848,7 +1830,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 static int
 eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-			struct ether_addr *mc_addr_set,
+			struct rte_ether_addr *mc_addr_set,
 			uint32_t nb_mc_addr)
 {
 	struct e1000_hw *hw;
@@ -1862,14 +1844,8 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
 {
-	e1000_logtype_init = rte_log_register("pmd.e1000.init");
-	if (e1000_logtype_init >= 0)
-		rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
-	e1000_logtype_driver = rte_log_register("pmd.e1000.driver");
-	if (e1000_logtype_driver >= 0)
-		rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+	e1000_igb_init_log();
 }