X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_ethdev.c;h=31c4870086fd98ab11e8c01a4793c099da2bfa8c;hb=af397b3c93f82b0803c0890874d7ee3b5127522d;hp=de7db2650e8edddabb4eed7146e2c205d8458f78;hpb=caccf8b318cafcdafe39faa3c5ce3eef67007621;p=dpdk.git

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index de7db2650e..31c4870086 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -11,13 +11,12 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -34,18 +33,18 @@
 
 static int eth_em_configure(struct rte_eth_dev *dev);
 static int eth_em_start(struct rte_eth_dev *dev);
-static void eth_em_stop(struct rte_eth_dev *dev);
-static void eth_em_close(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_em_stop(struct rte_eth_dev *dev);
+static int eth_em_close(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
 static int eth_em_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
 static int eth_em_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *rte_stats);
-static void eth_em_stats_reset(struct rte_eth_dev *dev);
-static void eth_em_infos_get(struct rte_eth_dev *dev,
+static int eth_em_stats_reset(struct rte_eth_dev *dev);
+static int eth_em_infos_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info);
 static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
 				struct rte_eth_fc_conf *fc_conf);
@@ -90,14 +89,15 @@ static int eth_em_led_on(struct rte_eth_dev *dev);
 static int eth_em_led_off(struct rte_eth_dev *dev);
 
 static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev,
+		struct rte_ether_addr *mac_addr,
+		uint32_t index, uint32_t pool);
 static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
 static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
-					 struct ether_addr *addr);
+					 struct rte_ether_addr *addr);
 
 static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-				   struct ether_addr *mc_addr_set,
+				   struct rte_ether_addr *mc_addr_set,
 				   uint32_t nb_mc_addr);
 
 #define EM_FC_PAUSE_TIME 0x0680
@@ -106,9 +106,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 
 static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
 
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
 /*
  * The set of PCI devices this driver supports
  */
@@ -179,10 +176,6 @@ static const struct eth_dev_ops eth_em_ops = {
 	.vlan_offload_set     = eth_em_vlan_offload_set,
 	.rx_queue_setup       = eth_em_rx_queue_setup,
 	.rx_queue_release     = eth_em_rx_queue_release,
-	.rx_queue_count       = eth_em_rx_queue_count,
-	.rx_descriptor_done   = eth_em_rx_descriptor_done,
-	.rx_descriptor_status = eth_em_rx_descriptor_status,
-	.tx_descriptor_status = eth_em_tx_descriptor_status,
	.tx_queue_setup       = eth_em_tx_queue_setup,
 	.tx_queue_release     = eth_em_tx_queue_release,
 	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
@@ -244,7 +237,7 @@ static int
 eth_em_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	struct e1000_adapter *adapter =
 		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
 	struct e1000_hw *hw =
@@ -253,6 +246,9 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 
 	eth_dev->dev_ops = &eth_em_ops;
+	eth_dev->rx_queue_count = eth_em_rx_queue_count;
+	eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
 	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
 	eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
@@ -287,17 +283,17 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
 			hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
 			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 		eth_dev->data->mac_addrs);
 
 	/* initialize the vfta */
@@ -316,30 +312,12 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct e1000_adapter *adapter =
-		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-
 	PMD_INIT_FUNC_TRACE();
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
-
-	if (adapter->stopped == 0)
-		eth_em_close(eth_dev);
-
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	rte_free(eth_dev->data->mac_addrs);
-	eth_dev->data->mac_addrs = NULL;
+		return 0;
 
-	/* disable uio intr before callback unregister */
-	rte_intr_disable(intr_handle);
-	rte_intr_callback_unregister(intr_handle,
-				     eth_em_interrupt_handler, eth_dev);
+	eth_em_close(eth_dev);
 
 	return 0;
 }
@@ -358,8 +336,7 @@ static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_em_pmd = {
 	.id_table = pci_id_em_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = eth_em_pci_probe,
 	.remove = eth_em_pci_remove,
 };
@@ -454,29 +431,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
@@ -565,7 +523,7 @@ eth_em_start(struct rte_eth_dev *dev)
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 	int ret, mask;
 	uint32_t intr_vector = 0;
 	uint32_t *speeds;
@@ -574,7 +532,9 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	eth_em_stop(dev);
+	ret = eth_em_stop(dev);
+	if (ret != 0)
+		return ret;
 
 	e1000_power_up_phy(hw);
 
@@ -601,7 +561,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		return -EIO;
 	}
 
-	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+	E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);
 
 	/* Configure for OS presence */
 	em_init_manageability(hw);
@@ -613,12 +573,10 @@ eth_em_start(struct rte_eth_dev *dev)
 	}
 
 	if (rte_intr_dp_is_en(intr_handle)) {
-		intr_handle->intr_vec =
-			rte_zmalloc("intr_vec",
-					dev->data->nb_rx_queues * sizeof(int), 0);
-		if (intr_handle->intr_vec == NULL) {
+		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+						dev->data->nb_rx_queues)) {
 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-						" intr_vec", dev->data->nb_rx_queues);
+					" intr_vec", dev->data->nb_rx_queues);
 			return -ENOMEM;
 		}
 
@@ -637,8 +595,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	e1000_clear_hw_cntrs_base_generic(hw);
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-			ETH_VLAN_EXTEND_MASK;
+	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		RTE_ETH_VLAN_EXTEND_MASK;
 	ret = eth_em_vlan_offload_set(dev, mask);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
@@ -651,39 +609,39 @@ eth_em_start(struct rte_eth_dev *dev)
 
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
-	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
 		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
 		hw->mac.autoneg = 1;
 	} else {
 		num_speeds = 0;
-		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
 
 		/* Reset */
 		hw->phy.autoneg_advertised = 0;
 
-		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_10M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_100M) {
+		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
 			num_speeds++;
 		}
-		if (*speeds & ETH_LINK_SPEED_1G) {
+		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
 			num_speeds++;
 		}
@@ -750,19 +708,26 @@ error_invalid_config:
  * global reset on the MAC.
  *
  **********************************************************************/
-static void
+static int
 eth_em_stop(struct rte_eth_dev *dev)
 {
 	struct rte_eth_link link;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+
+	dev->data->dev_started = 0;
 
 	eth_em_rxtx_control(dev, false);
 	em_rxq_intr_disable(hw);
 	em_lsc_intr_disable(hw);
 
 	e1000_reset_hw(hw);
+
+	/* Flush desc rings for i219 */
+	if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
+		em_flush_desc_rings(dev);
+
 	if (hw->mac.type >= e1000_82544)
 		E1000_WRITE_REG(hw, E1000_WUC, 0);
 
@@ -783,25 +748,37 @@ eth_em_stop(struct rte_eth_dev *dev)
 
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
-	if (intr_handle->intr_vec != NULL) {
-		rte_free(intr_handle->intr_vec);
-		intr_handle->intr_vec = NULL;
-	}
+	rte_intr_vec_list_free(intr_handle);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_close(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_adapter *adapter =
 		E1000_DEV_PRIVATE(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+	int ret;
 
-	eth_em_stop(dev);
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	ret = eth_em_stop(dev);
 	adapter->stopped = 1;
 	em_dev_free_queues(dev);
 	e1000_phy_hw_reset(hw);
 	em_release_manageability(hw);
 	em_hw_control_release(hw);
+
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     eth_em_interrupt_handler, dev);
+
+	return ret;
 }
 
@@ -846,7 +823,8 @@ em_hardware_init(struct e1000_hw *hw)
 	 */
 	rx_buf_size = em_get_rx_buffer_size(hw);
 
-	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+	hw->fc.high_water = rx_buf_size -
+		PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
 	hw->fc.low_water = hw->fc.high_water - 1500;
 
 	if (hw->mac.type == e1000_80003es2lan)
@@ -990,8 +968,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 
 	/* Rx Errors */
 	rte_stats->imissed = stats->mpc;
-	rte_stats->ierrors = stats->crcerrs +
-			     stats->rlec + stats->ruc + stats->roc +
+	rte_stats->ierrors = stats->crcerrs + stats->rlec +
 			     stats->rxerrc + stats->algnerrc + stats->cexterr;
 
 	/* Tx Errors */
@@ -1004,7 +981,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 	return 0;
 }
 
-static void
+static int
 eth_em_stats_reset(struct rte_eth_dev *dev)
 {
 	struct e1000_hw_stats *hw_stats =
@@ -1015,6 +992,8 @@ eth_em_stats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
 }
 
 static int
@@ -1022,10 +1001,10 @@ eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 
 	em_rxq_intr_enable(hw);
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -1062,13 +1041,13 @@ em_get_max_pktlen(struct rte_eth_dev *dev)
 		return 0x1000;
 	/* Adapters that do not support jumbo frames */
 	case e1000_ich8lan:
-		return ETHER_MAX_LEN;
+		return RTE_ETHER_MAX_LEN;
 	default:
 		return MAX_JUMBO_FRAME_SIZE;
 	}
 }
 
-static void
+static int
 eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1097,8 +1076,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
 
-	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
-	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa();
+	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
 				    dev_info->rx_queue_offload_capa;
 	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
 	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
@@ -1118,15 +1097,19 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
 	};
 
-	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
-			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-			ETH_LINK_SPEED_1G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+			RTE_ETH_LINK_SPEED_1G;
+
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
 	dev_info->default_txportconf.nb_queues = 1;
 	dev_info->default_txportconf.ring_size = 256;
 	dev_info->default_rxportconf.ring_size = 256;
+
+	return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
 static int
@@ -1136,9 +1119,9 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_link link;
-	int link_check, count;
+	int link_up, count;
 
-	link_check = 0;
+	link_up = 0;
 	hw->mac.get_link_status = 1;
 
 	/* possible wait-to-complete in up to 9 seconds */
@@ -1148,45 +1131,45 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		case e1000_media_type_copper:
 			/* Do the work to read phy */
 			e1000_check_for_link(hw);
-			link_check = !hw->mac.get_link_status;
+			link_up = !hw->mac.get_link_status;
 			break;
 
 		case e1000_media_type_fiber:
 			e1000_check_for_link(hw);
-			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+			link_up = (E1000_READ_REG(hw, E1000_STATUS) &
 					E1000_STATUS_LU);
 			break;
 
 		case e1000_media_type_internal_serdes:
 			e1000_check_for_link(hw);
-			link_check = hw->mac.serdes_has_link;
+			link_up = hw->mac.serdes_has_link;
 			break;
 
 		default:
 			break;
 		}
-		if (link_check || wait_to_complete == 0)
+		if (link_up || wait_to_complete == 0)
 			break;
 		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
 	}
 	memset(&link, 0, sizeof(link));
 
 	/* Now we check if a transition has happened */
-	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
+	if (link_up) {
 		uint16_t duplex, speed;
 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 		link.link_duplex = (duplex == FULL_DUPLEX) ?
-				ETH_LINK_FULL_DUPLEX :
-				ETH_LINK_HALF_DUPLEX;
+				RTE_ETH_LINK_FULL_DUPLEX :
+				RTE_ETH_LINK_HALF_DUPLEX;
 		link.link_speed = speed;
-		link.link_status = ETH_LINK_UP;
+		link.link_status = RTE_ETH_LINK_UP;
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-				ETH_LINK_SPEED_FIXED);
-	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
-		link.link_speed = 0;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_FIXED;
+				RTE_ETH_LINK_SPEED_FIXED);
+	} else {
+		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+		link.link_status = RTE_ETH_LINK_DOWN;
+		link.link_autoneg = RTE_ETH_LINK_FIXED;
 	}
 
 	return rte_eth_linkstatus_set(dev, &link);
@@ -1281,7 +1264,7 @@ em_release_manageability(struct e1000_hw *hw)
 	}
 }
 
-static void
+static int
 eth_em_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1291,9 +1274,11 @@ eth_em_promiscuous_enable(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1307,9 +1292,11 @@ eth_em_promiscuous_disable(struct rte_eth_dev *dev)
 	else
 		rctl &= (~E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1319,9 +1306,11 @@ eth_em_allmulticast_enable(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl |= E1000_RCTL_MPE;
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
-static void
+static int
 eth_em_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
@@ -1329,10 +1318,12 @@ eth_em_allmulticast_disable(struct rte_eth_dev *dev)
 	uint32_t rctl;
 
 	if (dev->data->promiscuous == 1)
-		return; /* must remain in all_multicast mode */
+		return 0; /* must remain in all_multicast mode */
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	rctl &= (~E1000_RCTL_MPE);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	return 0;
 }
 
 static int
@@ -1430,15 +1421,15 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_rxmode *rxmode;
 
 	rxmode = &dev->data->dev_conf.rxmode;
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1467,7 +1458,8 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
 	/* clear interrupt */
 	E1000_READ_REG(hw, E1000_ICR);
 	regval = E1000_READ_REG(hw, E1000_IMS);
-	E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+	E1000_WRITE_REG(hw, E1000_IMS,
+			regval | E1000_ICR_LSC | E1000_ICR_OTHER);
 
 	return 0;
 }
@@ -1517,7 +1509,7 @@ em_rxq_intr_enable(struct e1000_hw *hw)
 
 static void
 em_lsc_intr_disable(struct e1000_hw *hw)
 {
-	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
 	E1000_WRITE_FLUSH(hw);
 }
 
@@ -1591,7 +1583,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 		return -1;
 
 	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	/* set get_link_status to check register later */
 	hw->mac.get_link_status = 1;
@@ -1606,12 +1598,12 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
-			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
 		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
 	}
-	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
 		     pci_dev->addr.domain, pci_dev->addr.bus,
 		     pci_dev->addr.devid, pci_dev->addr.function);
 
@@ -1636,7 +1628,7 @@ eth_em_interrupt_handler(void *param)
 
 	eth_em_interrupt_get_status(dev);
 	eth_em_interrupt_action(dev, dev->intr_handle);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 static int
@@ -1688,13 +1680,13 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		rx_pause = 0;
 
 	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -1721,7 +1713,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
-	max_high_water = rx_buf_size - ETHER_MAX_LEN;
+	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -1760,7 +1752,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 }
 
 static int
-eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		uint32_t index, __rte_unused uint32_t pool)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1771,7 +1763,7 @@ eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
-	uint8_t addr[ETHER_ADDR_LEN];
+	uint8_t addr[RTE_ETHER_ADDR_LEN];
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	memset(addr, 0, sizeof(addr));
@@ -1781,7 +1773,7 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 
 static int
 eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
-			    struct ether_addr *addr)
+			    struct rte_ether_addr *addr)
 {
 	eth_em_rar_clear(dev, 0);
 
@@ -1791,47 +1783,38 @@ eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
 static int
 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	struct rte_eth_dev_info dev_info;
 	struct e1000_hw *hw;
 	uint32_t frame_size;
 	uint32_t rctl;
 
-	eth_em_infos_get(dev, &dev_info);
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	frame_size = mtu + E1000_ETH_OVERHEAD;
 
-	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
-		return -EINVAL;
-
-	/* refuse mtu that requires the support of scattered packets when this
-	 * feature has not been enabled before. */
-	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	/*
+	 * If device is started, refuse mtu that requires the support of
+	 * scattered packets when this feature has not been enabled before.
	 */
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		PMD_INIT_LOG(ERR, "Stop port first.");
 		return -EINVAL;
+	}
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 
 	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.offloads |=
-			DEV_RX_OFFLOAD_JUMBO_FRAME;
+	if (mtu > RTE_ETHER_MTU)
 		rctl |= E1000_RCTL_LPE;
-	} else {
-		dev->data->dev_conf.rxmode.offloads &=
-			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+	else
 		rctl &= ~E1000_RCTL_LPE;
-	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 	return 0;
 }
 
 static int
 eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
-			struct ether_addr *mc_addr_set,
+			struct rte_ether_addr *mc_addr_set,
 			uint32_t nb_mc_addr)
 {
 	struct e1000_hw *hw;
@@ -1844,15 +1827,3 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
-{
-	e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
-	if (e1000_logtype_init >= 0)
-		rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
-	e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
-	if (e1000_logtype_driver >= 0)
-		rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
-}