#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
-#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
-static void eth_em_stop(struct rte_eth_dev *dev);
-static void eth_em_close(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
-static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
-static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_em_stop(struct rte_eth_dev *dev);
+static int eth_em_close(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
+static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
+static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int eth_em_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
-static void eth_em_stats_reset(struct rte_eth_dev *dev);
-static void eth_em_infos_get(struct rte_eth_dev *dev,
+static int eth_em_stats_reset(struct rte_eth_dev *dev);
+static int eth_em_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_em_led_off(struct rte_eth_dev *dev);
static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-static void eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
- struct ether_addr *addr);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr);
static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
#define EM_FC_PAUSE_TIME 0x0680
static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
/*
* The set of PCI devices this driver supports
*/
.vlan_offload_set = eth_em_vlan_offload_set,
.rx_queue_setup = eth_em_rx_queue_setup,
.rx_queue_release = eth_em_rx_queue_release,
- .rx_queue_count = eth_em_rx_queue_count,
- .rx_descriptor_done = eth_em_rx_descriptor_done,
- .rx_descriptor_status = eth_em_rx_descriptor_status,
- .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
eth_dev->dev_ops = ð_em_ops;
+ eth_dev->rx_queue_count = eth_em_rx_queue_count;
+ eth_dev->rx_descriptor_done = eth_em_rx_descriptor_done;
+ eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)ð_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)ð_em_xmit_pkts;
eth_dev->tx_pkt_prepare = (eth_tx_prep_t)ð_em_prep_pkts;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
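+ /* Let the ethdev layer fill per-queue basic xstats automatically */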
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
eth_dev->data->mac_addrs);
/* initialize the vfta */
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- struct e1000_adapter *adapter =
- E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
-
- if (adapter->stopped == 0)
- eth_em_close(eth_dev);
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
+ return 0;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- eth_em_interrupt_handler, eth_dev);
+ eth_em_close(eth_dev);
return 0;
}
static struct rte_pci_driver rte_em_pmd = {
.id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_em_pci_probe,
.remove = eth_em_pci_remove,
};
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct rte_eth_dev_info dev_info;
- uint64_t rx_offloads;
- uint64_t tx_offloads;
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
- eth_em_infos_get(dev, &dev_info);
- rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
- PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, dev_info.rx_offload_capa);
- return -ENOTSUP;
- }
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
- PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, dev_info.tx_offload_capa);
- return -ENOTSUP;
- }
-
PMD_INIT_FUNC_TRACE();
return 0;
PMD_INIT_FUNC_TRACE();
- eth_em_stop(dev);
+ ret = eth_em_stop(dev);
+ if (ret != 0)
+ return ret;
e1000_power_up_phy(hw);
return -EIO;
}
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+ E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);
/* Configure for OS presence */
em_init_manageability(hw);
* global reset on the MAC.
*
**********************************************************************/
-static void
+static int
eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
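+ /* Record that the port is stopped before tearing down rings and interrupts */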
+ dev->data->dev_started = 0;
+
eth_em_rxtx_control(dev, false);
em_rxq_intr_disable(hw);
em_lsc_intr_disable(hw);
e1000_reset_hw(hw);
+
+ /* Flush desc rings for i219 */
+ if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
+ em_flush_desc_rings(dev);
+
if (hw->mac.type >= e1000_82544)
E1000_WRITE_REG(hw, E1000_WUC, 0);
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ return 0;
}
-static void
+static int
eth_em_close(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int ret;
- eth_em_stop(dev);
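+ /* Only the primary process tears down shared device state */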
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ ret = eth_em_stop(dev);
adapter->stopped = 1;
em_dev_free_queues(dev);
e1000_phy_hw_reset(hw);
em_release_manageability(hw);
em_hw_control_release(hw);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler, dev);
+
+ return ret;
}
static int
*/
rx_buf_size = em_get_rx_buffer_size(hw);
- hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+ hw->fc.high_water = rx_buf_size -
+ PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
hw->fc.low_water = hw->fc.high_water - 1500;
if (hw->mac.type == e1000_80003es2lan)
return 0;
}
-static void
+static int
eth_em_stats_reset(struct rte_eth_dev *dev)
{
struct e1000_hw_stats *hw_stats =
/* Reset software totals */
memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
}
static int
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
- rte_intr_enable(intr_handle);
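+ /* Ack the interrupt so further Rx queue interrupts can be delivered */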
+ rte_intr_ack(intr_handle);
return 0;
}
return 0x1000;
/* Adapters that do not support jumbo frames */
case e1000_ich8lan:
- return ETHER_MAX_LEN;
+ return RTE_ETHER_MAX_LEN;
default:
return MAX_JUMBO_FRAME_SIZE;
}
}
-static void
+static int
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_txportconf.ring_size = 256;
dev_info->default_rxportconf.ring_size = 256;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link;
- int link_check, count;
+ int link_up, count;
- link_check = 0;
+ link_up = 0;
hw->mac.get_link_status = 1;
/* possible wait-to-complete in up to 9 seconds */
case e1000_media_type_copper:
/* Do the work to read phy */
e1000_check_for_link(hw);
- link_check = !hw->mac.get_link_status;
+ link_up = !hw->mac.get_link_status;
break;
case e1000_media_type_fiber:
e1000_check_for_link(hw);
- link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ link_up = (E1000_READ_REG(hw, E1000_STATUS) &
E1000_STATUS_LU);
break;
case e1000_media_type_internal_serdes:
e1000_check_for_link(hw);
- link_check = hw->mac.serdes_has_link;
+ link_up = hw->mac.serdes_has_link;
break;
default:
break;
}
- if (link_check || wait_to_complete == 0)
+ if (link_up || wait_to_complete == 0)
break;
rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
}
memset(&link, 0, sizeof(link));
/* Now we check if a transition has happened */
- if (link_check && (link.link_status == ETH_LINK_DOWN)) {
+ if (link_up) {
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
link.link_status = ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- } else if (!link_check && (link.link_status == ETH_LINK_UP)) {
- link.link_speed = 0;
+ } else {
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_FIXED;
}
}
-static void
+static int
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
}
-static void
+static int
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
else
rctl &= (~E1000_RCTL_MPE);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
}
-static void
+static int
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
}
-static void
+static int
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
uint32_t rctl;
if (dev->data->promiscuous == 1)
- return; /* must remain in all_multicast mode */
+ return 0; /* must remain in all_multicast mode */
rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl &= (~E1000_RCTL_MPE);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
}
static int
/* clear interrupt */
E1000_READ_REG(hw, E1000_ICR);
regval = E1000_READ_REG(hw, E1000_IMS);
- E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+ E1000_WRITE_REG(hw, E1000_IMS,
+ regval | E1000_ICR_LSC | E1000_ICR_OTHER);
return 0;
}
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
- E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
E1000_WRITE_FLUSH(hw);
}
return -1;
intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
- rte_intr_enable(intr_handle);
+ rte_intr_ack(intr_handle);
/* set get_link_status to check register later */
hw->mac.get_link_status = 1;
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
}
- PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
}
static int
-eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
- uint8_t addr[ETHER_ADDR_LEN];
+ uint8_t addr[RTE_ETHER_ADDR_LEN];
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
memset(addr, 0, sizeof(addr));
e1000_rar_set(hw, addr, index);
}
-static void
+static int
eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
eth_em_rar_clear(dev, 0);
- eth_em_rar_set(dev, (void *)addr, 0, 0);
+ return eth_em_rar_set(dev, (void *)addr, 0, 0);
}
static int
struct e1000_hw *hw;
uint32_t frame_size;
uint32_t rctl;
+ int ret;
+
+ ret = eth_em_infos_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
- eth_em_infos_get(dev, &dev_info);
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ frame_size = mtu + E1000_ETH_OVERHEAD;
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
- /* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
- if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ /*
+ * If device is started, refuse mtu that requires the support of
+ * scattered packets when this feature has not been enabled before.
+ */
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
+ }
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
+ if (frame_size > E1000_ETH_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
static int
eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct e1000_hw *hw;
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
{
- e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
- if (e1000_logtype_init >= 0)
- rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
- e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
- if (e1000_logtype_driver >= 0)
- rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_igb_init_log();
}