/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_malloc.h>
#include "e1000_logs.h"
-#include "igb/e1000_api.h"
-#include "igb/e1000_hw.h"
+#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
-static int eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
- uint16_t nb_tx_q);
+static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);
-static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev);
-static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
- uint16_t vlan_id,
- int on);
+
+static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);
static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+static void eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
.stats_reset = eth_igb_stats_reset,
.dev_infos_get = eth_igb_infos_get,
.vlan_filter_set = eth_igb_vlan_filter_set,
+ .vlan_tpid_set = eth_igb_vlan_tpid_set,
+ .vlan_offload_set = eth_igb_vlan_offload_set,
.rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_release = eth_igb_rx_queue_release,
.tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
.dev_led_on = eth_igb_led_on,
.dev_led_off = eth_igb_led_off,
.flow_ctrl_set = eth_igb_flow_ctrl_set,
return 0;
}
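+/*
+ * Write the interrupt causes recorded in intr->mask to the IMS register
+ * and flush, re-arming the interrupts requested for this port.
+ */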
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+ E1000_WRITE_FLUSH(hw);
+}
+
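+/* Mask all interrupt causes by writing ones to the IMC register. */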
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+}
+
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = ð_igb_ops;
return 0;
}
- hw->hw_addr= (void *)pci_dev->mem_resource.addr;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
igb_identify_hardware(eth_dev);
-
if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
error = -EIO;
goto err_late;
rte_intr_callback_register(&(pci_dev->intr_handle),
eth_igb_interrupt_handler, (void *)eth_dev);
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ igb_intr_enable(eth_dev);
+
return 0;
err_late:
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
/* Initialize the shared code */
diag = e1000_setup_init_funcs(hw, TRUE);
}
static int
-eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+eth_igb_configure(struct rte_eth_dev *dev)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- int diag;
PMD_INIT_LOG(DEBUG, ">>");
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
- /* Allocate the array of pointers to RX structures */
- diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
- " pointers to RX queues failed",
- dev->data->port_id, nb_rx_q);
- return diag;
- }
-
- /* Allocate the array of pointers to TX structures */
- diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
- " pointers to TX queues failed",
- dev->data->port_id, nb_tx_q);
-
- return diag;
- }
-
PMD_INIT_LOG(DEBUG, "<<");
return (0);
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret, i;
+ int ret, i, mask;
PMD_INIT_LOG(DEBUG, ">>");
- igb_intr_disable(hw);
-
/* Power up the phy. Needed to make the link go Up */
e1000_power_up_phy(hw);
/* Initialize the hardware */
if (igb_hardware_init(hw)) {
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
- return (-1);
+ return (-EIO);
}
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
ret = eth_igb_rx_init(dev);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
return ret;
}
e1000_clear_hw_cntrs_base_generic(hw);
/*
- * If VLAN filtering is enabled, set up VLAN tag offload and filtering
- * and restore the VFTA.
+ * VLAN Offload Settings
*/
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
- igb_vlan_hw_support_enable(dev);
- else
- igb_vlan_hw_support_disable(dev);
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ eth_igb_vlan_offload_set(dev, mask);
/*
* Configure the Interrupt Moderation register (EITR) with the maximum
E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
}
- /* Don't reset the phy next time init gets called */
- hw->phy.reset_disable = 1;
-
/* Setup link speed and duplex */
switch (dev->data->dev_conf.link_speed) {
case ETH_LINK_SPEED_AUTONEG:
}
e1000_setup_link(hw);
- PMD_INIT_LOG(DEBUG, "<<");
-
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- return eth_igb_interrupt_setup(dev);
+ ret = eth_igb_lsc_interrupt_setup(dev);
+
+ /* resume enabled intr since hw reset */
+ igb_intr_enable(dev);
+
+ PMD_INIT_LOG(DEBUG, "<<");
return (0);
PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
dev->data->dev_conf.link_speed,
dev->data->dev_conf.link_duplex, dev->data->port_id);
- return -1;
+ igb_dev_clear_queues(dev);
+ return (-EINVAL);
}
/*********************************************************************
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
-static void
+static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct e1000_hw *hw =
/* update local VFTA copy */
shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
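+/*
+ * Program the requested TPID into the upper 16 bits of the VET register
+ * (extended VLAN Ether type); the lower 16 bits keep ETHER_TYPE_VLAN.
+ */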
+static void
+eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg = ETHER_TYPE_VLAN;
+
+ reg |= ((uint32_t)tpid << 16);
+ E1000_WRITE_REG(hw, E1000_VET, reg);
+}
+
+static void
+igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
static void
-igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
+igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
int i;
- /* VLAN Mode Enable */
- reg = E1000_READ_REG(hw, E1000_CTRL);
- reg |= E1000_CTRL_VME;
- E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
- /* Filter Table Enable */
+ /* Filter Table Enable, CFI not used for packet acceptance */
reg = E1000_READ_REG(hw, E1000_RCTL);
reg &= ~E1000_RCTL_CFIEN;
reg |= E1000_RCTL_VFE;
E1000_WRITE_REG(hw, E1000_RCTL, reg);
- /* Update maximum frame size */
- reg = E1000_READ_REG(hw, E1000_RLPML);
- reg += VLAN_TAG_SIZE;
- E1000_WRITE_REG(hw, E1000_RLPML, reg);
-
/* restore VFTA table */
- for (i = 0; i < E1000_VFTA_SIZE; i++)
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
static void
-igb_vlan_hw_support_disable(struct rte_eth_dev *dev)
+igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
- /* VLAN Mode disable */
+ /* VLAN Mode Disable */
reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~E1000_CTRL_VME;
E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Update maximum frame size */
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE);
}
static void
-igb_intr_disable(struct e1000_hw *hw)
+igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Update maximum frame size */
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+}
+
+static void
+igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+}
+
+static void
+igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+}
+
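+/*
+ * Apply the VLAN strip, filter and extend settings from rxmode for each
+ * feature selected in mask.
+ */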
+static void
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ igb_vlan_hw_strip_enable(dev);
+ else
+ igb_vlan_hw_strip_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ igb_vlan_hw_filter_enable(dev);
+ else
+ igb_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ igb_vlan_hw_extend_enable(dev);
+ else
+ igb_vlan_hw_extend_disable(dev);
+ }
}
+
/**
 * It enables the interrupt mask and then enables the interrupt.
*
* - On failure, a negative value.
*/
static int
-eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
- E1000_WRITE_FLUSH(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ intr->mask |= E1000_ICR_LSC;
return 0;
}
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
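+ /* mask device interrupts until the pending cause has been handled */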
+ igb_intr_disable(hw);
+
/* read-on-clear nic registers here */
icr = E1000_READ_REG(hw, E1000_ICR);
+
+ intr->flags = 0;
if (icr & E1000_ICR_LSC) {
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
}
struct rte_eth_link link;
int ret;
- if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
- return -1;
- intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
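+ /* re-enable the interrupts that were masked while reading the cause */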
+ igb_intr_enable(dev);
rte_intr_enable(&(dev->pci_dev->intr_handle));
- /* set get_link_status to check register later */
- hw->mac.get_link_status = 1;
- ret = eth_igb_link_update(dev, 0);
-
- /* check if link has changed */
- if (ret < 0)
- return 0;
-
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- if (link.link_status) {
- PMD_INIT_LOG(INFO,
- " Port %d: Link Up - speed %u Mbps - %s\n",
- dev->data->port_id, (unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
- "full-duplex" : "half-duplex");
- } else {
- PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
- dev->data->port_id);
- }
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- if (link.link_status) {
- /* enable Tx/Rx */
- tctl |= E1000_TCTL_EN;
- rctl |= E1000_RCTL_EN;
- } else {
- /* disable Tx/Rx */
- tctl &= ~E1000_TCTL_EN;
- rctl &= ~E1000_RCTL_EN;
+ if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_igb_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO,
+ " Port %d: Link Up - speed %u Mbps - %s\n",
+ dev->data->port_id, (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+ dev->data->port_id);
+ }
+ PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain,
+ dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid,
+ dev->pci_dev->addr.function);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (link.link_status) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
return 0;
}
* void
*/
static void
-eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
+eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_igb_interrupt_get_status(dev);
eth_igb_interrupt_action(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
static int
PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");
/* Clear interrupt mask to stop from interrupts being generated */
- E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
E1000_WRITE_FLUSH(hw);
}
eth_igb_infos_get(dev, &dev_info);
/* Clear interrupt mask to stop from interrupts being generated */
- E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+ igbvf_intr_disable(hw);
/* Clear any pending interrupts, flush previous writes */
E1000_READ_REG(hw, E1000_EICR);
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret;
PMD_INIT_LOG(DEBUG, "igbvf_dev_start");
+ hw->mac.ops.reset_hw(hw);
+
/* Set all vfta */
igbvf_set_vfta_all(dev,1);
mask = 1;
for (j = 0; j < 32; j++){
if(vfta & mask)
- igbvf_set_vfta(hw, (i<<5)+j, on);
+ igbvf_set_vfta(hw,
+ (uint16_t)((i<<5)+j), on);
mask<<=1;
}
}