#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
+
#define I40E_RSS_OFFLOAD_ALL ( \
ETH_RSS_NONF_IPV4_UDP | \
ETH_RSS_NONF_IPV4_TCP | \
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
-static int i40e_pf_disable_all_queues(struct i40e_hw *hw);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num,
.promiscuous_disable = i40e_dev_promiscuous_disable,
.allmulticast_enable = i40e_dev_allmulticast_enable,
.allmulticast_disable = i40e_dev_allmulticast_disable,
+ .dev_set_link_up = i40e_dev_set_link_up,
+ .dev_set_link_down = i40e_dev_set_link_down,
.link_update = i40e_dev_link_update,
.stats_get = i40e_dev_stats_get,
.stats_reset = i40e_dev_stats_reset,
hw->bus.device = pci_dev->addr.devid;
hw->bus.func = pci_dev->addr.function;
- /* Disable all queues before PF reset, as required */
- ret = i40e_pf_disable_all_queues(hw);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to disable queues %u\n", ret);
- return ret;
- }
+ /* Make sure all is clean before doing PF reset */
+ i40e_clear_hw(hw);
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
return ret;
}
- /* Initialize the shared code */
+ /* Initialize the shared code (base driver) */
ret = i40e_init_shared_code(hw);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to init shared code: %d", ret);
+ PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
return ret;
}
}
vsi = pf->main_vsi;
+
+ /* Disable double vlan by default */
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+
if (!vsi->max_macaddrs)
len = ETHER_ADDR_LEN;
else
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}
+/*
+ * Translate an rte_ethdev link speed (ETH_LINK_SPEED_*) into the
+ * i40e AdminQ link speed bit (I40E_LINK_SPEED_*).
+ * Returns I40E_LINK_SPEED_UNKNOWN for any speed not handled below.
+ */
+static inline uint8_t
+i40e_parse_link_speed(uint16_t eth_link_speed)
+{
+	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
+
+	switch (eth_link_speed) {
+	case ETH_LINK_SPEED_40G:
+		link_speed = I40E_LINK_SPEED_40GB;
+		break;
+	case ETH_LINK_SPEED_20G:
+		link_speed = I40E_LINK_SPEED_20GB;
+		break;
+	case ETH_LINK_SPEED_10G:
+		link_speed = I40E_LINK_SPEED_10GB;
+		break;
+	case ETH_LINK_SPEED_1000:
+		link_speed = I40E_LINK_SPEED_1GB;
+		break;
+	case ETH_LINK_SPEED_100:
+		link_speed = I40E_LINK_SPEED_100MB;
+		break;
+	default:
+		/* Unrecognized speeds keep the UNKNOWN default */
+		break;
+	}
+
+	return link_speed;
+}
+
+/*
+ * Program the PHY link configuration via the AdminQ.
+ * Reads the current PHY abilities, preserves the pause/low-power flag
+ * bits from hardware, then applies the requested abilities and speed
+ * with i40e_aq_set_phy_config.
+ * Returns I40E_SUCCESS on success, -ENOTSUP if either AdminQ call fails.
+ */
+static int
+i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
+{
+	enum i40e_status_code status;
+	struct i40e_aq_get_phy_abilities_resp phy_ab;
+	struct i40e_aq_set_phy_config phy_conf;
+	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+			I40E_AQ_PHY_FLAG_PAUSE_RX |
+			I40E_AQ_PHY_FLAG_LOW_POWER;
+	/* speeds advertised when autoneg is enabled.
+	 * NOTE(review): I40E_LINK_SPEED_20GB is not included here even
+	 * though i40e_parse_link_speed() can produce it — confirm intended.
+	 */
+	const uint8_t advt = I40E_LINK_SPEED_40GB |
+			I40E_LINK_SPEED_10GB |
+			I40E_LINK_SPEED_1GB |
+			I40E_LINK_SPEED_100MB;
+	int ret = -ENOTSUP;
+
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+					      NULL);
+	if (status)
+		return ret;
+
+	memset(&phy_conf, 0, sizeof(phy_conf));
+
+	/* bits 0-2 use the values from get_phy_abilities_resp */
+	abilities &= ~mask;
+	abilities |= phy_ab.abilities & mask;
+
+	/* update abilities and speed */
+	if (abilities & I40E_AQ_PHY_AN_ENABLED)
+		phy_conf.link_speed = advt;
+	else
+		phy_conf.link_speed = force_speed;
+
+	phy_conf.abilities = abilities;
+
+	/* use get_phy_abilities_resp value for the rest */
+	phy_conf.phy_type = phy_ab.phy_type;
+	phy_conf.eee_capability = phy_ab.eee_capability;
+	phy_conf.eeer = phy_ab.eeer_val;
+	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+	PMD_DRV_LOG(DEBUG, "\n\tCurrent: abilities %x, link_speed %x\n"
+		    "\tConfig: abilities %x, link_speed %x",
+		    phy_ab.abilities, phy_ab.link_speed,
+		    phy_conf.abilities, phy_conf.link_speed);
+
+	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+	if (status)
+		return ret;
+
+	return I40E_SUCCESS;
+}
+
+/*
+ * Apply the link speed/autoneg settings from dev_conf to the PHY.
+ * ETH_LINK_SPEED_AUTONEG enables autonegotiation; any other configured
+ * speed is forced via I40E_AQ_PHY_LINK_ENABLED.
+ * Returns the i40e_phy_conf_link() result.
+ */
+static int
+i40e_apply_link_speed(struct rte_eth_dev *dev)
+{
+	uint8_t speed;
+	uint8_t abilities = 0;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+	speed = i40e_parse_link_speed(conf->link_speed);
+	/* atomic link update avoids a transient link flap */
+	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+	if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
+		abilities |= I40E_AQ_PHY_AN_ENABLED;
+	else
+		abilities |= I40E_AQ_PHY_LINK_ENABLED;
+
+	return i40e_phy_conf_link(hw, abilities, speed);
+}
+
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vsi *vsi = pf->main_vsi;
int ret;
+ if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+ (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
+ dev->data->dev_conf.link_duplex,
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
/* Initialize VSI */
ret = i40e_vsi_init(vsi);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
}
+ /* Apply link configure */
+ ret = i40e_apply_link_speed(dev);
+ if (I40E_SUCCESS != ret) {
+ PMD_DRV_LOG(ERR, "Fail to apply link setting\n");
+ goto err_up;
+ }
+
return I40E_SUCCESS;
err_up:
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
+ /* Set link down */
+ i40e_dev_set_link_down(dev);
+
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(vsi);
i40e_vsi_queues_unbind_intr(vsi);
PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
}
+/*
+ * Set device link up: bring the link back by re-applying the
+ * configured speed/autoneg settings (dev_ops.dev_set_link_up).
+ */
+static int
+i40e_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	/* re-apply link speed setting */
+	return i40e_apply_link_speed(dev);
+}
+
+/*
+ * Set device link down: force the PHY to an unknown speed without the
+ * LINK/AN enable abilities, which drops the link (dev_ops.dev_set_link_down).
+ */
+static int
+i40e_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
+	uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	return i40e_phy_conf_link(hw, abilities, speed);
+}
+
int
i40e_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
for (j = 0, lut = 0; j < 4; j++) {
- if (mask & (0x1 < j))
+ if (mask & (0x1 << j))
lut |= reta_conf->reta[i + j] << (8 * j);
else
lut |= l & (0xFF << (8 * j));
}
/**
- * i40e_allocate_dma_mem_d - specific memory alloc for shared code
+ * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
* @hw: pointer to the HW structure
* @mem: pointer to mem struct to fill out
* @size: size of memory requested
return I40E_ERR_PARAM;
id++;
- rte_snprintf(z_name, sizeof(z_name), "i40e_dma_%lu", id);
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
+#ifdef RTE_LIBRTE_XEN_DOM0
+ mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
+ RTE_PGSIZE_2M);
+#else
mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
+#endif
if (!mz)
return I40E_ERR_NO_MEMORY;
mem->id = id;
mem->size = size;
mem->va = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+ mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
mem->pa = mz->phys_addr;
+#endif
return I40E_SUCCESS;
}
/**
- * i40e_free_dma_mem_d - specific memory free for shared code
+ * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
}
/**
- * i40e_allocate_virt_mem_d - specific memory alloc for shared code
+ * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
* @hw: pointer to the HW structure
* @mem: pointer to mem struct to fill out
* @size: size of memory requested
}
/**
- * i40e_free_virt_mem_d - specific memory free for shared code
+ * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
* @hw: pointer to the HW structure
* @mem: pointer to mem struct to free
**/
ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
if (ret != I40E_SUCCESS) {
struct i40e_mac_filter *f;
- PMD_DRV_LOG(WARNING, "Failed to remove default [mac,vlan] config\n");
- /* Even failed to update default setting, still needs to add the permanent
- * mac into mac list.
- */
+ PMD_DRV_LOG(WARNING, "Cannot remove the default "
+ "macvlan filter\n");
+ /* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
PMD_DRV_LOG(ERR, "failed to allocate memory\n");
ETH_ADDR_LEN);
TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
vsi->mac_num++;
+
return ret;
}
(void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
ETH_ADDR_LEN);
- ret = i40e_update_default_filter_setting(vsi);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to remove default "
- "filter setting\n");
- goto fail_msix_alloc;
- }
- }
- else if (type == I40E_VSI_SRIOV) {
+
+ /**
+	 * Updating default filter settings is necessary to prevent
+ * reception of tagged packets.
+ * Some old firmware configurations load a default macvlan
+ * filter which accepts both tagged and untagged packets.
+ * The updating is to use a normal filter instead if needed.
+ * For NVM 4.2.2 or after, the updating is not needed anymore.
+ * The firmware with correct configurations load the default
+ * macvlan filter which is expected and cannot be removed.
+ */
+ i40e_update_default_filter_setting(vsi);
+ } else if (type == I40E_VSI_SRIOV) {
memset(&ctxt, 0, sizeof(ctxt));
/**
* For other VSI, the uplink_seid equals to uplink VSI's
uint32_t reg;
uint16_t j;
+ /**
+ * Set or clear TX Queue Disable flags,
+ * which is required by hardware.
+ */
+ i40e_pre_tx_queue_cfg(hw, q_idx, on);
+ rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
+
/* Wait until the request is finished */
for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
if (on) {
if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
return I40E_SUCCESS; /* already on, skip next steps */
+
+ I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
reg |= I40E_QTX_ENA_QENA_REQ_MASK;
} else {
if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
(on ? "enable" : "disable"), q_idx);
return I40E_ERR_TIMEOUT;
}
+
return I40E_SUCCESS;
}
+
/* Switch on or off the tx queues */
static int
i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
return 0;
}
-
-static int
-i40e_disable_queue(struct i40e_hw *hw, uint16_t q_idx)
-{
- uint16_t i;
- uint32_t reg;
-
- /* Disable TX queue */
- for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
- reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
- if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
- ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 0x1)))
- break;
- rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
- }
- if (i >= I40E_CHK_Q_ENA_COUNT) {
- PMD_DRV_LOG(ERR, "Failed to disable "
- "tx queue[%u]\n", q_idx);
- return I40E_ERR_TIMEOUT;
- }
-
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
- for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
- rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
- reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
- if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
- !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
- if (i >= I40E_CHK_Q_ENA_COUNT) {
- PMD_DRV_LOG(ERR, "Failed to disable "
- "tx queue[%u]\n", q_idx);
- return I40E_ERR_TIMEOUT;
- }
- }
-
- /* Disable RX queue */
- for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
- reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
- if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
- ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
- break;
- rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
- }
- if (i >= I40E_CHK_Q_ENA_COUNT) {
- PMD_DRV_LOG(ERR, "Failed to disable "
- "rx queue[%u]\n", q_idx);
- return I40E_ERR_TIMEOUT;
- }
-
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
- for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
- rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
- reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
- if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
- !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
- if (i >= I40E_CHK_Q_ENA_COUNT) {
- PMD_DRV_LOG(ERR, "Failed to disable "
- "rx queue[%u]\n", q_idx);
- return I40E_ERR_TIMEOUT;
- }
- }
-
- return I40E_SUCCESS;
-}
-
-static int
-i40e_pf_disable_all_queues(struct i40e_hw *hw)
-{
- uint32_t reg;
- uint16_t firstq, lastq, maxq, i;
- int ret;
- reg = I40E_READ_REG(hw, I40E_PFLAN_QALLOC);
- if (!(reg & I40E_PFLAN_QALLOC_VALID_MASK)) {
- PMD_DRV_LOG(INFO, "PF queue allocation is invalid\n");
- return I40E_ERR_PARAM;
- }
- firstq = reg & I40E_PFLAN_QALLOC_FIRSTQ_MASK;
- lastq = (reg & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
- I40E_PFLAN_QALLOC_LASTQ_SHIFT;
- maxq = lastq - firstq;
- for (i = 0; i <= maxq; i++) {
- ret = i40e_disable_queue(hw, i);
- if (ret != I40E_SUCCESS)
- return ret;
- }
- return I40E_SUCCESS;
-}