i40e: support autoneg or force link speed
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 6624586..0a918a6 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -85,6 +85,8 @@
 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
 #define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
 
+#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
+
 #define I40E_RSS_OFFLOAD_ALL ( \
        ETH_RSS_NONF_IPV4_UDP | \
        ETH_RSS_NONF_IPV4_TCP | \
@@ -126,6 +128,8 @@ static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
@@ -220,6 +224,8 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
+       .dev_set_link_up              = i40e_dev_set_link_up,
+       .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .stats_reset                  = i40e_dev_stats_reset,
@@ -647,6 +653,102 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
 }
 
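+/* Translate an ethdev link speed value into the i40e link speed encoding */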
+static inline uint8_t
+i40e_parse_link_speed(uint16_t eth_link_speed)
+{
+       uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
+
+       switch (eth_link_speed) {
+       case ETH_LINK_SPEED_40G:
+               link_speed = I40E_LINK_SPEED_40GB;
+               break;
+       case ETH_LINK_SPEED_20G:
+               link_speed = I40E_LINK_SPEED_20GB;
+               break;
+       case ETH_LINK_SPEED_10G:
+               link_speed = I40E_LINK_SPEED_10GB;
+               break;
+       case ETH_LINK_SPEED_1000:
+               link_speed = I40E_LINK_SPEED_1GB;
+               break;
+       case ETH_LINK_SPEED_100:
+               link_speed = I40E_LINK_SPEED_100MB;
+               break;
+       }
+
+       return link_speed;
+}
+
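+/*
+ * Configure PHY abilities and link speed through the Set PHY Config
+ * AdminQ command, preserving the current pause and low-power flags.
+ */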
+static int
+i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
+{
+       enum i40e_status_code status;
+       struct i40e_aq_get_phy_abilities_resp phy_ab;
+       struct i40e_aq_set_phy_config phy_conf;
+       const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+                       I40E_AQ_PHY_FLAG_PAUSE_RX |
+                       I40E_AQ_PHY_FLAG_LOW_POWER;
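+       /* speeds advertised when link auto-negotiation is enabled */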
+       const uint8_t advt = I40E_LINK_SPEED_40GB |
+                       I40E_LINK_SPEED_10GB |
+                       I40E_LINK_SPEED_1GB |
+                       I40E_LINK_SPEED_100MB;
+       int ret = -ENOTSUP;
+
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+                                             NULL);
+       if (status)
+               return ret;
+
+       memset(&phy_conf, 0, sizeof(phy_conf));
+
+       /* bits 0-2 use the values from get_phy_abilities_resp */
+       abilities &= ~mask;
+       abilities |= phy_ab.abilities & mask;
+
+       /* update abilities and speed */
+       if (abilities & I40E_AQ_PHY_AN_ENABLED)
+               phy_conf.link_speed = advt;
+       else
+               phy_conf.link_speed = force_speed;
+
+       phy_conf.abilities = abilities;
+
+       /* use get_phy_abilities_resp value for the rest */
+       phy_conf.phy_type = phy_ab.phy_type;
+       phy_conf.eee_capability = phy_ab.eee_capability;
+       phy_conf.eeer = phy_ab.eeer_val;
+       phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+       PMD_DRV_LOG(DEBUG, "\n\tCurrent: abilities %x, link_speed %x\n"
+                   "\tConfig:  abilities %x, link_speed %x",
+                   phy_ab.abilities, phy_ab.link_speed,
+                   phy_conf.abilities, phy_conf.link_speed);
+
+       status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+       if (status)
+               return ret;
+
+       return I40E_SUCCESS;
+}
+
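+/* Apply the link speed and auto-negotiation settings from dev_conf to the PHY */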
+static int
+i40e_apply_link_speed(struct rte_eth_dev *dev)
+{
+       uint8_t speed;
+       uint8_t abilities = 0;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+       speed = i40e_parse_link_speed(conf->link_speed);
+       abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
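+       /* advertise all supported speeds for autoneg, otherwise force the configured speed */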
+       if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
+               abilities |= I40E_AQ_PHY_AN_ENABLED;
+       else
+               abilities |= I40E_AQ_PHY_LINK_ENABLED;
+
+       return i40e_phy_conf_link(hw, abilities, speed);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -655,6 +757,14 @@ i40e_dev_start(struct rte_eth_dev *dev)
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;
 
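+       /* i40e supports full duplex only; reject any other fixed duplex setting */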
+       if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+               (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
+                               dev->data->dev_conf.link_duplex,
+                               dev->data->port_id);
+               return -EINVAL;
+       }
+
        /* Initialize VSI */
        ret = i40e_vsi_init(vsi);
        if (ret != I40E_SUCCESS) {
@@ -680,6 +790,13 @@ i40e_dev_start(struct rte_eth_dev *dev)
                        PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
        }
 
+       /* Apply link configuration */
+       ret = i40e_apply_link_speed(dev);
+       if (I40E_SUCCESS != ret) {
+               PMD_DRV_LOG(ERR, "Failed to apply link settings\n");
+               goto err_up;
+       }
+
        return I40E_SUCCESS;
 
 err_up:
@@ -701,6 +818,9 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        /* Clear all queues and release memory */
        i40e_dev_clear_queues(dev);
 
+       /* Set link down */
+       i40e_dev_set_link_down(dev);
+
        /* un-map queues with interrupt registers */
        i40e_vsi_disable_queues_intr(vsi);
        i40e_vsi_queues_unbind_intr(vsi);
@@ -796,6 +916,29 @@ i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
                PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
 }
 
+/*
+ * Set device link up.
+ */
+static int
+i40e_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       /* re-apply link speed setting */
+       return i40e_apply_link_speed(dev);
+}
+
+/*
+ * Set device link down.
+ */
+static int
+i40e_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
+       uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       return i40e_phy_conf_link(hw, abilities, speed);
+}
+
 int
 i40e_dev_link_update(struct rte_eth_dev *dev,
                     __rte_unused int wait_to_complete)
@@ -1513,14 +1656,23 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
 
        id++;
        snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
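+       /*
+        * On Xen Dom0, keep the zone within a 2M boundary so the DMA
+        * memory stays contiguous in machine address space.
+        */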
+#ifdef RTE_LIBRTE_XEN_DOM0
+       mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
+                                                       RTE_PGSIZE_2M);
+#else
        mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
+#endif
        if (!mz)
                return I40E_ERR_NO_MEMORY;
 
        mem->id = id;
        mem->size = size;
        mem->va = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+       mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
        mem->pa = mz->phys_addr;
+#endif
 
        return I40E_SUCCESS;
 }
@@ -2780,6 +2932,13 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
        uint32_t reg;
        uint16_t j;
 
+       /*
+        * Set or clear the TX queue disable flags,
+        * as required by hardware before switching the queue.
+        */
+       i40e_pre_tx_queue_cfg(hw, q_idx, on);
+       rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
+
        /* Wait until the request is finished */
        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
@@ -2793,6 +2952,8 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
        if (on) {
                if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
                        return I40E_SUCCESS; /* already on, skip next steps */
+
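+               /* reset the queue head pointer before requesting enable */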
+               I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
                reg |= I40E_QTX_ENA_QENA_REQ_MASK;
        } else {
                if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
@@ -2821,8 +2982,10 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
                        (on ? "enable" : "disable"), q_idx);
                return I40E_ERR_TIMEOUT;
        }
+
        return I40E_SUCCESS;
 }
+
 /* Switch on or off the tx queues */
 static int
 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)