net/ice: fix speed capability
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a23c63a..1482ced 100644
@@ -4,12 +4,19 @@
 
 #include <rte_ethdev_pci.h>
 
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
 #include "base/ice_sched.h"
+#include "base/ice_flow.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
+#define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
 
 int ice_logtype_init;
 int ice_logtype_driver;
@@ -1259,6 +1266,69 @@ ice_pf_setup(struct ice_pf *pf)
        return 0;
 }
 
+static int ice_load_pkg(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       const char *pkg_file = ICE_DFLT_PKG_FILE;
+       int err;
+       uint8_t *buf;
+       int buf_len;
+       FILE *file;
+       struct stat fstat;
+
+       file = fopen(pkg_file, "rb");
+       if (!file) {
+               PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
+               return -1;
+       }
+
+       err = stat(pkg_file, &fstat);
+       if (err) {
+               PMD_INIT_LOG(ERR, "failed to get file stats\n");
+               fclose(file);
+               return err;
+       }
+
+       buf_len = fstat.st_size;
+       buf = rte_malloc(NULL, buf_len, 0);
+
+       if (!buf) {
+               PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
+                               buf_len);
+               fclose(file);
+               return -1;
+       }
+
+       err = fread(buf, buf_len, 1, file);
+       if (err != 1) {
+               PMD_INIT_LOG(ERR, "failed to read package data\n");
+               fclose(file);
+               err = -1;
+               goto fail_exit;
+       }
+
+       fclose(file);
+
+       err = ice_copy_and_init_pkg(hw, buf, buf_len);
+       if (err) {
+               PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
+               goto fail_exit;
+       }
+       err = ice_init_hw_tbls(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
+               goto fail_init_tbls;
+       }
+
+       return 0;
+
+fail_init_tbls:
+       rte_free(hw->pkg_copy);
+fail_exit:
+       rte_free(buf);
+       return err;
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
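
Not part of the patch: the new ice_load_pkg() fails soft, so a missing or unreadable /lib/firmware/intel/ice/ddp/ice.pkg only drops the port into Safe Mode at init time. A deployment that wants to catch this earlier can pre-check the file with plain libc calls, roughly mirroring what ice_load_pkg() does. A minimal standalone sketch; the helper name ddp_pkg_readable() is illustrative only:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Pre-flight check, roughly what ice_load_pkg() needs: the default DDP
     * package must exist, be a regular non-empty file and be readable. */
    static int
    ddp_pkg_readable(const char *path)
    {
            struct stat st;
            FILE *f;

            if (stat(path, &st) != 0 || !S_ISREG(st.st_mode) || st.st_size == 0)
                    return 0;
            f = fopen(path, "rb");
            if (f == NULL)
                    return 0;
            fclose(f);
            return 1;
    }

    int
    main(void)
    {
            const char *path = "/lib/firmware/intel/ice/ddp/ice.pkg";

            if (!ddp_pkg_readable(path))
                    fprintf(stderr, "%s not usable, ice will start in Safe Mode\n", path);
            return 0;
    }
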
@@ -1266,6 +1336,8 @@ ice_dev_init(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct ice_vsi *vsi;
        int ret;
 
@@ -1298,6 +1370,13 @@ ice_dev_init(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       ret = ice_load_pkg(dev);
+       if (ret) {
+               PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
+                               "entering Safe Mode");
+               ad->is_safe_mode = 1;
+       }
+
        PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
                     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
                     hw->api_maj_ver, hw->api_min_ver);
@@ -1512,12 +1591,18 @@ static int ice_init_rss(struct ice_pf *pf)
        struct ice_aqc_get_set_rss_keys key;
        uint16_t i, nb_q;
        int ret = 0;
+       bool is_safe_mode = pf->adapter->is_safe_mode;
 
        rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
        nb_q = dev->data->nb_rx_queues;
        vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
        vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
 
+       if (is_safe_mode) {
+               PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
+               return 0;
+       }
+
        if (!vsi->rss_key)
                vsi->rss_key = rte_zmalloc(NULL,
                                           vsi->rss_key_size, 0);
@@ -1550,6 +1635,44 @@ static int ice_init_rss(struct ice_pf *pf)
        if (ret)
                return -EINVAL;
 
+       /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+                             ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
+
+       /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+                             ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
+
+       /* configure RSS for sctp6 with input set IPv6 src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+                             ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+                               __func__, ret);
+
+       /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+                             ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
+
+       /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+                             ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
+
+       /* configure RSS for sctp4 with input set IP src/dst */
+       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+                             ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+       if (ret)
+               PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+                               __func__, ret);
+
        return 0;
 }
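
Not part of the patch: ice_init_rss() now returns early in Safe Mode, and the dev_info hunk further down leaves flow_type_rss_offloads at 0 in that case. An application stays portable by masking the hash types it requests against what the port reports. A minimal sketch using the generic ethdev API, assuming the pre-20.11 ETH_* names used elsewhere in this patch:

    #include <rte_ethdev.h>

    /* Request RSS on a port, but only with hash types the PMD advertises.
     * In ice Safe Mode flow_type_rss_offloads is 0, so rss_hf ends up 0 and
     * the port is configured without RSS hashing. */
    static int
    configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_conf conf = {
                    .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
            };

            rte_eth_dev_info_get(port_id, &dev_info);

            conf.rx_adv_conf.rss_conf.rss_hf =
                    (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
                    dev_info.flow_type_rss_offloads;

            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }
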
 
@@ -1741,6 +1864,7 @@ ice_dev_start(struct rte_eth_dev *dev)
        }
 
        ice_set_rx_function(dev);
+       ice_set_tx_function(dev);
 
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                        ETH_VLAN_EXTEND_MASK;
@@ -1821,6 +1945,9 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       bool is_safe_mode = pf->adapter->is_safe_mode;
+       u64 phy_type_low;
+       u64 phy_type_high;
 
        dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
@@ -1831,33 +1958,40 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_QINQ_STRIP |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_VLAN_EXTEND |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_KEEP_CRC |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_VLAN_FILTER;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_QINQ_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_MULTI_SEGS |
                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       dev_info->flow_type_rss_offloads = 0;
+
+       if (!is_safe_mode) {
+               dev_info->rx_offload_capa |=
+                       DEV_RX_OFFLOAD_IPV4_CKSUM |
+                       DEV_RX_OFFLOAD_UDP_CKSUM |
+                       DEV_RX_OFFLOAD_TCP_CKSUM |
+                       DEV_RX_OFFLOAD_QINQ_STRIP |
+                       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                       DEV_RX_OFFLOAD_VLAN_EXTEND;
+               dev_info->tx_offload_capa |=
+                       DEV_TX_OFFLOAD_QINQ_INSERT |
+                       DEV_TX_OFFLOAD_IPV4_CKSUM |
+                       DEV_TX_OFFLOAD_UDP_CKSUM |
+                       DEV_TX_OFFLOAD_TCP_CKSUM |
+                       DEV_TX_OFFLOAD_SCTP_CKSUM |
+                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+               dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
+       }
+
        dev_info->rx_queue_offload_capa = 0;
        dev_info->tx_queue_offload_capa = 0;
 
        dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
        dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-       dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -1900,10 +2034,17 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                               ETH_LINK_SPEED_5G |
                               ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_20G |
-                              ETH_LINK_SPEED_25G |
-                              ETH_LINK_SPEED_40G |
-                              ETH_LINK_SPEED_50G |
-                              ETH_LINK_SPEED_100G;
+                              ETH_LINK_SPEED_25G;
+
+       phy_type_low = hw->port_info->phy.phy_type_low;
+       phy_type_high = hw->port_info->phy.phy_type_high;
+
+       if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
+               dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+
+       if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
+                       ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
+               dev_info->speed_capa |= ETH_LINK_SPEED_100G;
 
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
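
Not part of the patch: after this change speed_capa advertises 50G/100G only when the PHY types reported by the port (phy_type_low/phy_type_high) actually support those speeds. An application sees the result through rte_eth_dev_info_get(); a minimal sketch, assuming the same ETH_LINK_SPEED_* flags used above:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Report whether a port advertises the "high" speeds after the fix. */
    static void
    print_high_speed_capa(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;

            /* void return in the DPDK releases this patch targets */
            rte_eth_dev_info_get(port_id, &dev_info);

            if (dev_info.speed_capa & ETH_LINK_SPEED_50G)
                    printf("port %u: 50G capable\n", port_id);
            if (dev_info.speed_capa & ETH_LINK_SPEED_100G)
                    printf("port %u: 100G capable\n", port_id);
    }
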
@@ -2814,26 +2955,26 @@ ice_get_eeprom(struct rte_eth_dev *dev,
 {
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t *data = eeprom->data;
-       uint16_t offset, length, i;
-       enum ice_status ret_code = ICE_SUCCESS;
+       uint16_t first_word, last_word, nwords;
+       enum ice_status status = ICE_SUCCESS;
 
-       offset = eeprom->offset >> 1;
-       length = eeprom->length >> 1;
+       first_word = eeprom->offset >> 1;
+       last_word = (eeprom->offset + eeprom->length - 1) >> 1;
+       nwords = last_word - first_word + 1;
 
-       if (offset > hw->nvm.sr_words ||
-           offset + length > hw->nvm.sr_words) {
+       if (first_word > hw->nvm.sr_words ||
+           last_word > hw->nvm.sr_words) {
                PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
                return -EINVAL;
        }
 
        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 
-       for (i = 0; i < length; i++) {
-               ret_code = ice_read_sr_word(hw, offset + i, &data[i]);
-               if (ret_code != ICE_SUCCESS) {
-                       PMD_DRV_LOG(ERR, "EEPROM read failed.");
-                       return -EIO;
-               }
+       status = ice_read_sr_buf(hw, first_word, &nwords, data);
+       if (status) {
+               PMD_DRV_LOG(ERR, "EEPROM read failed.");
+               eeprom->length = sizeof(uint16_t) * nwords;
+               return -EIO;
        }
 
        return 0;
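
Not part of the patch: the rewritten ice_get_eeprom() converts the caller's byte offset/length into whole 16-bit shadow-RAM words and fetches them with a single ice_read_sr_buf() call instead of a per-word loop. The request is still expressed in bytes through the generic ethdev API; a minimal usage sketch (the port id and the 64-byte window are arbitrary choices for illustration):

    #include <stdlib.h>
    #include <rte_ethdev.h>

    /* Read the first 64 bytes of the port's NVM through the generic API;
     * the PMD rounds the byte window to whole 16-bit shadow-RAM words. */
    static int
    dump_eeprom_head(uint16_t port_id)
    {
            struct rte_dev_eeprom_info info = { 0 };
            int len = rte_eth_dev_get_eeprom_length(port_id);

            if (len <= 0)
                    return len;

            info.offset = 0;
            info.length = (uint32_t)(len < 64 ? len : 64);
            info.data = malloc(info.length);
            if (info.data == NULL)
                    return -1;

            if (rte_eth_dev_get_eeprom(port_id, &info) != 0) {
                    free(info.data);
                    return -1;
            }

            /* ... inspect info.data ... */
            free(info.data);
            return 0;
    }
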