net/hns3: support masking device capability
[dpdk.git] / drivers / net / hns3 / hns3_cmd.c
index 5ec3dfe..62dfc19 100644
@@ -1,23 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
-#include <errno.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/queue.h>
-#include <inttypes.h>
-#include <unistd.h>
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_dev.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 #include <rte_io.h>
 
 #include "hns3_ethdev.h"
@@ -210,15 +195,17 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
 {
        struct hns3_cmq_ring *csq = &hw->cmq.csq;
        uint32_t head;
+       uint32_t addr;
        int clean;
 
        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
-
-       if (!is_valid_csq_clean_head(csq, head)) {
-               hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
-                           csq->next_to_use, csq->next_to_clean);
+       addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
+       if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
+               hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
+                        csq->next_to_use, csq->next_to_clean);
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-                       rte_atomic16_set(&hw->reset.disable_cmd, 1);
+                       __atomic_store_n(&hw->reset.disable_cmd, 1,
+                                        __ATOMIC_RELAXED);
                        hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                }
 
@@ -250,7 +237,12 @@ hns3_is_special_opcode(uint16_t opcode)
                                  HNS3_OPC_STATS_MAC,
                                  HNS3_OPC_STATS_MAC_ALL,
                                  HNS3_OPC_QUERY_32_BIT_REG,
-                                 HNS3_OPC_QUERY_64_BIT_REG};
+                                 HNS3_OPC_QUERY_64_BIT_REG,
+                                 HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
+                                 HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
+                                 HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+                                 HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
+                                 HNS3_OPC_QUERY_ALL_ERR_INFO,};
        uint32_t i;
 
        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
@@ -263,34 +255,32 @@ hns3_is_special_opcode(uint16_t opcode)
 static int
 hns3_cmd_convert_err_code(uint16_t desc_ret)
 {
-       switch (desc_ret) {
-       case HNS3_CMD_EXEC_SUCCESS:
-               return 0;
-       case HNS3_CMD_NO_AUTH:
-               return -EPERM;
-       case HNS3_CMD_NOT_SUPPORTED:
-               return -EOPNOTSUPP;
-       case HNS3_CMD_QUEUE_FULL:
-               return -EXFULL;
-       case HNS3_CMD_NEXT_ERR:
-               return -ENOSR;
-       case HNS3_CMD_UNEXE_ERR:
-               return -ENOTBLK;
-       case HNS3_CMD_PARA_ERR:
-               return -EINVAL;
-       case HNS3_CMD_RESULT_ERR:
-               return -ERANGE;
-       case HNS3_CMD_TIMEOUT:
-               return -ETIME;
-       case HNS3_CMD_HILINK_ERR:
-               return -ENOLINK;
-       case HNS3_CMD_QUEUE_ILLEGAL:
-               return -ENXIO;
-       case HNS3_CMD_INVALID:
-               return -EBADR;
-       default:
-               return -EIO;
-       }
+       static const struct {
+               uint16_t imp_errcode;
+               int linux_errcode;
+       } hns3_cmdq_status[] = {
+               {HNS3_CMD_EXEC_SUCCESS, 0},
+               {HNS3_CMD_NO_AUTH, -EPERM},
+               {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+               {HNS3_CMD_QUEUE_FULL, -EXFULL},
+               {HNS3_CMD_NEXT_ERR, -ENOSR},
+               {HNS3_CMD_UNEXE_ERR, -ENOTBLK},
+               {HNS3_CMD_PARA_ERR, -EINVAL},
+               {HNS3_CMD_RESULT_ERR, -ERANGE},
+               {HNS3_CMD_TIMEOUT, -ETIME},
+               {HNS3_CMD_HILINK_ERR, -ENOLINK},
+               {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
+               {HNS3_CMD_INVALID, -EBADR},
+               {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
+       };
+
+       uint32_t i;
+
+       for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
+               if (hns3_cmdq_status[i].imp_errcode == desc_ret)
+                       return hns3_cmdq_status[i].linux_errcode;
+
+       return -EREMOTEIO;
 }
 
 static int
@@ -329,7 +319,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
                if (hns3_cmd_csq_done(hw))
                        return 0;
 
-               if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+               if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
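This patch also migrates hw->reset.disable_cmd from the legacy rte_atomic16_set()/rte_atomic16_read() accessors to the GCC __atomic builtins with relaxed ordering, as seen in the hunks above. A minimal sketch of that pattern, assuming the flag is declared as a plain uint16_t (the names below are illustrative, not taken from the driver):

    #include <stdint.h>

    static uint16_t disable_cmd;

    /* replaces rte_atomic16_set(&flag, 1) */
    static void cmd_disable(void)
    {
            __atomic_store_n(&disable_cmd, 1, __ATOMIC_RELAXED);
    }

    /* replaces rte_atomic16_read(&flag) */
    static int cmd_is_disabled(void)
    {
            return __atomic_load_n(&disable_cmd, __ATOMIC_RELAXED) != 0;
    }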
@@ -349,11 +339,23 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 
 /*
  * hns3_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
  *
- * This is the main send command for command queue, it
+ * @param hw
+ *   pointer to the hw struct
+ * @param desc
+ *   prefilled descriptor for describing the command
+ * @param num
+ *   the number of descriptors to be sent
+ * @return
+ *   - -EBUSY if the device is being reset
+ *   - -EIO   if the cmd csq is corrupted (due to reset) or
+ *            a reset is pending
+ *   - -ENOMEM/-ETIME/... (non-zero) on other errors
+ *   - Zero   if the operation completed successfully
+ *
+ * Note: -EBUSY/-EIO are only used in the reset case.
+ *
+ * Note: this is the main send routine for the command queue; it
  * sends the queue, cleans the queue, etc
  */
 int
@@ -364,7 +366,7 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
        int retval;
        uint32_t ntc;
 
-       if (rte_atomic16_read(&hw->reset.disable_cmd))
+       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
                return -EBUSY;
 
        rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -414,8 +416,122 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
        return retval;
 }
 
-static enum hns3_cmd_status
-hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
+static const char *
+hns3_get_caps_name(uint32_t caps_id)
+{
+       const struct {
+               enum HNS3_CAPS_BITS caps;
+               const char *name;
+       } dev_caps[] = {
+               { HNS3_CAPS_UDP_GSO_B,         "udp_gso"         },
+               { HNS3_CAPS_ATR_B,             "atr"             },
+               { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
+               { HNS3_CAPS_PTP_B,             "ptp"             },
+               { HNS3_CAPS_INT_QL_B,          "int_ql"          },
+               { HNS3_CAPS_SIMPLE_BD_B,       "simple_bd"       },
+               { HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
+               { HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
+               { HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
+               { HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
+               { HNS3_CAPS_STASH_B,           "stash"           },
+               { HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
+               { HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
+               { HNS3_CAPS_FEC_B,             "fec"             },
+               { HNS3_CAPS_PAUSE_B,           "pause"           },
+               { HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  }
+       };
+       uint32_t i;
+
+       for (i = 0; i < RTE_DIM(dev_caps); i++) {
+               if (dev_caps[i].caps == caps_id)
+                       return dev_caps[i].name;
+       }
+
+       return "unknown";
+}
+
+static void
+hns3_mask_capability(struct hns3_hw *hw,
+                    struct hns3_query_version_cmd *cmd)
+{
+#define MAX_CAPS_BIT   64
+
+       struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+       uint64_t caps_org, caps_new, caps_masked;
+       uint32_t i;
+
+       if (hns->dev_caps_mask == 0)
+               return;
+
+       memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
+       caps_org = rte_le_to_cpu_64(caps_org);
+       caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
+       caps_masked = caps_org ^ caps_new;
+       caps_new = rte_cpu_to_le_64(caps_new);
+       memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));
+
+       for (i = 0; i < MAX_CAPS_BIT; i++) {
+               if (!(caps_masked & BIT_ULL(i)))
+                       continue;
+               hns3_info(hw, "mask capability: id-%u, name-%s.",
+                         i, hns3_get_caps_name(i));
+       }
+}
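The bit arithmetic above clears exactly the bits requested in dev_caps_mask: caps_org ^ (caps_org & mask) is the same as caps_org & ~mask, and XOR-ing the old and new words recovers the bits that were hidden. A small worked example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t caps_org = 0xb; /* firmware reports bits 0, 1 and 3 */
            uint64_t mask     = 0x3; /* user asks to hide bits 0 and 1   */
            uint64_t caps_new    = caps_org ^ (caps_org & mask);
            uint64_t caps_masked = caps_org ^ caps_new;

            assert(caps_new == 0x8);    /* only bit 3 survives           */
            assert(caps_masked == 0x3); /* the two bits that were hidden */
            return 0;
    }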
+
+static void
+hns3_parse_capability(struct hns3_hw *hw,
+                     struct hns3_query_version_cmd *cmd)
+{
+       uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
+
+       if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
+                            1);
+       if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
+               /*
+                * PTP depends on a special packet type that the hardware only
+                * reports when the rxd advanced layout is enabled, so if the
+                * hardware doesn't support rxd advanced layout, the driver
+                * should ignore the PTP capability.
+                */
+               if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
+                       hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
+               else
+                       hns3_warn(hw, "ignore PTP capability due to lack of "
+                                 "rxd advanced layout capability.");
+       }
+       if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+                            1);
+       if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
+               hns3_set_bit(hw->capability,
+                               HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
+       if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
+               hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
+}
+
+static uint32_t
+hns3_build_api_caps(void)
+{
+       uint32_t api_caps = 0;
+
+       hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);
+
+       return rte_cpu_to_le_32(api_caps);
+}
+
+static int
+hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
 {
        struct hns3_query_version_cmd *resp;
        struct hns3_cmd_desc desc;
@@ -423,13 +539,22 @@ hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
 
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
        resp = (struct hns3_query_version_cmd *)desc.data;
+       resp->api_caps = hns3_build_api_caps();
 
        /* Initialize the cmd function */
        ret = hns3_cmd_send(hw, &desc, 1);
-       if (ret == 0)
-               *version = rte_le_to_cpu_32(resp->firmware);
+       if (ret)
+               return ret;
 
-       return ret;
+       hw->fw_version = rte_le_to_cpu_32(resp->firmware);
+       /*
+        * Make sure to mask the capabilities before parsing them, because
+        * masking may overwrite the data in resp.
+        */
+       hns3_mask_capability(hw, resp);
+       hns3_parse_capability(hw, resp);
+
+       return 0;
 }
 
 int
@@ -475,9 +600,100 @@ err_crq:
        return ret;
 }
 
+static void
+hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+
+       if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
+               return;
+
+       if (fw_compact_cmd_result != 0) {
+               /*
+                * A non-zero fw_compact_cmd_result means the firmware doesn't
+                * support the link status change interrupt.
+                * The framework has already set the RTE_ETH_DEV_INTR_LSC bit
+                * because the driver declared RTE_PCI_DRV_INTR_LSC in
+                * drv_flags, so the RTE_ETH_DEV_INTR_LSC capability must be
+                * cleared when the firmware doesn't support it.
+                */
+               dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+       }
+}
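Once RTE_ETH_DEV_INTR_LSC is cleared here, link-status-change events will not be delivered for the port, so an application should check the flag before depending on them. A sketch of such a check using the generic ethdev API (the callback and helper names are hypothetical):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static int
    lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                 void *cb_arg, void *ret_param)
    {
            RTE_SET_USED(event);
            RTE_SET_USED(cb_arg);
            RTE_SET_USED(ret_param);
            printf("port %u link status changed\n", port_id);
            return 0;
    }

    static void
    register_lsc_if_supported(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            if (rte_eth_dev_info_get(port_id, &info) != 0)
                    return;
            /* dev_flags points at the device flags word */
            if (*info.dev_flags & RTE_ETH_DEV_INTR_LSC)
                    rte_eth_dev_callback_register(port_id,
                                                  RTE_ETH_EVENT_INTR_LSC,
                                                  lsc_event_cb, NULL);
    }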
+
+static int
+hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
+{
+       if (result != 0 && hns3_dev_copper_supported(hw)) {
+               hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
+                        result);
+               return result;
+       }
+
+       hns3_update_dev_lsc_cap(hw, result);
+
+       return 0;
+}
+
+static int
+hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
+{
+       struct hns3_firmware_compat_cmd *req;
+       struct hns3_cmd_desc desc;
+       uint32_t compat = 0;
+
+#if defined(RTE_HNS3_ONLY_1630_FPGA)
+       /* If the PHY driver of IMP is not configured (as indicated by the
+        * reserved register), the driver will use a temporary PHY driver.
+        */
+       struct rte_pci_device *pci_dev;
+       struct rte_eth_dev *eth_dev;
+       uint8_t revision;
+       int ret;
+
+       eth_dev = &rte_eth_devices[hw->data->port_id];
+       pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+       /* Get PCI revision id */
+       ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
+                                 HNS3_PCI_REVISION_ID);
+       if (ret != HNS3_PCI_REVISION_ID_LEN) {
+               PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
+                            ret);
+               return -EIO;
+       }
+       if (revision == PCI_REVISION_ID_HIP09_A) {
+               struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+               if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
+                       PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
+                       pf->is_tmp_phy = true;
+                       hns3_set_bit(hw->capability,
+                                    HNS3_DEV_SUPPORT_COPPER_B, 1);
+                       return 0;
+               }
+
+               PMD_INIT_LOG(ERR, "***use phy driver in imp***");
+       }
+#endif
+
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
+       req = (struct hns3_firmware_compat_cmd *)desc.data;
+
+       if (is_init) {
+               hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
+               hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
+               if (hns3_dev_copper_supported(hw))
+                       hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
+       }
+       req->compat = rte_cpu_to_le_32(compat);
+
+       return hns3_cmd_send(hw, &desc, 1);
+}
+
 int
 hns3_cmd_init(struct hns3_hw *hw)
 {
+       struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+       uint32_t version;
        int ret;
 
        rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -504,20 +720,50 @@ hns3_cmd_init(struct hns3_hw *hw)
                ret = -EBUSY;
                goto err_cmd_init;
        }
-       rte_atomic16_clear(&hw->reset.disable_cmd);
+       __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
 
-       ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
+       ret = hns3_cmd_query_firmware_version_and_capability(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
                goto err_cmd_init;
        }
 
-       PMD_INIT_LOG(INFO, "The firmware version is %08x", hw->fw_version);
+       version = hw->fw_version;
+       PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
+                    hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
+                                   HNS3_FW_VERSION_BYTE3_S),
+                    hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
+                                   HNS3_FW_VERSION_BYTE2_S),
+                    hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
+                                   HNS3_FW_VERSION_BYTE1_S),
+                    hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
+                                   HNS3_FW_VERSION_BYTE0_S));
+
+       if (hns->is_vf)
+               return 0;
+
+       /*
+        * Ask the firmware to enable some features. A fiber port can still
+        * work without them, but a copper port cannot, because the firmware
+        * fails to take over the PHY.
+        */
+       ret = hns3_firmware_compat_config(hw, true);
+       if (ret)
+               PMD_INIT_LOG(WARNING, "firmware compatible features not "
+                            "supported, ret = %d.", ret);
+
+       /*
+        * Act on the firmware compatibility configuration result: fail if a
+        * copper port cannot be taken over, otherwise update the LSC
+        * capability accordingly.
+        */
+       ret = hns3_apply_fw_compat_cmd_result(hw, ret);
+       if (ret)
+               goto err_cmd_init;
 
        return 0;
 
 err_cmd_init:
-       hns3_cmd_uninit(hw);
+       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
        return ret;
 }
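The dotted firmware version logged in hns3_cmd_init() is just the four bytes of the 32-bit word reported by the firmware, most significant byte first (assuming the HNS3_FW_VERSION_BYTE*_M/_S fields select whole bytes). A worked example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t fw_version = 0x01080b00;

            /* prints "1.8.11.0" */
            printf("%u.%u.%u.%u\n",
                   (fw_version >> 24) & 0xff,
                   (fw_version >> 16) & 0xff,
                   (fw_version >> 8) & 0xff,
                   fw_version & 0xff);
            return 0;
    }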
 
@@ -541,9 +787,26 @@ hns3_cmd_destroy_queue(struct hns3_hw *hw)
 void
 hns3_cmd_uninit(struct hns3_hw *hw)
 {
+       struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+
+       if (!hns->is_vf)
+               (void)hns3_firmware_compat_config(hw, false);
+
+       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+
+       /*
+        * A delay is added to ensure that the register cleanup operations
+        * are not performed concurrently with firmware command processing,
+        * and that all outstanding commands have been executed.
+        * Concurrency may occur in two scenarios: asynchronous commands and
+        * timed-out commands. If a command could not be executed because the
+        * firmware was busy, it will be processed during the firmware's next
+        * scheduling pass.
+        */
+       rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);
+
        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);
-       rte_atomic16_set(&hw->reset.disable_cmd, 1);
        hns3_cmd_clear_regs(hw);
        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);