#define HNS3_RESET_WAIT_MS 100
#define HNS3_RESET_WAIT_CNT 200
+/* FEC mode order defined in HNS3 hardware */
+#define HNS3_HW_FEC_MODE_NOFEC 0
+#define HNS3_HW_FEC_MODE_BASER 1
+#define HNS3_HW_FEC_MODE_RS 2
+
enum hns3_evt_cause {
HNS3_VECTOR0_EVENT_RST,
HNS3_VECTOR0_EVENT_MBX,
HNS3_VECTOR0_EVENT_OTHER,
};
+static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
+ { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
+
+ { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+ { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
+
+ { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+ { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
+
+ { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
+};
+
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
struct rte_ether_addr *mac_addr);
+static int hns3_restore_fec(struct hns3_hw *hw);
+static int hns3_query_dev_fec_info(struct rte_eth_dev *dev);
static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
device_id == HNS3_DEV_ID_200G_RDMA)
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
+ ret = hns3_query_dev_fec_info(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "failed to query FEC information, ret = %d", ret);
+ return ret;
+ }
+
/* Get PCI revision id */
ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
HNS3_PCI_REVISION_ID);
if (ret)
goto err_promisc;
+ ret = hns3_restore_fec(hw);
+ if (ret)
+ goto err_promisc;
+
if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
ret = hns3_do_start(hns, false);
if (ret)
hns3_msix_process(hns, reset_level);
}
+static unsigned int
+hns3_get_speed_capa_num(uint16_t device_id)
+{
+ unsigned int num;
+
+ switch (device_id) {
+ case HNS3_DEV_ID_25GE:
+ case HNS3_DEV_ID_25GE_RDMA:
+ num = 2;
+ break;
+ case HNS3_DEV_ID_100G_RDMA_MACSEC:
+ case HNS3_DEV_ID_200G_RDMA:
+ num = 1;
+ break;
+ default:
+ num = 0;
+ break;
+ }
+
+ return num;
+}
+
+static int
+hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
+ uint16_t device_id)
+{
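+ /*
+ * Indices below refer to entries of speed_fec_capa_tbl in ascending
+ * speed order: 0 = 10G, 1 = 25G, 2 = 40G, 3 = 50G, 4 = 100G, 5 = 200G.
+ */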
+ switch (device_id) {
+ case HNS3_DEV_ID_25GE:
+ /* fallthrough */
+ case HNS3_DEV_ID_25GE_RDMA:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
+
+ /* In HNS3 devices, a 25G NIC is also compatible with the 10G rate */
+ speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
+ speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
+ break;
+ case HNS3_DEV_ID_100G_RDMA_MACSEC:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
+ break;
+ case HNS3_DEV_ID_200G_RDMA:
+ speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
+ speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_fec_get_capability(struct rte_eth_dev *dev,
+ struct rte_eth_fec_capa *speed_fec_capa,
+ unsigned int num)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t device_id = pci_dev->id.device_id;
+ unsigned int capa_num;
+ int ret;
+
+ capa_num = hns3_get_speed_capa_num(device_id);
+ if (capa_num == 0) {
+ hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
+ device_id);
+ return -ENOTSUP;
+ }
+
+ if (speed_fec_capa == NULL || num < capa_num)
+ return capa_num;
+
+ ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
+ if (ret)
+ return -ENOTSUP;
+
+ return capa_num;
+}
+
+static int
+get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
+{
+ struct hns3_config_fec_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
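+ /* Read back the current FEC configuration; the last argument marks a read */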
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
+ req = (struct hns3_config_fec_cmd *)desc.data;
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get current fec auto state failed, ret = %d",
+ ret);
+ return ret;
+ }
+
+ *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
+ return 0;
+}
+
+static int
+hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+#define QUERY_ACTIVE_SPEED 1
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_sfp_speed_cmd *resp;
+ uint32_t tmp_fec_capa;
+ uint8_t auto_state;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ /*
+ * If the link is down and AUTO is enabled, AUTO is returned;
+ * otherwise, the configured FEC mode is returned.
+ * If the link is up, the current FEC mode is returned.
+ */
+ if (hw->mac.link_status == ETH_LINK_DOWN) {
+ ret = get_current_fec_auto_state(hw, &auto_state);
+ if (ret)
+ return ret;
+
+ if (auto_state == 0x1) {
+ *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
+ return 0;
+ }
+ }
+
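+ /* Query the currently active FEC mode from firmware via the SFP speed command */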
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
+ resp = (struct hns3_sfp_speed_cmd *)desc.data;
+ resp->query_type = QUERY_ACTIVE_SPEED;
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
+ return ret;
+ } else if (ret) {
+ hns3_err(hw, "get FEC failed, ret = %d", ret);
+ return ret;
+ }
+
+ /*
+ * FEC mode order defined in hns3 hardware is inconsistent with
+ * that defined in the ethdev library. So the sequence needs
+ * to be converted.
+ */
+ switch (resp->active_fec) {
+ case HNS3_HW_FEC_MODE_NOFEC:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+ break;
+ case HNS3_HW_FEC_MODE_BASER:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+ break;
+ case HNS3_HW_FEC_MODE_RS:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+ break;
+ default:
+ tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+ break;
+ }
+
+ *fec_capa = tmp_fec_capa;
+ return 0;
+}
+
+static int
+hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
+{
+ struct hns3_config_fec_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hns3_config_fec_cmd *)desc.data;
+ switch (mode) {
+ case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
+ hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
+ HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
+ break;
+ case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
+ hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
+ break;
+ default:
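+ /* Callers validate the mode beforehand, so treat anything else as a no-op */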
+ return 0;
+ }
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "set fec mode failed, ret = %d", ret);
+
+ return ret;
+}
+
+static uint32_t
+get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
+{
+ struct hns3_mac *mac = &hw->mac;
+ uint32_t cur_capa;
+
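+ /*
+ * fec_capa is expected to have been filled by hns3_fec_get_capability():
+ * index 0 holds the device's primary speed and, on 25G devices,
+ * index 1 holds the compatible 10G entry.
+ */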
+ switch (mac->link_speed) {
+ case ETH_SPEED_NUM_10G:
+ cur_capa = fec_capa[1].capa;
+ break;
+ case ETH_SPEED_NUM_25G:
+ case ETH_SPEED_NUM_100G:
+ case ETH_SPEED_NUM_200G:
+ cur_capa = fec_capa[0].capa;
+ break;
+ default:
+ cur_capa = 0;
+ break;
+ }
+
+ return cur_capa;
+}
+
+static bool
+is_fec_mode_one_bit_set(uint32_t mode)
+{
+ int cnt = 0;
+ uint8_t i;
+
+ for (i = 0; i < sizeof(mode) * 8; i++)
+ if (mode >> i & 0x1)
+ cnt++;
+
+ return cnt == 1 ? true : false;
+}
+
+static int
+hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
+{
+#define FEC_CAPA_NUM 2
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+ struct hns3_pf *pf = &hns->pf;
+
+ struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
+ uint32_t cur_capa;
+ uint32_t num = FEC_CAPA_NUM;
+ int ret;
+
+ ret = hns3_fec_get_capability(dev, fec_capa, num);
+ if (ret < 0)
+ return ret;
+
+ /* HNS3 PMD only supports modes with a single bit set, e.g. 0x1, 0x4 */
+ if (!is_fec_mode_one_bit_set(mode)) {
+ hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
+ "FEC mode should have only one bit set", mode);
+ return -EINVAL;
+ }
+
+ /*
+ * Check whether the configured mode is within the FEC capability
+ * of the current link speed; if not, reject it.
+ */
+ cur_capa = get_current_speed_fec_cap(hw, fec_capa);
+ if (!(cur_capa & mode)) {
+ hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
+ return -EINVAL;
+ }
+
+ ret = hns3_set_fec_hw(hw, mode);
+ if (ret)
+ return ret;
+
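+ /* Record the mode so hns3_restore_fec() can re-apply it after a reset */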
+ pf->fec_mode = mode;
+ return 0;
+}
+
+static int
+hns3_restore_fec(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ uint32_t mode = pf->fec_mode;
+ int ret;
+
+ ret = hns3_set_fec_hw(hw, mode);
+ if (ret)
+ hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
+ mode, ret);
+
+ return ret;
+}
+
+static int
+hns3_query_dev_fec_info(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+ struct hns3_pf *pf = &hns->pf;
+ int ret;
+
+ ret = hns3_fec_get(dev, &pf->fec_mode);
+ if (ret)
+ hns3_err(hw, "query device FEC info failed, ret = %d", ret);
+
+ return ret;
+}
+
static const struct eth_dev_ops hns3_eth_dev_ops = {
.dev_configure = hns3_dev_configure,
.dev_start = hns3_dev_start,
.get_reg = hns3_get_regs,
.get_dcb_info = hns3_get_dcb_info,
.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
+ .fec_get_capability = hns3_fec_get_capability,
+ .fec_get = hns3_fec_get,
+ .fec_set = hns3_fec_set,
};
static const struct hns3_reset_ops hns3_reset_ops = {