#include <ethdev_pci.h>
#include <rte_io.h>
-#include "hns3_ethdev.h"
+#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"
-#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
+ static uint64_t hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
- snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
+ __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
ring->desc = mz->addr;
ring->desc_dma_addr = mz->iova;
ring->zone = (const void *)mz;
- hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
- mz->name, ring->desc_dma_addr);
+ hns3_dbg(hw, "cmd ring memzone name: %s", mz->name);
return 0;
}
+/*
+ * Free the memzone backing a command queue ring's descriptor array and
+ * reset the ring's buffer bookkeeping. The hw argument was dropped because
+ * the debug trace that used it was removed.
+ */
static void
-hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+hns3_free_dma_mem(struct hns3_cmq_ring *ring)
{
- hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
- ((const struct rte_memzone *)ring->zone)->name,
- ring->desc_dma_addr);
rte_memzone_free((const struct rte_memzone *)ring->zone);
ring->buf_size = 0;
ring->desc = NULL;
}
+/*
+ * Free the ring's descriptor DMA memory if it was allocated. The hw
+ * parameter is now unused (marked __rte_unused) but kept so callers'
+ * signatures stay unchanged.
+ */
static void
-hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+hns3_free_cmd_desc(__rte_unused struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
if (ring->desc)
- hns3_free_dma_mem(hw, ring);
+ hns3_free_dma_mem(ring);
}
static int
HNS3_OPC_STATS_MAC,
HNS3_OPC_STATS_MAC_ALL,
HNS3_OPC_QUERY_32_BIT_REG,
- HNS3_OPC_QUERY_64_BIT_REG};
+ HNS3_OPC_QUERY_64_BIT_REG,
+ HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
+ HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
+ HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HNS3_OPC_QUERY_ALL_ERR_INFO,};
uint32_t i;
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ for (i = 0; i < RTE_DIM(spec_opcode); i++)
if (spec_opcode[i] == opcode)
return true;
uint32_t i;
- for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
+ for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
if (hns3_cmdq_status[i].imp_errcode == desc_ret)
return hns3_cmdq_status[i].linux_errcode;
return retval;
}
+/*
+ * Map a firmware capability bit id to a human-readable name for logging.
+ * Returns "unknown" when the id is not in the local table.
+ */
+static const char *
+hns3_get_caps_name(uint32_t caps_id)
+{
+ const struct {
+ enum HNS3_CAPS_BITS caps;
+ const char *name;
+ } dev_caps[] = {
+ { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
+ { HNS3_CAPS_PTP_B, "ptp" },
+ { HNS3_CAPS_TX_PUSH_B, "tx_push" },
+ { HNS3_CAPS_PHY_IMP_B, "phy_imp" },
+ { HNS3_CAPS_TQP_TXRX_INDEP_B, "tqp_txrx_indep" },
+ { HNS3_CAPS_HW_PAD_B, "hw_pad" },
+ { HNS3_CAPS_STASH_B, "stash" },
+ { HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
+ { HNS3_CAPS_RAS_IMP_B, "ras_imp" },
+ { HNS3_CAPS_RXD_ADV_LAYOUT_B, "rxd_adv_layout" },
+ { HNS3_CAPS_TM_B, "tm_capability" }
+ };
+ uint32_t i;
+
+ /* Linear scan is fine: the table is tiny and this is a log-path helper. */
+ for (i = 0; i < RTE_DIM(dev_caps); i++) {
+ if (dev_caps[i].caps == caps_id)
+ return dev_caps[i].name;
+ }
+
+ return "unknown";
+}
+
+/*
+ * Clear the capability bits listed in hns->dev_caps_mask from the
+ * firmware-reported capability words in @cmd, then log each bit that was
+ * masked off. Presumably dev_caps_mask comes from user configuration —
+ * confirm against where it is set.
+ */
+static void
+hns3_mask_capability(struct hns3_hw *hw,
+ struct hns3_query_version_cmd *cmd)
+{
+#define MAX_CAPS_BIT 64
+
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ uint64_t caps_org, caps_new, caps_masked;
+ uint32_t i;
+
+ /* Nothing to mask: leave the response untouched. */
+ if (hns->dev_caps_mask == 0)
+ return;
+
+ /* caps[] is little-endian in the response; work on a CPU-order copy. */
+ memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
+ caps_org = rte_le_to_cpu_64(caps_org);
+ caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
+ caps_masked = caps_org ^ caps_new;
+ caps_new = rte_cpu_to_le_64(caps_new);
+ memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));
+
+ /* Report every bit that was cleared, by id and name. */
+ for (i = 0; i < MAX_CAPS_BIT; i++) {
+ if (!(caps_masked & BIT_ULL(i)))
+ continue;
+ hns3_info(hw, "mask capability: id-%u, name-%s.",
+ i, hns3_get_caps_name(i));
+ }
+}
+
+/*
+ * Translate firmware capability bits (first 32-bit caps word of the
+ * query-version response) into driver capability flags in hw->capability.
+ */
static void
hns3_parse_capability(struct hns3_hw *hw,
struct hns3_query_version_cmd *cmd)
{
uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
- if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
- hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
1);
if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
hns3_set_bit(hw->capability,
HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
+ /* Newly recognized firmware capabilities: RAS-in-IMP and traffic mgmt. */
+ if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
+ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
+ if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
+ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
}
static uint32_t
return ret;
hw->fw_version = rte_le_to_cpu_32(resp->firmware);
+ /*
+ * Make sure mask the capability before parse capability because it
+ * may overwrite resp's data.
+ */
+ hns3_mask_capability(hw, resp);
hns3_parse_capability(hw, resp);
return 0;
static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
- if (result != 0 && hns3_dev_copper_supported(hw)) {
+ if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
result);
return result;
}
if (revision == PCI_REVISION_ID_HIP09_A) {
struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
- if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
+ if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
pf->is_tmp_phy = true;
hns3_set_bit(hw->capability,
if (is_init) {
hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
- if (hns3_dev_copper_supported(hw))
+ if (hns3_dev_get_support(hw, COPPER))
hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
}
req->compat = rte_cpu_to_le_32(compat);
return 0;
/*
- * Requiring firmware to enable some features, firber port can still
+ * Requiring firmware to enable some features, fiber port can still
* work without it, but copper port can't work because the firmware
* fails to take over the PHY.
*/