#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
-#include <rte_pci.h>
#include "hns3_ethdev.h"
#include "hns3_common.h"
return ret;
}
-static bool
-hns3_is_1588_event_type(uint32_t event_type)
-{
- return (event_type == HNS3_VECTOR0_EVENT_PTP);
-}
-
static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
if (event_type == HNS3_VECTOR0_EVENT_RST ||
- hns3_is_1588_event_type(event_type))
+ event_type == HNS3_VECTOR0_EVENT_PTP)
hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
else if (event_type == HNS3_VECTOR0_EVENT_MBX)
hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
vcfg->strip_tag1_discard_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
return ret;
}
-static void
-hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
- struct hns3_rx_vtag_cfg *vcfg)
-{
- struct hns3_pf *pf = &hns->pf;
- memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
-}
-
-static void
-hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
- struct hns3_tx_vtag_cfg *vcfg)
-{
- struct hns3_pf *pf = &hns->pf;
- memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
-}
-
static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
return ret;
}
- hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
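+ /* Cache the Rx VLAN tag config so it can be restored after reset. */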
+ memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
+ sizeof(struct hns3_rx_vtag_cfg));
return ret;
}
vcfg->insert_tag2_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
vcfg->tag_shift_mode_en ? 1 : 0);
return ret;
}
- hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
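+ /* Likewise cache the Tx VLAN tag config for restore after reset. */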
+ memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg,
+ sizeof(struct hns3_tx_vtag_cfg));
+
return ret;
}
if (ret)
return ret;
- hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
+ memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
+ sizeof(struct hns3_rx_vtag_cfg));
+
return ret;
}
return ret;
/*
- * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx
- * need be processed by PMD driver.
+ * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE does the PVID related operation
+ * in Tx/Rx need to be processed by the PMD.
*/
if (pvid_en_state_change &&
hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
return 0;
}
-static int
-hns3_init_ring_with_vector(struct hns3_hw *hw)
-{
- uint16_t vec;
- int ret;
- int i;
-
- /*
- * In hns3 network engine, vector 0 is always the misc interrupt of this
- * function, vector 1~N can be used respectively for the queues of the
- * function. Tx and Rx queues with the same number share the interrupt
- * vector. In the initialization clearing the all hardware mapping
- * relationship configurations between queues and interrupt vectors is
- * needed, so some error caused by the residual configurations, such as
- * the unexpected Tx interrupt, can be avoid.
- */
- vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
- if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
- vec = vec - 1; /* the last interrupt is reserved */
- hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
- for (i = 0; i < hw->intr_tqps_num; i++) {
- /*
- * Set gap limiter/rate limiter/quanity limiter algorithm
- * configuration for interrupt coalesce of queue's interrupt.
- */
- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
- HNS3_TQP_INTR_GL_DEFAULT);
- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
- HNS3_TQP_INTR_GL_DEFAULT);
- hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
- /*
- * QL(quantity limiter) is not used currently, just set 0 to
- * close it.
- */
- hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
-
- ret = hns3_bind_ring_with_vector(hw, vec, false,
- HNS3_RING_TYPE_TX, i);
- if (ret) {
- PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
- "vector: %u, ret=%d", i, vec, ret);
- return ret;
- }
-
- ret = hns3_bind_ring_with_vector(hw, vec, false,
- HNS3_RING_TYPE_RX, i);
- if (ret) {
- PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
- "vector: %u, ret=%d", i, vec, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static int
hns3_setup_dcb(struct rte_eth_dev *dev)
{
goto cfg_err;
}
- /* When RSS is not configured, redirect the packet queue 0 */
if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
rss_conf = conf->rx_adv_conf.rss_conf;
- hw->rss_dis_flag = false;
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
if (ret)
goto cfg_err;
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
- uint16_t original_mps = hns->pf.mps;
int err;
int ret;
return ret;
}
- hns->pf.mps = mps;
ret = hns3_buffer_alloc(hw);
if (ret) {
hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
goto rollback;
}
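+ /*
+  * Commit the new MPS only on success so that, on failure, the
+  * rollback below still reads the old value from hns->pf.mps.
+  */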
+ hns->pf.mps = mps;
+
return 0;
rollback:
- err = hns3_set_mac_mtu(hw, original_mps);
- if (err) {
+ err = hns3_set_mac_mtu(hw, hns->pf.mps);
+ if (err)
hns3_err(hw, "fail to rollback MTU, err = %d", err);
- return ret;
- }
- hns->pf.mps = original_mps;
return ret;
}
return speed_capa;
}
-static uint32_t
+uint32_t
hns3_get_speed_capa(struct hns3_hw *hw)
{
struct hns3_mac *mac = &hw->mac;
return speed_capa;
}
-int
-hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
-{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- uint16_t queue_num = hw->tqps_num;
-
- /*
- * In interrupt mode, 'max_rx_queues' is set based on the number of
- * MSI-X interrupt resources of the hardware.
- */
- if (hw->data->dev_conf.intr_conf.rxq == 1)
- queue_num = hw->intr_tqps_num;
-
- info->max_rx_queues = queue_num;
- info->max_tx_queues = hw->tqps_num;
- info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
- info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
- info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
- info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
- info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
- info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
- RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
- RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
- RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
- RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
- RTE_ETH_RX_OFFLOAD_KEEP_CRC |
- RTE_ETH_RX_OFFLOAD_SCATTER |
- RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
- RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
- RTE_ETH_RX_OFFLOAD_RSS_HASH |
- RTE_ETH_RX_OFFLOAD_TCP_LRO);
- info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
- RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
- RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
- RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
- RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
- RTE_ETH_TX_OFFLOAD_TCP_TSO |
- RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
- RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
- hns3_txvlan_cap_get(hw));
-
- if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
- info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
-
- if (hns3_dev_get_support(hw, INDEP_TXRX))
- info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
- info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
-
- if (hns3_dev_get_support(hw, PTP))
- info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
-
- info->rx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = HNS3_MAX_RING_DESC,
- .nb_min = HNS3_MIN_RING_DESC,
- .nb_align = HNS3_ALIGN_RING_DESC,
- };
-
- info->tx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = HNS3_MAX_RING_DESC,
- .nb_min = HNS3_MIN_RING_DESC,
- .nb_align = HNS3_ALIGN_RING_DESC,
- .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
- .nb_mtu_seg_max = hw->max_non_tso_bd_num,
- };
-
- info->speed_capa = hns3_get_speed_capa(hw);
- info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
- /*
- * If there are no available Rx buffer descriptors, incoming
- * packets are always dropped by hardware based on hns3 network
- * engine.
- */
- .rx_drop_en = 1,
- .offloads = 0,
- };
- info->default_txconf = (struct rte_eth_txconf) {
- .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
- .offloads = 0,
- };
-
- info->reta_size = hw->rss_ind_tbl_size;
- info->hash_key_size = HNS3_RSS_KEY_SIZE;
- info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
-
- info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
- info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
- info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
- info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
- info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
- info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
-
- return 0;
-}
-
-static int
-hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
- size_t fw_size)
-{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- uint32_t version = hw->fw_version;
- int ret;
-
- ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
- hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
- HNS3_FW_VERSION_BYTE3_S),
- hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
- HNS3_FW_VERSION_BYTE2_S),
- hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
- HNS3_FW_VERSION_BYTE1_S),
- hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
- HNS3_FW_VERSION_BYTE0_S));
- if (ret < 0)
- return -EINVAL;
-
- ret += 1; /* add the size of '\0' */
- if (fw_size < (size_t)ret)
- return ret;
- else
- return 0;
-}
-
static int
hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
{
/* get the configuration */
cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
- cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
- HNS3_CFG_TQP_DESC_N_M,
- HNS3_CFG_TQP_DESC_N_S);
cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
HNS3_CFG_PHY_ADDR_M,
cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
HNS3_CFG_MEDIA_TP_M,
HNS3_CFG_MEDIA_TP_S);
- cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
- HNS3_CFG_RX_BUF_LEN_M,
- HNS3_CFG_RX_BUF_LEN_S);
/* get mac address */
mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
* Field ext_rss_size_max obtained from firmware will be more flexible
* for future changes and expansions, which is an exponent of 2, instead
* of reading out directly. If this field is not zero, hns3 PF PMD
- * driver uses it as rss_size_max under one TC. Device, whose revision
- * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the
+ * uses it as rss_size_max under one TC. A device whose revision id is
+ * greater than or equal to PCI_REVISION_ID_HIP09_A obtains the
* maximum number of queues supported under a TC through this field.
*/
struct hns3_pf *pf = &hns->pf;
struct rte_eth_dev *eth_dev;
uint16_t device_id;
- uint8_t revision;
int ret;
eth_dev = &rte_eth_devices[hw->data->port_id];
device_id == HNS3_DEV_ID_200G_RDMA)
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
- /* Get PCI revision id */
- ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
- HNS3_PCI_REVISION_ID);
- if (ret != HNS3_PCI_REVISION_ID_LEN) {
- PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
- ret);
- return -EIO;
- }
- hw->revision = revision;
+ ret = hns3_get_pci_revision_id(hw, &hw->revision);
+ if (ret)
+ return ret;
+
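+ /*
+  * The number of valid MAC statistics registers depends on the running
+  * firmware, so query it before the stats module uses it.
+  */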
+ ret = hns3_query_mac_stats_reg_num(hw);
+ if (ret)
+ return ret;
- if (revision < PCI_REVISION_ID_HIP09_A) {
+ if (hw->revision < PCI_REVISION_ID_HIP09_A) {
hns3_set_default_dev_specifications(hw);
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
hw->mac.media_type = cfg.media_type;
hw->rss_size_max = cfg.rss_size_max;
- hw->rss_dis_flag = false;
memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
hw->mac.phy_addr = cfg.phy_addr;
- hw->num_tx_desc = cfg.tqp_desc_num;
- hw->num_rx_desc = cfg.tqp_desc_num;
hw->dcb_info.num_pg = 1;
hw->dcb_info.hw_pfc_map = 0;
* hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hw: pointer to struct hns3_hw
* @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculate successful, negative: fail
*/
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- if (!hns3_is_reset_pending(hns))
+ if (!hns3_is_reset_pending(hns)) {
hns3_update_linkstatus_and_event(hw, true);
- else
+ hns3_update_hw_stats(hw);
+ } else {
hns3_warn(hw, "Cancel the query when reset is pending");
+ }
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
}
struct hns3_hw *hw = &hns->hw;
int ret;
+ /*
+ * All queue-related HW operations must be performed after the TCAM
+ * table is configured.
+ */
ret = hns3_map_tqp(hw);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
}
/*
- * Validity of supported_speed for firber and copper media type can be
+ * Validity of supported_speed for fiber and copper media type can be
* guaranteed by the following policy:
* Copper:
* Although the initialization of the phy in the firmware may not be
- * completed, the firmware can guarantees that the supported_speed is
- * an valid value.
- * Firber:
- * If the version of firmware supports the acitive query way of the
+ * completed, the firmware can guarantee that the supported_speed is
+ * a valid value.
+ * Fiber:
+ * If the version of firmware supports the active query way of the
* HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
* through it. If unsupported, use the SFP's speed as the value of the
* supported_speed.
goto err_cmd_init;
}
- /* Hardware statistics of imissed registers cleared. */
- ret = hns3_update_imissed_stats(hw, true);
- if (ret) {
- hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
- goto err_cmd_init;
- }
-
hns3_config_all_msix_error(hw, true);
ret = rte_intr_callback_register(pci_dev->intr_handle,
goto err_get_config;
}
- ret = hns3_tqp_stats_init(hw);
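+ /* Initializes TQP stats and clears the hardware imissed statistics. */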
+ ret = hns3_stats_init(hw);
if (ret)
goto err_get_config;
err_fdir:
hns3_uninit_umv_space(hw);
err_init_hw:
- hns3_tqp_stats_uninit(hw);
+ hns3_stats_uninit(hw);
err_get_config:
hns3_pf_disable_irq0(hw);
rte_intr_disable(pci_dev->intr_handle);
hns3_flow_uninit(eth_dev);
hns3_fdir_filter_uninit(hns);
hns3_uninit_umv_space(hw);
- hns3_tqp_stats_uninit(hw);
+ hns3_stats_uninit(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_pf_disable_irq0(hw);
rte_intr_disable(pci_dev->intr_handle);
return 0;
}
-static inline uint32_t
+static uint32_t
hns3_get_link_speed(uint32_t link_speeds)
{
uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
return ret;
}
-static int
-hns3_map_rx_interrupt(struct rte_eth_dev *dev)
-{
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
- uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
- uint32_t intr_vector;
- uint16_t q_id;
- int ret;
-
- /*
- * hns3 needs a separate interrupt to be used as event interrupt which
- * could not be shared with task queue pair, so KERNEL drivers need
- * support multiple interrupt vectors.
- */
- if (dev->data->dev_conf.intr_conf.rxq == 0 ||
- !rte_intr_cap_multiple(intr_handle))
- return 0;
-
- rte_intr_disable(intr_handle);
- intr_vector = hw->used_rx_queues;
- /* creates event fd for each intr vector when MSIX is used */
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -EINVAL;
-
- /* Allocate vector list */
- if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
- hw->used_rx_queues)) {
- hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
- hw->used_rx_queues);
- ret = -ENOMEM;
- goto alloc_intr_vec_error;
- }
-
- if (rte_intr_allow_others(intr_handle)) {
- vec = RTE_INTR_VEC_RXTX_OFFSET;
- base = RTE_INTR_VEC_RXTX_OFFSET;
- }
-
- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3_bind_ring_with_vector(hw, vec, true,
- HNS3_RING_TYPE_RX, q_id);
- if (ret)
- goto bind_vector_error;
-
- if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
- goto bind_vector_error;
- /*
- * If there are not enough efds (e.g. not enough interrupt),
- * remaining queues will be bond to the last interrupt.
- */
- if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
- vec++;
- }
- rte_intr_enable(intr_handle);
- return 0;
-
-bind_vector_error:
- rte_intr_vec_list_free(intr_handle);
-alloc_intr_vec_error:
- rte_intr_efd_disable(intr_handle);
- return ret;
-}
-
-static int
-hns3_restore_rx_interrupt(struct hns3_hw *hw)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
- uint16_t q_id;
- int ret;
-
- if (dev->data->dev_conf.intr_conf.rxq == 0)
- return 0;
-
- if (rte_intr_dp_is_en(intr_handle)) {
- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3_bind_ring_with_vector(hw,
- rte_intr_vec_list_index_get(intr_handle,
- q_id),
- true, HNS3_RING_TYPE_RX, q_id);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
return 0;
}
-static void
-hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
-{
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
- struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
- uint16_t q_id;
-
- if (dev->data->dev_conf.intr_conf.rxq == 0)
- return;
-
- /* unmap the ring with vector */
- if (rte_intr_allow_others(intr_handle)) {
- vec = RTE_INTR_VEC_RXTX_OFFSET;
- base = RTE_INTR_VEC_RXTX_OFFSET;
- }
- if (rte_intr_dp_is_en(intr_handle)) {
- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- (void)hns3_bind_ring_with_vector(hw, vec, false,
- HNS3_RING_TYPE_RX,
- q_id);
- if (vec < base + rte_intr_nb_efd_get(intr_handle)
- - 1)
- vec++;
- }
- }
- /* Clean datapath event and queue/vec mapping */
- rte_intr_efd_disable(intr_handle);
- rte_intr_vec_list_free(intr_handle);
-}
-
static int
hns3_dev_stop(struct rte_eth_dev *dev)
{
/*
* Flow control auto-negotiation is not supported for fiber and
- * backpalne media type.
+ * backplane media type.
*/
case HNS3_MEDIA_TYPE_FIBER:
case HNS3_MEDIA_TYPE_BACKPLANE:
return hns3_get_autoneg_fc_mode(hw);
}
-static int
+int
hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return ret;
}
- ret = hns3_reset_all_tqps(hns);
+ ret = hns3_init_hardware(hns);
if (ret) {
- hns3_err(hw, "Failed to reset all queues: %d", ret);
+ hns3_err(hw, "Failed to init hardware: %d", ret);
return ret;
}
- ret = hns3_init_hardware(hns);
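+ /*
+  * Queue reset depends on configuration done in hns3_init_hardware(),
+  * so it must run only after hardware init succeeds.
+  */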
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
- hns3_err(hw, "Failed to init hardware: %d", ret);
+ hns3_err(hw, "Failed to reset all queues: %d", ret);
return ret;
}
}
/*
- * FEC mode order defined in hns3 hardware is inconsistend with
+ * FEC mode order defined in hns3 hardware is inconsistent with
* that defined in the ethdev library. So the sequence needs
* to be converted.
*/
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
struct hns3_pf *pf = &hns->pf;
-
struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
uint32_t cur_capa;
uint32_t num = FEC_CAPA_NUM;
if (ret < 0)
return ret;
- /* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */
+ /* HNS3 PMD only supports FEC modes with one bit set, e.g. 0x1, 0x4 */
if (!is_fec_mode_one_bit_set(mode)) {
hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
"FEC mode should be only one bit set", mode);
.timesync_adjust_time = hns3_timesync_adjust_time,
.timesync_read_time = hns3_timesync_read_time,
.timesync_write_time = hns3_timesync_write_time,
+ .eth_dev_priv_dump = hns3_eth_dev_priv_dump,
};
static const struct hns3_reset_ops hns3_reset_ops = {
hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
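+ /* Used by the Rx interrupt mapping code shared in hns3_common. */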
+ hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
}
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
- char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
- struct rte_ether_addr *eth_addr;
struct hns3_hw *hw = &hns->hw;
int ret;
goto err_init_pf;
}
- /* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
- sizeof(struct rte_ether_addr) *
- HNS3_UC_MACADDR_NUM, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
- "to store MAC addresses",
- sizeof(struct rte_ether_addr) *
- HNS3_UC_MACADDR_NUM);
- ret = -ENOMEM;
- goto err_rte_zmalloc;
- }
-
- eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
- if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
- rte_eth_random_addr(hw->mac.mac_addr);
- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- (struct rte_ether_addr *)hw->mac.mac_addr);
- hns3_warn(hw, "default mac_addr from firmware is an invalid "
- "unicast address, using random MAC address %s",
- mac_str);
- }
- rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
- ð_dev->data->mac_addrs[0]);
+ ret = hns3_init_mac_addrs(eth_dev);
+ if (ret != 0)
+ goto err_init_mac_addrs;
hw->adapter_state = HNS3_NIC_INITIALIZED;
hns3_info(hw, "hns3 dev initialization successful!");
return 0;
-err_rte_zmalloc:
+err_init_mac_addrs:
hns3_uninit_pf(eth_dev);
err_init_pf: