* Copyright(c) 2018-2019 Hisilicon Limited.
*/
-#include <errno.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <unistd.h>
-#include <rte_atomic.h>
+#include <rte_alarm.h>
#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_dev.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
-#include <rte_interrupts.h>
#include <rte_io.h>
-#include <rte_log.h>
#include <rte_pci.h>
#include "hns3_ethdev.h"
hns3_pf_disable_irq0(hw);
event_cause = hns3_check_event_cause(hns, &clearval);
-
/* vector 0 interrupt is shared with reset and mailbox source events. */
if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
hns3_warn(hw, "Received err interrupt");
hns3_warn(hw,
"hw_vlan_reject_tagged or hw_vlan_reject_untagged "
"configuration is not supported! Ignore these two "
- "parameters: hw_vlan_reject_tagged(%d), "
- "hw_vlan_reject_untagged(%d)",
+ "parameters: hw_vlan_reject_tagged(%u), "
+ "hw_vlan_reject_untagged(%u)",
txmode->hw_vlan_reject_tagged,
txmode->hw_vlan_reject_untagged);
ret = hns3_vlan_pvid_set(dev, txmode->pvid,
txmode->hw_vlan_insert_pvid);
if (ret)
- hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d",
+ hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
txmode->pvid, ret);
return ret;
uint32_t j;
if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
"invalid. valid range: 0~%d",
nb_mc_addr, HNS3_MC_MACADDR_NUM);
return -EINVAL;
for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
- hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
"is not equal to one in tx direction.",
i, dcb_rx_conf->dcb_tc[i]);
return -EINVAL;
op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
- hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.",
op_str, queue_id, req->int_vector_id, status);
return status;
}
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+ /*
+ * QL (quantity limiter) is not used currently, so set it to 0 to
+ * disable it.
+ */
hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
ret = hns3_bind_ring_with_vector(hw, vec, false,
HNS3_RING_TYPE_TX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
HNS3_RING_TYPE_RX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
}
hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
hw->rss_key_size = HNS3_RSS_KEY_SIZE;
hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
+ hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}
static void
hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
+ hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}
static int
if (revision < PCI_REVISION_ID_HIP09_A) {
hns3_set_default_dev_specifications(hw);
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
- hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
+ hw->rss_info.ipv6_sctp_offload_supported = false;
return 0;
}
}
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
- hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
+ hw->rss_info.ipv6_sctp_offload_supported = true;
return 0;
}
ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
if (ret) {
- PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
+ PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
cfg.default_speed, ret);
return ret;
}
hi_thrd = shared_buf - pf->dv_buf_size;
if (tc_num <= NEED_RESERVE_TC_NUM)
- hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
- / BUF_MAX_PERCENT;
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
+ BUF_MAX_PERCENT;
if (tc_num)
hi_thrd = hi_thrd / tc_num;
for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
priv = &buf_alloc->priv_buf[i];
mask = BIT((uint8_t)i);
-
- if (hw->hw_tc_map & mask &&
- hw->dcb_info.hw_pfc_map & mask) {
+ if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
-
priv->enable = 0;
priv->wl.low = 0;
priv->wl.high = 0;
if (cmdq_resp) {
PMD_INIT_LOG(ERR,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
break;
default:
PMD_INIT_LOG(ERR,
- "add mac ethertype failed for undefined, code=%d.",
+ "add mac ethertype failed for undefined, code=%u.",
resp_code);
return_status = -EIO;
break;
hns3_promisc_param_init(&param, false, false, false, func_id);
ret = hns3_cmd_set_promisc_mode(hw, &param);
if (ret) {
- PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode,"
+ PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
" ret = %d", func_id, ret);
return ret;
}
hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
{
struct hns3_mac *mac = &hw->mac;
+ uint32_t cur_speed = mac->link_speed;
int ret;
duplex = hns3_check_speed_dup(duplex, speed);
return ret;
mac->link_speed = speed;
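+ /*
+ * Reconfigure the port shaper for the updated link speed and roll the
+ * speed back if the reconfiguration fails.
+ */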
+ ret = hns3_dcb_port_shaper_cfg(hw);
+ if (ret) {
+ hns3_err(hw, "failed to configure port shaper, ret = %d.", ret);
+ mac->link_speed = cur_speed;
+ return ret;
+ }
+
mac->link_duplex = duplex;
return 0;
err_config_mac_mode:
hns3_dev_release_mbufs(hns);
- hns3_reset_all_tqps(hns);
+ /*
+ * This is the error handling path: hns3_reset_all_tqps() prints its
+ * own error message on failure, so there is no need to check its
+ * return value here. Keep ret as the error code that caused the
+ * exception.
+ */
+ (void)hns3_reset_all_tqps(hns);
return ret;
}
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %d rx_queues"
+ hns3_err(hw, "Failed to allocate %u rx_queues"
" intr_vec", hw->used_rx_queues);
ret = -ENOMEM;
goto alloc_intr_vec_error;
}
}
-static void
+static int
hns3_dev_stop(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_started = 0;
hw->adapter_state = HNS3_NIC_STOPPING;
hns3_set_rxtx_function(dev);
hns3_rx_scattered_reset(dev);
rte_eal_alarm_cancel(hns3_service_handler, dev);
rte_spinlock_unlock(&hw->lock);
+
+ return 0;
}
static int
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
rte_free(eth_dev->process_private);
}
if (hw->adapter_state == HNS3_NIC_STARTED)
- hns3_dev_stop(eth_dev);
+ ret = hns3_dev_stop(eth_dev);
hw->adapter_state = HNS3_NIC_CLOSING;
hns3_reset_abort(hns);
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
- hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+ hns3_warn(hw, "Close port %u finished", hw->data->port_id);
- return 0;
+ return ret;
}
static int
return -EINVAL;
}
if (!fc_conf->pause_time) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
fc_conf->pause_time);
return -EINVAL;
}
return -EINVAL;
}
if (pfc_conf->fc.pause_time == 0) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
pfc_conf->fc.pause_time);
return -EINVAL;
}
/* Enable interrupt of all rx queues before enabling queues */
hns3_dev_all_rx_queue_intr_enable(hw, true);
+ /*
+ * The enable state of each rxq and txq has to be restored after
+ * reset, so restore it here before enabling all tqps.
+ */
+ hns3_restore_tqp_enable_state(hw);
/*
* When finished the initialization, enable queues to receive
* and transmit packets.
hns3_set_rxtx_function(eth_dev);
eth_dev->dev_ops = &hns3_eth_dev_ops;
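+ /* Callback backing rte_eth_rx_queue_count() for this port. */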
+ eth_dev->rx_queue_count = hns3_rx_queue_count;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
ret = hns3_mp_init_secondary();
if (ret) {
return 0;
}
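+ /* Have the ethdev layer fill per-queue basic stats into xstats. */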
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
ret = hns3_mp_init_primary();
if (ret) {
PMD_INIT_LOG(ERR,
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
- eth_dev->tx_pkt_prepare = NULL;
if (hw->adapter_state < HNS3_NIC_CLOSING)
hns3_dev_close(eth_dev);
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
- { .vendor_id = 0, /* sentinel */ },
+ { .vendor_id = 0, }, /* sentinel */
};
static struct rte_pci_driver rte_hns3_pmd = {