(pos + PCI_MSIX_FLAGS));
return 0;
}
- return -1;
+ return -ENXIO;
}
static int
hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
- __attribute__ ((unused)) uint32_t idx,
- __attribute__ ((unused)) uint32_t pool)
+ __rte_unused uint32_t idx,
+ __rte_unused uint32_t pool)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
int ret;
- if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- mac_addr);
- hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
- mac_str);
- return -EINVAL;
- }
-
+ /*
+ * It has been guaranteed in the rte layer of the DPDK framework that
+ * the input parameter named mac_addr is a valid address.
+ */
old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
rte_spinlock_lock(&hw->lock);
memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
- HNS3_TWO_ETHER_ADDR_LEN, false, NULL, 0);
+ HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
if (ret) {
- rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
- mac_addr);
- hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str,
- ret);
+ /*
+ * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
+ * driver. When the user has configured a MAC address for the VF
+ * device by the "ip link set ..." command based on the PF device,
+ * the hns3 PF kernel ethdev driver does not allow the VF driver to
+ * request reconfiguring a different default MAC address, and
+ * returns -EPERM to the VF driver through the mailbox.
+ */
+ if (ret == -EPERM) {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ old_addr);
+ hns3_warn(hw, "Has permanent mac addr(%s) for vf",
+ mac_str);
+ } else {
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ mac_addr);
+ hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
+ mac_str, ret);
+ }
}
rte_ether_addr_copy(mac_addr,
(struct rte_ether_addr *)hw->mac.mac_addr);
hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
mac_str, ret);
- return ret;
}
- return 0;
+ return ret;
}
static int
mac_addr);
hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
mac_str, ret);
- return ret;
}
- return 0;
+ return ret;
}
static int
}
static int
-hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
+hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hns3_mbx_vf_to_pf_cmd *req;
struct hns3_cmd_desc desc;
req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
+ /*
+ * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
+ * so promiscuous/allmulticast mode in the hns3 VF PMD driver has the
+ * following characteristics:
+ * 1. Promiscuous/allmulticast mode can be configured successfully only
+ * on a trusted VF device. On a non-trusted VF device, configuring
+ * promiscuous/allmulticast mode will fail. A hns3 VF device can be
+ * configured as trusted by the hns3 PF kernel ethdev driver on the
+ * host with the following command:
+ * "ip link set <eth num> vf <vf id> trust on"
+ * 2. After promiscuous mode is configured successfully, the hns3 VF PMD
+ * driver can receive both ingress and outgoing traffic: all ingress
+ * packets, plus all packets sent from the PF and the other VFs on the
+ * same physical port.
+ * 3. Note: because of hardware constraints, the VLAN filter is enabled
+ * by default and cannot be turned off on a VF device, so the VLAN
+ * filter is still effective even in promiscuous mode. If upper
+ * applications do not call the rte_eth_dev_vlan_filter API function
+ * to set a VLAN on the VF device, the hns3 VF PMD driver cannot
+ * receive packets with a VLAN tag in promiscuous mode.
+ */
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
req->msg[1] = en_bc_pmc ? 1 : 0;
+ req->msg[2] = en_uc_pmc ? 1 : 0;
+ req->msg[3] = en_mc_pmc ? 1 : 0;
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- hns3_err(hw, "Set promisc mode fail, status is %d", ret);
+ hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
+
+ return ret;
+}
+
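/*
 * Illustrative usage sketch, not part of this patch (assumes
 * <rte_ethdev.h> and a hypothetical port_id): with the new
 * promiscuous/allmulticast dev_ops registered below, an application
 * drives them through the generic ethdev API.
 */
static int
example_enable_promisc(uint16_t port_id)
{
	int ret;

	/* Dispatches to hns3vf_dev_promiscuous_enable() via dev_ops. */
	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0)
		return ret;

	/* On hns3 VF, promiscuous mode already implies allmulticast. */
	return rte_eth_allmulticast_enable(port_id);
}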
+static int
+hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3vf_set_promisc_mode(hw, true, true, true);
+ if (ret)
+ hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
+ ret);
+ return ret;
+}
+
+static int
+hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ bool allmulti = dev->data->all_multicast ? true : false;
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
+ if (ret)
+ hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
+ ret);
+ return ret;
+}
+
+static int
+hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (dev->data->promiscuous)
+ return 0;
+
+ ret = hns3vf_set_promisc_mode(hw, true, false, true);
+ if (ret)
+ hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
+ ret);
+ return ret;
+}
+
+static int
+hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (dev->data->promiscuous)
+ return 0;
+
+ ret = hns3vf_set_promisc_mode(hw, true, false, false);
+ if (ret)
+ hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
+ ret);
+ return ret;
+}
+
+static int
+hns3vf_restore_promisc(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ bool allmulti = hw->data->all_multicast ? true : false;
+
+ if (hw->data->promiscuous)
+ return hns3vf_set_promisc_mode(hw, true, true, true);
+
+ return hns3vf_set_promisc_mode(hw, true, false, allmulti);
+}
+
+static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+ bool mmap, enum hns3_ring_type queue_type,
+ uint16_t queue_id)
+{
+ struct hns3_vf_bind_vector_msg bind_msg;
+ const char *op_str;
+ uint16_t code;
+ int ret;
+
+ memset(&bind_msg, 0, sizeof(bind_msg));
+ code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+ HNS3_MBX_UNMAP_RING_TO_VECTOR;
+ bind_msg.vector_id = vector_id;
+
+ if (queue_type == HNS3_RING_TYPE_RX)
+ bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+ else
+ bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+ bind_msg.param[0].ring_type = queue_type;
+ bind_msg.ring_num = 1;
+ bind_msg.param[0].tqp_index = queue_id;
+ op_str = mmap ? "Map" : "Unmap";
+ ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+ sizeof(bind_msg), false, NULL, 0);
+ if (ret)
+ hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+ op_str, queue_id, bind_msg.vector_id, ret);
return ret;
}
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+ uint8_t vec;
+ int ret;
+ int i;
+
+ /*
+ * In the hns3 network engine, vector 0 is always the misc interrupt of
+ * this function, and vectors 1~N can be used for the queues of the
+ * function. Tx and Rx queues with the same number share one interrupt
+ * vector. During initialization, all the hardware mapping relationship
+ * configurations between queues and interrupt vectors must be cleared,
+ * so that errors caused by residual configurations, such as an
+ * unexpected Tx interrupt, can be avoided. Because of constraints in
+ * the hns3 hardware engine, we have to implement this clearing by
+ * binding all queues to the last interrupt vector and reserving that
+ * vector. This method decreases the maximum number of queues when
+ * upper applications call the rte_eth_dev_configure API function to
+ * enable Rx interrupts.
+ */
+ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+ hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+ for (i = 0; i < hw->intr_tqps_num; i++) {
+ /*
+ * Set gap limiter and rate limiter configuration of queue's
+ * interrupt.
+ */
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+ ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_TX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF fails to unbind TX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+
+ ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF fails to unbind RX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
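/*
 * Illustrative sketch, not part of this patch: the vector budget the
 * comment above implies. Of num_msi vectors, vector 0 serves the misc
 * interrupt and the last vector stays reserved for the unbind trick,
 * so at most num_msi - 2 queue pairs can own a dedicated Rx interrupt,
 * which is the value stored in hw->intr_tqps_num above.
 */
static inline uint16_t
example_intr_tqps_num(uint16_t num_msi)
{
	return num_msi >= 2 ? num_msi - 2 : 0;
}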
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
int ret;
/*
- * Hardware does not support where the number of rx and tx queues is
- * not equal in hip08.
+ * Hardware does not support individually enabling/disabling/resetting
+ * the Tx or Rx queue in the hns3 network engine. The driver must
+ * enable/disable/reset the Tx and Rx queues at the same time. When the
+ * number of Tx queues allocated by upper applications is not equal to
+ * the number of Rx queues, the driver sets up fake Tx or Rx queues to
+ * adjust the numbers of Tx/Rx queues; otherwise, the network engine
+ * cannot work as usual. These fake queues are imperceptible to, and
+ * cannot be used by, upper applications.
*/
- if (nb_rx_q != nb_tx_q) {
- hns3_err(hw,
- "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
- "Hardware does not support this configuration!",
- nb_rx_q, nb_tx_q);
- return -EINVAL;
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+ return ret;
}
+ hw->adapter_state = HNS3_NIC_CONFIGURING;
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
hns3_err(hw, "setting link speed/duplex not supported");
- return -EINVAL;
+ ret = -EINVAL;
+ goto cfg_err;
}
- hw->adapter_state = HNS3_NIC_CONFIGURING;
-
/* When RSS is not configured, redirect the packet queue 0 */
if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
rss_conf = conf->rx_adv_conf.rss_conf;
return 0;
cfg_err:
+ (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
hw->adapter_state = HNS3_NIC_INITIALIZED;
+
return ret;
}
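/*
 * Illustrative usage sketch, not part of this patch (assumes
 * <rte_ethdev.h> and <string.h>; port_id and the 8/2 split are
 * hypothetical): with the fake queue mechanism above, an application
 * may request unequal Rx/Tx queue counts and the driver pads the
 * smaller side internally.
 */
static int
example_configure_unequal_queues(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* 8 Rx queues, 2 Tx queues: rejected before, accepted now. */
	return rte_eth_dev_configure(port_id, 8, 2, &conf);
}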
uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "Failed to set mtu, port %u must be stopped "
- "before configuration", dev->data->port_id);
- return -EBUSY;
- }
-
+ /*
+ * The hns3 PF/VF devices on the same port share the hardware MTU
+ * configuration. Currently, we send a mailbox message to inform the
+ * hns3 PF kernel ethdev driver to finish the hardware MTU
+ * configuration on behalf of the hns3 VF PMD driver, so there is no
+ * need to stop the port for a hns3 VF device. The MTU value issued by
+ * the hns3 VF PMD driver must be less than or equal to the PF's MTU.
+ */
if (rte_atomic16_read(&hw->reset.resetting)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
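/*
 * Illustrative usage sketch, not part of this patch (port_id and the
 * 1500-byte MTU are hypothetical): because the VF relays the MTU to
 * the PF over mailbox, the port no longer has to be stopped first; the
 * requested value must still not exceed the PF's MTU.
 */
static int
example_set_mtu_while_running(uint16_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 1500);
}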
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ uint16_t q_num = hw->tqps_num;
- info->max_rx_queues = hw->tqps_num;
+ /*
+ * In interrupt mode, 'max_rx_queues' is set based on the number of
+ * MSI-X interrupt resources of the hardware.
+ */
+ if (hw->data->dev_conf.intr_conf.rxq == 1)
+ q_num = hw->intr_tqps_num;
+
+ info->max_rx_queues = q_num;
info->max_tx_queues = hw->tqps_num;
info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
info->min_rx_bufsize = hw->rx_buf_len;
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
info->tx_queue_offload_capa);
info->rx_desc_lim = (struct rte_eth_desc_lim) {
return 0;
}
+static int
+hns3vf_get_host_mac_addr(struct hns3_hw *hw)
+{
+ uint8_t host_mac[RTE_ETHER_ADDR_LEN];
+ int ret;
+
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
+ true, host_mac, RTE_ETHER_ADDR_LEN);
+ if (ret) {
+ hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
+ return ret;
+ }
+
+ memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
+
+ return 0;
+}
+
static int
hns3vf_get_configuration(struct hns3_hw *hw)
{
int ret;
hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
+ hw->rss_dis_flag = false;
/* Get queue configuration from PF */
ret = hns3vf_get_queue_info(hw);
if (ret)
return ret;
+ /* Get user defined VF MAC addr from PF */
+ ret = hns3vf_get_host_mac_addr(hw);
+ if (ret)
+ return ret;
+
/* Get tc configuration from PF */
return hns3vf_get_tc_info(hw);
}
-static void
+static int
hns3vf_set_tc_info(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
uint16_t nb_rx_q = hw->data->nb_rx_queues;
- uint16_t new_tqps;
+ uint16_t nb_tx_q = hw->data->nb_tx_queues;
uint8_t i;
hw->num_tc = 0;
if (hw->hw_tc_map & BIT(i))
hw->num_tc++;
- new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
- hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
- hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
+ if (nb_rx_q < hw->num_tc) {
+ hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ nb_rx_q, hw->num_tc);
+ return -EINVAL;
+ }
- hns3_tc_queue_mapping_cfg(hw);
+ if (nb_tx_q < hw->num_tc) {
+ hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ nb_tx_q, hw->num_tc);
+ return -EINVAL;
+ }
+
+ hns3_set_rss_size(hw, nb_rx_q);
+ hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
+
+ return 0;
}
static void
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
unsigned int tmp_mask;
+ int ret = 0;
+
+ if (rte_atomic16_read(&hw->reset.resetting)) {
+ hns3_err(hw, "vf set vlan offload failed during resetting, "
+ "mask = 0x%x", mask);
+ return -EIO;
+ }
tmp_mask = (unsigned int)mask;
/* Vlan stripping setting */
rte_spinlock_lock(&hw->lock);
/* Enable or disable VLAN stripping */
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- hns3vf_en_hw_strip_rxvtag(hw, true);
+ ret = hns3vf_en_hw_strip_rxvtag(hw, true);
else
- hns3vf_en_hw_strip_rxvtag(hw, false);
+ ret = hns3vf_en_hw_strip_rxvtag(hw, false);
rte_spinlock_unlock(&hw->lock);
}
- return 0;
+ return ret;
}
static int
eth_dev);
}
+static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+ struct hns3_vf_res_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint16_t num_msi;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "query vf resource failed, ret = %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_vf_res_cmd *)desc.data;
+ num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+ HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+ if (num_msi < HNS3_MIN_VECTOR_NUM) {
+ hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+ num_msi, HNS3_MIN_VECTOR_NUM);
+ return -EINVAL;
+ }
+
+ hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+ return 0;
+}
+
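/*
 * Illustrative sketch, not part of this patch: the clamp applied above.
 * One vector beyond the queue pairs covers the misc interrupt, so any
 * vectors past tqps_num + 1 would go unused.
 */
static inline uint16_t
example_clamp_num_msi(uint16_t num_msi, uint16_t tqps_num)
{
	return num_msi > tqps_num + 1 ? tqps_num + 1 : num_msi;
}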
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
uint16_t mtu = hw->data->mtu;
int ret;
- ret = hns3vf_set_promisc_mode(hw, true);
+ ret = hns3vf_set_promisc_mode(hw, true, false, false);
if (ret)
return ret;
return 0;
err_init_hardware:
- (void)hns3vf_set_promisc_mode(hw, false);
+ (void)hns3vf_set_promisc_mode(hw, false, false, false);
return ret;
}
goto err_cmd_init;
}
+ /* Get VF resource */
+ ret = hns3_query_vf_resource(hw);
+ if (ret)
+ goto err_cmd_init;
+
rte_spinlock_init(&hw->mbx_resp.lock);
hns3vf_clear_event_cause(hw, 0);
goto err_get_config;
}
- rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */
+ /*
+ * The hns3 PF ethdev driver in the kernel supports setting the VF MAC
+ * address on the host by the "ip link set ..." command. To avoid
+ * incorrect scenarios, for example, the hns3 VF PMD driver failing to
+ * receive and send packets after the user configures a MAC address by
+ * that command, the hns3 VF PMD driver keeps the same MAC address
+ * strategy as the hns3 kernel ethdev driver during initialization. If
+ * the user has configured a MAC address for the VF device by the ip
+ * command, the hns3 VF PMD driver starts with it; otherwise, it starts
+ * with a random MAC address.
+ */
+ ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
+ if (ret)
+ rte_eth_random_addr(hw->mac.mac_addr);
ret = hns3vf_clear_vport_list(hw);
if (ret) {
hns3_set_default_rss_args(hw);
- (void)hns3_stats_reset(eth_dev);
+ /*
+ * During initialization, all the hardware mapping relationship
+ * configurations between queues and interrupt vectors must be cleared,
+ * so that errors caused by residual configurations, such as an
+ * unexpected interrupt, can be avoided.
+ */
+ ret = hns3vf_init_ring_with_vector(hw);
+ if (ret)
+ goto err_get_config;
+
return 0;
err_get_config:
hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
eth_dev);
err_intr_callback_register:
- hns3_cmd_uninit(hw);
-
err_cmd_init:
+ hns3_cmd_uninit(hw);
hns3_cmd_destroy_queue(hw);
-
err_cmd_init_queue:
hw->io_base = NULL;
hns3_rss_uninit(hns);
(void)hns3vf_set_alive(hw, false);
- (void)hns3vf_set_promisc_mode(hw, false);
+ (void)hns3vf_set_promisc_mode(hw, false, false, false);
hns3vf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
}
static void
-hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
+hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ /* unmap the ring with vector */
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ (void)hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX,
+ q_id);
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+hns3vf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
PMD_INIT_FUNC_TRACE();
hw->adapter_state = HNS3_NIC_STOPPING;
- hns3_set_rxtx_function(eth_dev);
+ hns3_set_rxtx_function(dev);
rte_wmb();
/* Disable datapath on secondary process. */
- hns3_mp_req_stop_rxtx(eth_dev);
+ hns3_mp_req_stop_rxtx(dev);
/* Prevent crashes when queues are still in use. */
rte_delay_ms(hw->tqps_num);
hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
+ rte_eal_alarm_cancel(hns3vf_service_handler, dev);
rte_spinlock_unlock(&hw->lock);
+
+ hns3vf_unmap_rx_interrupt(dev);
}
static void
hns3_reset_abort(hns);
hw->adapter_state = HNS3_NIC_CLOSED;
rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
- rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
hns3vf_configure_all_mc_mac_addr(hns, true);
hns3vf_remove_all_vlan_table(hns);
hns3vf_uninit_vf(eth_dev);
struct hns3_mac *mac = &hw->mac;
struct rte_eth_link new_link;
- hns3vf_request_link_info(hw);
-
memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
case ETH_SPEED_NUM_10M:
struct hns3_hw *hw = &hns->hw;
int ret;
- hns3vf_set_tc_info(hns);
+ ret = hns3vf_set_tc_info(hns);
+ if (ret)
+ return ret;
ret = hns3_start_queues(hns, reset_queue);
- if (ret) {
+ if (ret)
hns3_err(hw, "Failed to start queues: %d", ret);
- return ret;
+
+ return ret;
+}
+
+static int
+hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) {
+ intr_vector = hw->used_rx_queues;
+ /* It creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
+ }
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->used_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ hns3_err(hw, "Failed to allocate %d rx_queues"
+ " intr_vec", hw->used_rx_queues);
+ ret = -ENOMEM;
+ goto vf_alloc_intr_vec_error;
+ }
}
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+ ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX,
+ q_id);
+ if (ret)
+ goto vf_bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ rte_intr_enable(intr_handle);
return 0;
+
+vf_bind_vector_error:
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ return ret;
+vf_alloc_intr_vec_error:
+ rte_intr_efd_disable(intr_handle);
+ return ret;
+}
+
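/*
 * Illustrative usage sketch, not part of this patch (assumes
 * <rte_ethdev.h>; port_id/queue_id are hypothetical and error handling
 * is trimmed): once the mapping above succeeds, an application can
 * sleep on a queue interrupt instead of busy polling.
 */
static void
example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;

	/* Register the queue's event fd with this thread's epoll set. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* Arm the interrupt mapped by hns3vf_map_rx_interrupt(). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Block until the queue's MSI-X vector fires, then disarm. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}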
+static void
+hns3vf_restore_filter(struct rte_eth_dev *dev)
+{
+ hns3_restore_rss_filter(dev);
}
static int
-hns3vf_dev_start(struct rte_eth_dev *eth_dev)
+hns3vf_dev_start(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
int ret;
PMD_INIT_FUNC_TRACE();
if (rte_atomic16_read(&hw->reset.resetting))
return -EBUSY;
+
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
ret = hns3vf_do_start(hns, true);
}
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
- hns3_set_rxtx_function(eth_dev);
- hns3_mp_req_start_rxtx(eth_dev);
- return 0;
+
+ ret = hns3vf_map_rx_interrupt(dev);
+ if (ret)
+ return ret;
+ hns3_set_rxtx_function(dev);
+ hns3_mp_req_start_rxtx(dev);
+ rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+
+ hns3vf_restore_filter(dev);
+
+ return ret;
}
static bool
struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
eth_dev = &rte_eth_devices[hw->data->port_id];
hns3_set_rxtx_function(eth_dev);
hns3_mp_req_start_rxtx(eth_dev);
+ if (hw->adapter_state == HNS3_NIC_STARTED)
+ hns3vf_service_handler(eth_dev);
+
+ return 0;
+}
+
+static int
+hns3vf_check_default_mac_change(struct hns3_hw *hw)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct rte_ether_addr *hw_mac;
+ int ret;
+
+ /*
+ * The hns3 PF ethdev driver in the kernel supports setting the VF MAC
+ * address on the host by the "ip link set ..." command. If the hns3 PF
+ * kernel ethdev driver sets the MAC address for the VF device after
+ * the related VF device has been initialized, the PF driver notifies
+ * the VF driver to reset the VF device so that the new MAC address
+ * takes effect immediately. The hns3 VF PMD driver should check
+ * whether the MAC address has been changed by the PF kernel ethdev
+ * driver; if it has, the VF driver should configure the hardware with
+ * the new MAC address in the hardware-configuration recovery stage of
+ * the reset process.
+ */
+ ret = hns3vf_get_host_mac_addr(hw);
+ if (ret)
+ return ret;
+
+ hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
+ ret = rte_is_zero_ether_addr(hw_mac);
+ if (ret) {
+ rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
+ } else {
+ ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
+ if (!ret) {
+ rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+ &hw->data->mac_addrs[0]);
+ hns3_warn(hw, "Default MAC address has been changed to:"
+ " %s by the host PF kernel ethdev driver",
+ mac_str);
+ }
+ }
- hns3vf_service_handler(eth_dev);
return 0;
}
struct hns3_hw *hw = &hns->hw;
int ret;
+ ret = hns3vf_check_default_mac_change(hw);
+ if (ret)
+ return ret;
+
ret = hns3vf_configure_mac_addr(hns, false);
if (ret)
return ret;
if (ret)
goto err_mc_mac;
+ ret = hns3vf_restore_promisc(hns);
+ if (ret)
+ goto err_vlan_table;
+
ret = hns3vf_restore_vlan_conf(hns);
if (ret)
goto err_vlan_table;
ret = hns3_cmd_init(hw);
if (ret) {
hns3_err(hw, "Failed to init cmd: %d", ret);
- goto err_cmd_init;
+ return ret;
}
if (hw->reset.level == HNS3_VF_FULL_RESET) {
ret = hns3_reset_all_queues(hns);
if (ret) {
hns3_err(hw, "Failed to reset all queues: %d", ret);
- goto err_init;
+ return ret;
}
ret = hns3vf_init_hardware(hns);
if (ret) {
hns3_err(hw, "Failed to init hardware: %d", ret);
- goto err_init;
+ return ret;
}
return 0;
-
-err_cmd_init:
- hns3vf_set_bus_master(pci_dev, false);
-err_init:
- hns3_cmd_uninit(hw);
- return ret;
}
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.dev_stop = hns3vf_dev_stop,
.dev_close = hns3vf_dev_close,
.mtu_set = hns3vf_dev_mtu_set,
+ .promiscuous_enable = hns3vf_dev_promiscuous_enable,
+ .promiscuous_disable = hns3vf_dev_promiscuous_disable,
+ .allmulticast_enable = hns3vf_dev_allmulticast_enable,
+ .allmulticast_disable = hns3vf_dev_allmulticast_disable,
.stats_get = hns3_stats_get,
.stats_reset = hns3_stats_reset,
.xstats_get = hns3_dev_xstats_get,
.tx_queue_setup = hns3_tx_queue_setup,
.rx_queue_release = hns3_dev_rx_queue_release,
.tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
.dev_configure = hns3vf_dev_configure,
.mac_addr_add = hns3vf_add_mac_addr,
.mac_addr_remove = hns3vf_remove_mac_addr,
}
rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
eth_dev);
- rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
- eth_dev);
return 0;
err_rte_zmalloc: