X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_dcf.c;h=366ff0a9074b43ed6511d4a7abeafb011e1ed416;hb=323263717774df318d8a6e64ac8bfe546e03b8f6;hp=44dbd3bb8445825e06b5b40b3d09f76a93a89fed;hpb=98a181ed865db0880be3c12a72772a649417d707;p=dpdk.git

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 44dbd3bb84..366ff0a907 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -17,8 +17,8 @@
 #include 
 #include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -234,7 +234,9 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 
 	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
-	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+	       VIRTCHNL_VF_OFFLOAD_QOS;
 
 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
 					  (uint8_t *)&caps, sizeof(caps));
@@ -335,6 +337,9 @@ ice_dcf_mode_disable(struct ice_dcf_hw *hw)
 {
 	int err;
 
+	if (hw->resetting)
+		return 0;
+
 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
 					  NULL, 0);
 	if (err) {
@@ -504,9 +509,7 @@ ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 	}
 
 	do {
-		if ((!desc_cmd.pending && !buff_cmd.pending) ||
-		    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
-		    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
+		if (!desc_cmd.pending && !buff_cmd.pending)
 			break;
 
 		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
@@ -531,17 +534,28 @@
 int
 ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
-	int err = 0;
+	int i = 0;
+	int err = -1;
 
 	rte_spinlock_lock(&hw->vc_cmd_send_lock);
 
-	rte_intr_disable(&pci_dev->intr_handle);
+	rte_intr_disable(pci_dev->intr_handle);
 	ice_dcf_disable_irq0(hw);
 
-	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
-		err = -1;
+	for (;;) {
+		if (ice_dcf_get_vf_resource(hw) == 0 &&
+		    ice_dcf_get_vf_vsi_map(hw) >= 0) {
+			err = 0;
+			break;
+		}
+
+		if (++i >= ICE_DCF_ARQ_MAX_RETRIES)
+			break;
 
-	rte_intr_enable(&pci_dev->intr_handle);
+		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
+	}
+
+	rte_intr_enable(pci_dev->intr_handle);
 	ice_dcf_enable_irq0(hw);
 	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
@@ -577,7 +591,9 @@ int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	int ret;
+	int ret, size;
+
+	hw->resetting = false;
 
 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
 	hw->avf.back = hw;
@@ -669,10 +685,20 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		}
 	}
 
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+		ice_dcf_tm_conf_init(eth_dev);
+		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg) {
+			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+			goto err_rss;
+		}
+	}
+
 	hw->eth_dev = eth_dev;
-	rte_intr_callback_register(&pci_dev->intr_handle,
+	rte_intr_callback_register(pci_dev->intr_handle,
 				   ice_dcf_dev_interrupt_handler, hw);
-	rte_intr_enable(&pci_dev->intr_handle);
+	rte_intr_enable(pci_dev->intr_handle);
 	ice_dcf_enable_irq0(hw);
 
 	return 0;
@@ -694,7 +720,13 @@ void
 ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (hw->tm_conf.committed) {
+			ice_dcf_clear_bw(hw);
+			ice_dcf_tm_conf_uninit(eth_dev);
+		}
 
 	ice_dcf_disable_irq0(hw);
 	rte_intr_disable(intr_handle);
@@ -705,10 +737,25 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	iavf_shutdown_adminq(&hw->avf);
 
 	rte_free(hw->arq_buf);
+	hw->arq_buf = NULL;
+
 	rte_free(hw->vf_vsi_map);
+	hw->vf_vsi_map = NULL;
+
 	rte_free(hw->vf_res);
+	hw->vf_res = NULL;
+
 	rte_free(hw->rss_lut);
+	hw->rss_lut = NULL;
+
 	rte_free(hw->rss_key);
+	hw->rss_key = NULL;
+
+	rte_free(hw->qos_bw_cfg);
+	hw->qos_bw_cfg = NULL;
+
+	rte_free(hw->ets_config);
+	hw->ets_config = NULL;
 }
 
 static int
@@ -790,7 +837,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
 		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
 		/* set all lut items to default queue */
 		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
@@ -830,7 +877,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 
 #define IAVF_RXDID_LEGACY_0 0
 #define IAVF_RXDID_LEGACY_1 1
-#define IAVF_RXDID_COMMS_GENERIC 16
+#define IAVF_RXDID_COMMS_OVS_1 22
 
 int
 ice_dcf_configure_queues(struct ice_dcf_hw *hw)
@@ -865,11 +912,11 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
 		}
 		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
 		vc_qp->rxq.queue_id = i;
-		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
 
 		if (i >= hw->eth_dev->data->nb_rx_queues)
 			continue;
 
+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
 		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
 		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
 		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
@@ -878,8 +925,8 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
 		if (hw->vf_res->vf_cap_flags &
 		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
 		    hw->supported_rxdid &
-		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
-			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+		    BIT(IAVF_RXDID_COMMS_OVS_1)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
 			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
 				    "Queue[%d]", vc_qp->rxq.rxdid, i);
 		} else {
@@ -992,6 +1039,9 @@ ice_dcf_disable_queues(struct ice_dcf_hw *hw)
 	struct dcf_virtchnl_cmd args;
 	int err;
 
+	if (hw->resetting)
+		return 0;
+
 	memset(&queue_select, 0, sizeof(queue_select));
 
 	queue_select.vsi_id = hw->vsi_res->vsi_id;
@@ -1046,6 +1096,14 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
 	struct dcf_virtchnl_cmd args;
 	int len, err = 0;
 
+	if (hw->resetting) {
+		if (!add)
+			return 0;
+
+		PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
+		return -EIO;
+	}
+
 	len = sizeof(struct virtchnl_ether_addr_list);
 	addr = hw->eth_dev->data->mac_addrs;
 	len += sizeof(struct virtchnl_ether_addr);
@@ -1058,10 +1116,8 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
 	rte_memcpy(list->list[0].addr, addr->addr_bytes,
 		   sizeof(addr->addr_bytes));
-	PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
-		    addr->addr_bytes[0], addr->addr_bytes[1],
-		    addr->addr_bytes[2], addr->addr_bytes[3],
-		    addr->addr_bytes[4], addr->addr_bytes[5]);
+	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
+		    RTE_ETHER_ADDR_BYTES(addr));
 
 	list->vsi_id = hw->vsi_res->vsi_id;
 	list->num_elements = 1;
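
Note: the hunk in ice_dcf_handle_vsi_update_event() above replaces a single query attempt with a bounded retry loop: keep polling until both queries succeed in the same pass, give up after a fixed number of attempts, and sleep between attempts. The standalone C sketch below illustrates only that pattern and is not the driver code; the poll_resource()/poll_vsi_map() stubs, the retry limit of 8 and the 2 ms delay are illustrative assumptions standing in for ice_dcf_get_vf_resource(), ice_dcf_get_vf_vsi_map(), ICE_DCF_ARQ_MAX_RETRIES and ICE_DCF_ARQ_CHECK_TIME.

#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-ins for ICE_DCF_ARQ_MAX_RETRIES / ICE_DCF_ARQ_CHECK_TIME;
 * the real values live in the driver headers and are not assumed here. */
#define SKETCH_MAX_RETRIES   8
#define SKETCH_CHECK_TIME_MS 2

/* Hypothetical stand-ins for ice_dcf_get_vf_resource()/ice_dcf_get_vf_vsi_map():
 * here they simply start succeeding on the third poll. */
static int poll_resource(int attempt) { return attempt >= 2 ? 0 : -1; }
static int poll_vsi_map(int attempt)  { return attempt >= 2 ? 0 : -1; }

/* Bounded retry: report success only when both polls succeed in the same
 * pass, otherwise sleep and retry up to SKETCH_MAX_RETRIES times. */
static int handle_update(void)
{
	int i = 0;
	int err = -1;

	for (;;) {
		if (poll_resource(i) == 0 && poll_vsi_map(i) >= 0) {
			err = 0;
			break;
		}

		if (++i >= SKETCH_MAX_RETRIES)
			break;	/* give up; err stays -1 */

		usleep(SKETCH_CHECK_TIME_MS * 1000);
	}

	return err;
}

int main(void)
{
	printf("handle_update() -> %d\n", handle_update());
	return 0;
}

As in the patched driver, err starts negative and is cleared only inside the loop, so a caller sees failure both when the retries are exhausted and when neither query ever succeeds.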