	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
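+	/* A committed TM/QoS config holds VF bandwidth settings: clear them
+	 * and free the TM config before tearing the DCF down.
+	 */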
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (hw->tm_conf.committed) {
+			ice_dcf_clear_bw(hw);
+			ice_dcf_tm_conf_uninit(eth_dev);
+		}
+
	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);
	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
-		ice_dcf_tm_conf_uninit(eth_dev);
-
	rte_free(hw->arq_buf);
	rte_free(hw->vf_vsi_map);
	rte_free(hw->vf_res);
	rte_free(hw->rss_lut);
	rte_free(hw->rss_key);
+	rte_free(hw->qos_bw_cfg);
}
static int
#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
-#define IAVF_RXDID_COMMS_GENERIC 16
+#define IAVF_RXDID_COMMS_OVS_1 22
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;
-		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;
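+		/* rxq[i] is only valid for queues the application actually
+		 * configured, so touch it only after the bounds check above.
+		 */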
+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
-		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
-			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+		    BIT(IAVF_RXDID_COMMS_OVS_1)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		   sizeof(addr->addr_bytes));
-	PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
		    addr->addr_bytes[0], addr->addr_bytes[1],
		    addr->addr_bytes[2], addr->addr_bytes[3],
		    addr->addr_bytes[4], addr->addr_bytes[5]);