#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>
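+	/* Capability flags requested from the PF: the VLAN V2 and QoS
+	 * offloads are asked for on top of the base DCF set.
+	 */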
caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
- VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+ VIRTCHNL_VF_OFFLOAD_QOS;
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
(uint8_t *)&caps, sizeof(caps));
{
int err;
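+	/* Nothing to undo on the PF side while the VF is resetting. */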
+ if (hw->resetting)
+ return 0;
+
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
NULL, 0);
if (err) {
}
do {
- if ((!desc_cmd.pending && !buff_cmd.pending) ||
- (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
- (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
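+		/* Wait until both the descriptor and the buffer command
+		 * have completed before checking their return codes.
+		 */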
+ if (!desc_cmd.pending && !buff_cmd.pending)
break;
rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
- int err = 0;
+ int i = 0;
+ int err = -1;
rte_spinlock_lock(&hw->vc_cmd_send_lock);
- rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_disable(pci_dev->intr_handle);
ice_dcf_disable_irq0(hw);
- if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
- err = -1;
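+	/* The PF may need time to settle after a VSI update; retry the
+	 * resource query until it succeeds or the retry budget is spent.
+	 */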
+ for (;;) {
+ if (ice_dcf_get_vf_resource(hw) == 0 &&
+ ice_dcf_get_vf_vsi_map(hw) >= 0) {
+ err = 0;
+ break;
+ }
+
+ if (++i >= ICE_DCF_ARQ_MAX_RETRIES)
+ break;
+
+		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
+	}
+
-	rte_intr_enable(&pci_dev->intr_handle);
+	rte_intr_enable(pci_dev->intr_handle);
ice_dcf_enable_irq0(hw);
rte_spinlock_unlock(&hw->vc_cmd_send_lock);
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- int ret;
+ int ret, size;
+
+ hw->resetting = false;
hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
hw->avf.back = hw;
}
}
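+	/* With the QoS capability granted, set up the TM configuration
+	 * and allocate one bandwidth-config pointer per VF.
+	 */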
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+ ice_dcf_tm_conf_init(eth_dev);
+ size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+ hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+ if (!hw->qos_bw_cfg) {
+ PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+ goto err_rss;
+ }
+ }
+
hw->eth_dev = eth_dev;
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(pci_dev->intr_handle,
ice_dcf_dev_interrupt_handler, hw);
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(pci_dev->intr_handle);
ice_dcf_enable_irq0(hw);
return 0;
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+
+	if ((hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) &&
+	    hw->tm_conf.committed) {
+		ice_dcf_clear_bw(hw);
+		ice_dcf_tm_conf_uninit(eth_dev);
+	}
ice_dcf_disable_irq0(hw);
rte_intr_disable(intr_handle);
iavf_shutdown_adminq(&hw->avf);
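+	/* Clear each pointer after freeing so a repeated uninit after a
+	 * VF reset cannot double-free it.
+	 */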
rte_free(hw->arq_buf);
+ hw->arq_buf = NULL;
+
rte_free(hw->vf_vsi_map);
+ hw->vf_vsi_map = NULL;
+
rte_free(hw->vf_res);
+ hw->vf_res = NULL;
+
rte_free(hw->rss_lut);
+ hw->rss_lut = NULL;
+
rte_free(hw->rss_key);
+ hw->rss_key = NULL;
+
+ rte_free(hw->qos_bw_cfg);
+ hw->qos_bw_cfg = NULL;
+
+ rte_free(hw->ets_config);
+ hw->ets_config = NULL;
}
static int
PMD_DRV_LOG(DEBUG, "RSS is not supported");
return -ENOTSUP;
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
/* set all lut items to default queue */
memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
-#define IAVF_RXDID_COMMS_GENERIC 16
+#define IAVF_RXDID_COMMS_OVS_1 22
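+/* RXDID 22 selects the flexible descriptor layout (comms OVS_1) that the
+ * DCF data path requests when the PF advertises it.
+ */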
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
}
vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
vc_qp->rxq.queue_id = i;
- vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
if (i >= hw->eth_dev->data->nb_rx_queues)
continue;
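+		/* Only dereference rxq[i] once the index is known to be
+		 * within the configured Rx queue range.
+		 */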
+ vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
if (hw->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
hw->supported_rxdid &
- BIT(IAVF_RXDID_COMMS_GENERIC)) {
- vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+ BIT(IAVF_RXDID_COMMS_OVS_1)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
"Queue[%d]", vc_qp->rxq.rxdid, i);
} else {
struct dcf_virtchnl_cmd args;
int err;
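+	/* Queue control via the AdminQ is skipped while the VF is resetting. */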
+ if (hw->resetting)
+ return 0;
+
memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = hw->vsi_res->vsi_id;
struct dcf_virtchnl_cmd args;
int len, err = 0;
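+	/* While the VF is resetting, removing a MAC filter can safely be
+	 * skipped, but an add must fail so the caller knows it was lost.
+	 */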
+ if (hw->resetting) {
+ if (!add)
+ return 0;
+
+ PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
+ return -EIO;
+ }
+
len = sizeof(struct virtchnl_ether_addr_list);
addr = hw->eth_dev->data->mac_addrs;
len += sizeof(struct virtchnl_ether_addr);
rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
- addr->addr_bytes[0], addr->addr_bytes[1],
- addr->addr_bytes[2], addr->addr_bytes[3],
- addr->addr_bytes[4], addr->addr_bytes[5]);
+ PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
+ RTE_ETHER_ADDR_BYTES(addr));
list->vsi_id = hw->vsi_res->vsi_id;
list->num_elements = 1;