#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>
caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
- VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+ VIRTCHNL_VF_OFFLOAD_QOS;
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
(uint8_t *)&caps, sizeof(caps));
}
hw->num_vfs = vsi_map->num_vfs;
+ hw->pf_vsi_id = vsi_map->pf_vsi;
}
if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
}
do {
- if ((!desc_cmd.pending && !buff_cmd.pending) ||
- (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
- (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
+ if (!desc_cmd.pending && !buff_cmd.pending)
break;
rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- int ret;
+ int ret, size;
hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
hw->avf.back = hw;
}
}
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+ ice_dcf_tm_conf_init(eth_dev);
+ size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+ hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+ if (!hw->qos_bw_cfg) {
+ PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+ goto err_rss;
+ }
+ }
+
hw->eth_dev = eth_dev;
rte_intr_callback_register(&pci_dev->intr_handle,
ice_dcf_dev_interrupt_handler, hw);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+ if (hw->tm_conf.committed) {
+ ice_dcf_clear_bw(hw);
+ ice_dcf_tm_conf_uninit(eth_dev);
+ }
+
ice_dcf_disable_irq0(hw);
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
rte_free(hw->vf_res);
rte_free(hw->rss_lut);
rte_free(hw->rss_key);
+ rte_free(hw->qos_bw_cfg);
}
static int
return 0;
}
+#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
-#define IAVF_RXDID_COMMS_GENERIC 16
+#define IAVF_RXDID_COMMS_OVS_1 22
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
}
vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
vc_qp->rxq.queue_id = i;
- vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
if (i >= hw->eth_dev->data->nb_rx_queues)
continue;
+ vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (hw->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
hw->supported_rxdid &
- BIT(IAVF_RXDID_COMMS_GENERIC)) {
- vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+ BIT(IAVF_RXDID_COMMS_OVS_1)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
"Queue[%d]", vc_qp->rxq.rxdid, i);
} else {
PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
return -EINVAL;
}
+#else
+ if (hw->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ hw->supported_rxdid &
+ BIT(IAVF_RXDID_LEGACY_0)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
+ PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+ "Queue[%d]", vc_qp->rxq.rxdid, i);
+ } else {
+ PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+ return -EINVAL;
+ }
+#endif
+ ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
}
memset(&args, 0, sizeof(args));
rte_free(map_info);
return err;
}
+
+/**
+ * Enable or disable one Rx or Tx queue on the DCF VSI.
+ *
+ * @param hw  DCF hardware context; hw->vsi_res->vsi_id selects the VSI.
+ * @param qid Queue index to toggle (bit position in the queue bitmap).
+ * @param rx  true to act on an Rx queue, false for a Tx queue.
+ * @param on  true sends VIRTCHNL_OP_ENABLE_QUEUES, false the DISABLE op.
+ * @return 0 on success, error code from the virtchnl exchange otherwise.
+ */
+int
+ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
+{
+	struct virtchnl_queue_select queue_select;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = hw->vsi_res->vsi_id;
+	/* BIT() builds an unsigned mask; "1 << qid" is a signed shift and
+	 * undefined behavior once qid reaches 31.
+	 */
+	if (rx)
+		queue_select.rx_queues |= BIT(qid);
+	else
+		queue_select.tx_queues |= BIT(qid);
+
+	memset(&args, 0, sizeof(args));
+	if (on)
+		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
+	else
+		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+
+	args.req_msg = (u8 *)&queue_select;
+	args.req_msglen = sizeof(queue_select);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+
+	return err;
+}
+
+/* Send a single VIRTCHNL_OP_DISABLE_QUEUES request covering every
+ * configured Rx and Tx queue on the DCF VSI.
+ * Returns 0 on success, error code from the virtchnl exchange otherwise.
+ */
+int
+ice_dcf_disable_queues(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_queue_select qsel;
+	struct dcf_virtchnl_cmd cmd;
+	int ret;
+
+	memset(&qsel, 0, sizeof(qsel));
+	memset(&cmd, 0, sizeof(cmd));
+
+	qsel.vsi_id = hw->vsi_res->vsi_id;
+	/* Contiguous bitmask: bit i set for each configured queue i. */
+	qsel.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
+	qsel.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;
+
+	cmd.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+	cmd.req_msg = (u8 *)&qsel;
+	cmd.req_msglen = sizeof(qsel);
+
+	ret = ice_dcf_execute_virtchnl_cmd(hw, &cmd);
+	if (ret)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES");
+
+	return ret;
+}
+
+/**
+ * Fetch the VSI packet/byte counters from the PF via
+ * VIRTCHNL_OP_GET_STATS.
+ *
+ * @param hw     DCF hardware context; hw->vsi_res->vsi_id selects the VSI.
+ * @param pstats Output buffer the PF's stats response is copied into.
+ * @return 0 on success, error code from the virtchnl exchange otherwise.
+ */
+int
+ice_dcf_query_stats(struct ice_dcf_hw *hw,
+				   struct virtchnl_eth_stats *pstats)
+{
+	struct virtchnl_queue_select q_stats;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&q_stats, 0, sizeof(q_stats));
+	q_stats.vsi_id = hw->vsi_res->vsi_id;
+
+	/* Zero the command descriptor like the sibling queue ops do;
+	 * otherwise the fields not assigned below reach
+	 * ice_dcf_execute_virtchnl_cmd() with indeterminate values.
+	 */
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_GET_STATS;
+	args.req_msg = (uint8_t *)&q_stats;
+	args.req_msglen = sizeof(q_stats);
+	args.rsp_msglen = sizeof(*pstats);
+	args.rsp_msgbuf = (uint8_t *)pstats;
+	args.rsp_buflen = sizeof(*pstats);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Add (add == true) or remove (add == false) the device's primary MAC
+ * address on the DCF VSI through the virtchnl ether-address ops.
+ * Returns 0 on success, -ENOMEM on allocation failure, or the error
+ * code from the virtchnl exchange.
+ */
+int
+ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
+{
+	struct virtchnl_ether_addr_list *addr_list;
+	struct rte_ether_addr *mac;
+	struct dcf_virtchnl_cmd cmd;
+	int msg_len, ret = 0;
+
+	/* Room for the list header plus one address entry. */
+	msg_len = sizeof(struct virtchnl_ether_addr_list);
+	msg_len += sizeof(struct virtchnl_ether_addr);
+
+	addr_list = rte_zmalloc(NULL, msg_len, 0);
+	if (!addr_list) {
+		PMD_DRV_LOG(ERR, "fail to allocate memory");
+		return -ENOMEM;
+	}
+
+	addr_list->vsi_id = hw->vsi_res->vsi_id;
+	addr_list->num_elements = 1;
+
+	mac = hw->eth_dev->data->mac_addrs;
+	rte_memcpy(addr_list->list[0].addr, mac->addr_bytes,
+		   sizeof(mac->addr_bytes));
+	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
+		    mac->addr_bytes[0], mac->addr_bytes[1],
+		    mac->addr_bytes[2], mac->addr_bytes[3],
+		    mac->addr_bytes[4], mac->addr_bytes[5]);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+			VIRTCHNL_OP_DEL_ETH_ADDR;
+	cmd.req_msg = (uint8_t *)addr_list;
+	cmd.req_msglen = msg_len;
+
+	ret = ice_dcf_execute_virtchnl_cmd(hw, &cmd);
+	if (ret)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    add ? "OP_ADD_ETHER_ADDRESS" :
+			    "OP_DEL_ETHER_ADDRESS");
+
+	rte_free(addr_list);
+	return ret;
+}