i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+/* Forward declaration of the periodic alarm callback referenced by the
+ * rte_eal_alarm_cancel()/rte_eal_alarm_set() calls added further down,
+ * before its definition appears in the file. */
+static void
+i40evf_dev_alarm_handler(void *param);
/* Default hash key buffer for RSS; static storage, so zero-initialized.
 * NOTE(review): sized in 32-bit words (I40E_VFQF_HKEY_MAX_INDEX + 1) —
 * presumably matching the VFQF_HKEY register layout; confirm at the
 * point where the key is programmed. */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
/* NOTE(review): body fragment — the enclosing function header is elided
 * in this excerpt. Populates the virtchnl TX queue config for
 * (vsi_id, queue_id). */
{
txq_info->vsi_id = vsi_id;
txq_info->queue_id = queue_id;
/* Guard added by this patch: only dereference txq when it is non-NULL,
 * in addition to the existing configured-queue-count check. */
- if (queue_id < nb_txq) {
+ if (queue_id < nb_txq && txq) {
txq_info->ring_len = txq->nb_tx_desc;
txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
}
/* NOTE(review): body fragment (function header elided; last statement
 * truncated at the excerpt boundary). Populates the virtchnl RX queue
 * config for (vsi_id, queue_id). */
rxq_info->vsi_id = vsi_id;
rxq_info->queue_id = queue_id;
rxq_info->max_pkt_size = max_pkt_size;
/* Same NULL guard as the TX side: rxq may be NULL for this queue id. */
- if (queue_id < nb_rxq) {
+ if (queue_id < nb_rxq && rxq) {
rxq_info->ring_len = rxq->nb_rx_desc;
rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
rxq_info->databuffer_size =
/* NOTE(review): loop fragment from the VSI queue-configuration path
 * (enclosing function header elided). The patch stops indexing the
 * queue arrays blindly: when txq/rxq is NULL the per-queue pointer is
 * passed as NULL, matching the NULL guards added in the fill helpers. */
for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
- vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
+ txq ? txq[i] : NULL);
i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
- vf->max_pkt_len, rxq[i]);
+ vf->max_pkt_len, rxq ? rxq[i] : NULL);
}
/* NOTE(review): tail fragment of a VF admin-queue command function
 * (header and local declarations elided in this excerpt). */
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
args.in_args_size = sizeof(vfres);
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
+
+/* Pause the periodic alarm callback so its admin-queue polling cannot
+ * race with this synchronous VF command; the alarm is re-armed
+ * unconditionally below, even on command failure. */
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
/* NOTE(review): the log names OP_REQUEST_QUEUES while args.ops above is
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES — this excerpt splices elided hunks, so
 * confirm against the full function before treating the string as a bug. */
PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
return err;
}
*/
/* PCI driver registration for the i40e VF PMD.
 * NOTE(review): the patch drops RTE_PCI_DRV_IOVA_AS_VA from drv_flags —
 * presumably because IOVA-mode selection moved out of per-driver flags;
 * verify against the accompanying commit message / EAL change. */
static struct rte_pci_driver rte_i40evf_pmd = {
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = eth_i40evf_pci_probe,
.remove = eth_i40evf_pci_remove,
};
return 0;
}
+
+/* Return true when @dev is an ethdev bound to this VF driver
+ * (rte_i40evf_pmd); thin wrapper over is_device_supported(). */
+bool
+is_i40evf_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40evf_pmd);
+}