1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
11 #include <ethdev_pci.h>
13 #include "txgbe_logs.h"
14 #include "base/txgbe.h"
15 #include "txgbe_ethdev.h"
16 #include "txgbe_rxtx.h"
/* Forward declarations: these handlers are referenced (e.g. from dev_init)
 * before their definitions later in this file.
 */
18 static int txgbevf_dev_close(struct rte_eth_dev *dev);
19 static void txgbevf_intr_disable(struct rte_eth_dev *dev);
20 static void txgbevf_intr_enable(struct rte_eth_dev *dev);
23 * The set of PCI devices this driver supports (for VF)
25 static const struct rte_pci_id pci_id_txgbevf_map[] = {
/* Raptor VF device */
26 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
/* Raptor VF, HV variant (presumably for hypervisor passthrough — confirm) */
27 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
/* zeroed vendor_id terminates the table for the PCI bus scan */
28 { .vendor_id = 0, /* sentinel */ },
31 static const struct eth_dev_ops txgbevf_eth_dev_ops;
34 * Negotiate mailbox API version with the PF.
35 * After reset API version is always set to the basic one (txgbe_mbox_api_10).
36 * Then we try to negotiate starting with the most recent one.
37 * If all negotiation attempts fail, then we will proceed with
38 * the default one (txgbe_mbox_api_10).
/* NOTE(review): this excerpt elides lines (return type, braces, the
 * sup_ver[] initializer contents, and the loop body tail) — see the
 * full file before editing.
 */
41 txgbevf_negotiate_api(struct txgbe_hw *hw)
45 /* start with highest supported, proceed down */
46 static const int sup_ver[] = {
/* Try each supported version in descending order; stop at the first
 * one the PF accepts (negotiate returns 0 on success).
 */
53 for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
54 if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
60 * Virtual Function device init
/* Initialize a VF ethdev instance: wire up dev_ops, copy PCI identity into
 * the hw struct, init the base (shared) code and mailbox, reset the VF MAC,
 * negotiate the mailbox API with the PF and query queue counts.
 * NOTE(review): this excerpt elides lines (error-path returns, MAC-address
 * handling, closing braces) — consult the full file before editing.
 */
63 eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
67 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
68 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
70 PMD_INIT_FUNC_TRACE();
72 eth_dev->dev_ops = &txgbevf_eth_dev_ops;
74 /* for secondary processes, we don't initialise any further as primary
75 * has already done this work. Only check we don't need a different
78 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
79 struct txgbe_tx_queue *txq;
80 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
81 /* TX queue function in primary, set by last queue initialized
82 * Tx queue may not initialized by primary process
84 if (eth_dev->data->tx_queues) {
85 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
86 txgbe_set_tx_function(eth_dev, txq);
88 /* Use default TX function if we get here */
90 "No TX queues configured yet. Using default TX function.");
93 txgbe_set_rx_function(eth_dev);
/* Primary-process path from here on: populate hw identity from PCI config */
98 rte_eth_copy_pci_info(eth_dev, pci_dev);
100 hw->device_id = pci_dev->id.device_id;
101 hw->vendor_id = pci_dev->id.vendor_id;
102 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
103 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
/* BAR0 holds the VF register space */
104 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
106 /* Initialize the shared code (base driver) */
107 err = txgbe_init_shared_code(hw);
110 "Shared code init failed for txgbevf: %d", err);
114 /* init_mailbox_params */
115 hw->mbx.init_params(hw);
117 /* Disable the interrupts for VF */
118 txgbevf_intr_disable(eth_dev);
120 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
121 err = hw->mac.reset_hw(hw);
124 * The VF reset operation returns the TXGBE_ERR_INVALID_MAC_ADDR when
125 * the underlying PF driver has not assigned a MAC address to the VF.
126 * In this case, assign a random MAC address.
128 if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
129 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
131 * This error code will be propagated to the app by
132 * rte_eth_dev_reset, so use a public error code rather than
133 * the internal-only TXGBE_ERR_RESET_FAILED
138 /* negotiate mailbox API version to use with the PF. */
139 txgbevf_negotiate_api(hw);
141 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
142 txgbevf_get_queues(hw, &tcs, &tc);
/* Re-enable VF interrupts now that reset and negotiation are done */
144 txgbevf_intr_enable(eth_dev);
149 /* Virtual Function device uninit */
151 eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
153 PMD_INIT_FUNC_TRACE();
/* Only the primary process tears the device down; secondaries bail out
 * (return statement elided in this excerpt).
 */
155 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* Delegate all actual teardown to the close handler */
158 txgbevf_dev_close(eth_dev);
/* PCI probe callback: allocate an ethdev with a txgbe_adapter private area
 * and run the VF init routine on it.
 */
163 static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
164 struct rte_pci_device *pci_dev)
166 return rte_eth_dev_pci_generic_probe(pci_dev,
167 sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
/* PCI remove callback: uninit the ethdev and release it */
170 static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
172 return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
176 * virtual function driver struct
178 static struct rte_pci_driver rte_txgbevf_pmd = {
/* devices this driver claims */
179 .id_table = pci_id_txgbevf_map,
/* BARs must be mapped before probe */
180 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
181 .probe = eth_txgbevf_pci_probe,
182 .remove = eth_txgbevf_pci_remove,
186 * Virtual Function operations
/* Mask all VF interrupts and record the masked state in intr->mask_misc */
189 txgbevf_intr_disable(struct rte_eth_dev *dev)
191 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
192 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
194 PMD_INIT_FUNC_TRACE();
196 /* Clear interrupt mask to stop from interrupts being generated */
197 wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);
201 /* Clear mask value. */
202 intr->mask_misc = TXGBE_VFIMS_MASK;
/* Unmask VF interrupts via the interrupt-mask-clear register.
 * NOTE(review): lines updating intr->mask_misc appear elided in this excerpt.
 */
206 txgbevf_intr_enable(struct rte_eth_dev *dev)
208 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
209 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
211 PMD_INIT_FUNC_TRACE();
213 /* VF enable interrupt autoclean */
214 wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
/* Close the VF device: reset the MAC, free all Rx/Tx queues and mask
 * interrupts. Secondary processes skip teardown (early return elided in
 * this excerpt).
 */
222 txgbevf_dev_close(struct rte_eth_dev *dev)
224 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
225 PMD_INIT_FUNC_TRACE();
226 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
229 hw->mac.reset_hw(hw);
231 txgbe_dev_free_queues(dev);
233 /* Disable the interrupts for VF */
234 txgbevf_intr_disable(dev);
240 * dev_ops for virtual function, bare necessities for basic vf
241 * operation have been implemented
243 static const struct eth_dev_ops txgbevf_eth_dev_ops = {
/* NOTE(review): the ops-table entries are elided in this excerpt. */
/* Register the driver with the EAL PCI bus under the name net_txgbe_vf */
246 RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
/* Export the PCI ID table so tooling can map devices to this PMD */
247 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
/* Declare the kernel modules (igb_uio or vfio-pci) the PMD can bind with */
248 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");