1 /* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
11 #include <ethdev_pci.h>
13 #include "txgbe_logs.h"
14 #include "base/txgbe.h"
15 #include "txgbe_ethdev.h"
16 #include "txgbe_rxtx.h"
18 static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
19 struct rte_eth_dev_info *dev_info);
20 static int txgbevf_dev_close(struct rte_eth_dev *dev);
21 static void txgbevf_intr_disable(struct rte_eth_dev *dev);
22 static void txgbevf_intr_enable(struct rte_eth_dev *dev);
23 static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
26 * The set of PCI devices this driver supports (for VF)
28 static const struct rte_pci_id pci_id_txgbevf_map[] = {
29 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
30 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
31 { .vendor_id = 0, /* sentinel */ },
34 static const struct rte_eth_desc_lim rx_desc_lim = {
35 .nb_max = TXGBE_RING_DESC_MAX,
36 .nb_min = TXGBE_RING_DESC_MIN,
37 .nb_align = TXGBE_RXD_ALIGN,
40 static const struct rte_eth_desc_lim tx_desc_lim = {
41 .nb_max = TXGBE_RING_DESC_MAX,
42 .nb_min = TXGBE_RING_DESC_MIN,
43 .nb_align = TXGBE_TXD_ALIGN,
44 .nb_seg_max = TXGBE_TX_MAX_SEG,
45 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
48 static const struct eth_dev_ops txgbevf_eth_dev_ops;
51 * Negotiate mailbox API version with the PF.
52 * After reset API version is always set to the basic one (txgbe_mbox_api_10).
53 * Then we try to negotiate starting with the most recent one.
54 * If all negotiation attempts fail, then we will proceed with
55 * the default one (txgbe_mbox_api_10).
58 txgbevf_negotiate_api(struct txgbe_hw *hw)
62 /* start with highest supported, proceed down */
63 static const int sup_ver[] = {
70 for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
71 if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
77 generate_random_mac_addr(struct rte_ether_addr *mac_addr)
81 /* Set Organizationally Unique Identifier (OUI) prefix. */
82 mac_addr->addr_bytes[0] = 0x00;
83 mac_addr->addr_bytes[1] = 0x09;
84 mac_addr->addr_bytes[2] = 0xC0;
85 /* Force indication of locally assigned MAC address. */
86 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
87 /* Generate the last 3 bytes of the MAC address with a random number. */
89 memcpy(&mac_addr->addr_bytes[3], &random, 3);
93 * Virtual Function device init
96 eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
100 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
101 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
102 struct rte_ether_addr *perm_addr =
103 (struct rte_ether_addr *)hw->mac.perm_addr;
105 PMD_INIT_FUNC_TRACE();
107 eth_dev->dev_ops = &txgbevf_eth_dev_ops;
109 /* for secondary processes, we don't initialise any further as primary
110 * has already done this work. Only check we don't need a different
113 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
114 struct txgbe_tx_queue *txq;
115 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
116 /* TX queue function in primary, set by last queue initialized
117 * Tx queue may not initialized by primary process
119 if (eth_dev->data->tx_queues) {
120 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
121 txgbe_set_tx_function(eth_dev, txq);
123 /* Use default TX function if we get here */
125 "No TX queues configured yet. Using default TX function.");
128 txgbe_set_rx_function(eth_dev);
133 rte_eth_copy_pci_info(eth_dev, pci_dev);
135 hw->device_id = pci_dev->id.device_id;
136 hw->vendor_id = pci_dev->id.vendor_id;
137 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
138 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
139 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
141 /* Initialize the shared code (base driver) */
142 err = txgbe_init_shared_code(hw);
145 "Shared code init failed for txgbevf: %d", err);
149 /* init_mailbox_params */
150 hw->mbx.init_params(hw);
152 /* Disable the interrupts for VF */
153 txgbevf_intr_disable(eth_dev);
155 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
156 err = hw->mac.reset_hw(hw);
159 * The VF reset operation returns the TXGBE_ERR_INVALID_MAC_ADDR when
160 * the underlying PF driver has not assigned a MAC address to the VF.
161 * In this case, assign a random MAC address.
163 if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
164 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
166 * This error code will be propagated to the app by
167 * rte_eth_dev_reset, so use a public error code rather than
168 * the internal-only TXGBE_ERR_RESET_FAILED
173 /* negotiate mailbox API version to use with the PF. */
174 txgbevf_negotiate_api(hw);
176 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
177 txgbevf_get_queues(hw, &tcs, &tc);
179 /* Allocate memory for storing MAC addresses */
180 eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
181 hw->mac.num_rar_entries, 0);
182 if (eth_dev->data->mac_addrs == NULL) {
184 "Failed to allocate %u bytes needed to store "
186 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
190 /* Generate a random MAC address, if none was assigned by PF. */
191 if (rte_is_zero_ether_addr(perm_addr)) {
192 generate_random_mac_addr(perm_addr);
193 err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
195 rte_free(eth_dev->data->mac_addrs);
196 eth_dev->data->mac_addrs = NULL;
199 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
200 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
201 "%02x:%02x:%02x:%02x:%02x:%02x",
202 perm_addr->addr_bytes[0],
203 perm_addr->addr_bytes[1],
204 perm_addr->addr_bytes[2],
205 perm_addr->addr_bytes[3],
206 perm_addr->addr_bytes[4],
207 perm_addr->addr_bytes[5]);
210 /* Copy the permanent MAC address */
211 rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]);
213 /* reset the hardware with the new settings */
214 err = hw->mac.start_hw(hw);
216 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
220 txgbevf_intr_enable(eth_dev);
222 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
223 eth_dev->data->port_id, pci_dev->id.vendor_id,
224 pci_dev->id.device_id, "txgbe_mac_raptor_vf");
229 /* Virtual Function device uninit */
231 eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
233 PMD_INIT_FUNC_TRACE();
235 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
238 txgbevf_dev_close(eth_dev);
243 static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
244 struct rte_pci_device *pci_dev)
246 return rte_eth_dev_pci_generic_probe(pci_dev,
247 sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
250 static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
252 return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
256 * virtual function driver struct
258 static struct rte_pci_driver rte_txgbevf_pmd = {
259 .id_table = pci_id_txgbevf_map,
260 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
261 .probe = eth_txgbevf_pci_probe,
262 .remove = eth_txgbevf_pci_remove,
266 txgbevf_dev_info_get(struct rte_eth_dev *dev,
267 struct rte_eth_dev_info *dev_info)
269 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
270 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
272 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
273 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
274 dev_info->min_rx_bufsize = 1024;
275 dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
276 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
277 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
278 dev_info->max_vfs = pci_dev->max_vfs;
279 dev_info->max_vmdq_pools = ETH_64_POOLS;
280 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
281 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
282 dev_info->rx_queue_offload_capa);
283 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
284 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
285 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
286 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
287 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
289 dev_info->default_rxconf = (struct rte_eth_rxconf) {
291 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
292 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
293 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
295 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
300 dev_info->default_txconf = (struct rte_eth_txconf) {
302 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
303 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
304 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
306 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
310 dev_info->rx_desc_lim = rx_desc_lim;
311 dev_info->tx_desc_lim = tx_desc_lim;
317 * Virtual Function operations
320 txgbevf_intr_disable(struct rte_eth_dev *dev)
322 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
323 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
325 PMD_INIT_FUNC_TRACE();
327 /* Clear interrupt mask to stop from interrupts being generated */
328 wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);
332 /* Clear mask value. */
333 intr->mask_misc = TXGBE_VFIMS_MASK;
337 txgbevf_intr_enable(struct rte_eth_dev *dev)
339 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
340 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
342 PMD_INIT_FUNC_TRACE();
344 /* VF enable interrupt autoclean */
345 wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
353 txgbevf_dev_close(struct rte_eth_dev *dev)
355 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
356 PMD_INIT_FUNC_TRACE();
357 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
360 hw->mac.reset_hw(hw);
362 txgbe_dev_free_queues(dev);
365 * Remove the VF MAC address ro ensure
366 * that the VF traffic goes to the PF
367 * after stop, close and detach of the VF
369 txgbevf_remove_mac_addr(dev, 0);
371 /* Disable the interrupts for VF */
372 txgbevf_intr_disable(dev);
374 rte_free(dev->data->mac_addrs);
375 dev->data->mac_addrs = NULL;
381 txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
382 __rte_unused uint32_t index,
383 __rte_unused uint32_t pool)
385 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
389 * On a VF, adding again the same MAC addr is not an idempotent
390 * operation. Trap this case to avoid exhausting the [very limited]
391 * set of PF resources used to store VF MAC addresses.
393 if (memcmp(hw->mac.perm_addr, mac_addr,
394 sizeof(struct rte_ether_addr)) == 0)
396 err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
398 PMD_DRV_LOG(ERR, "Unable to add MAC address "
399 "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
400 mac_addr->addr_bytes[0],
401 mac_addr->addr_bytes[1],
402 mac_addr->addr_bytes[2],
403 mac_addr->addr_bytes[3],
404 mac_addr->addr_bytes[4],
405 mac_addr->addr_bytes[5],
411 txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
413 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
414 struct rte_ether_addr *perm_addr =
415 (struct rte_ether_addr *)hw->mac.perm_addr;
416 struct rte_ether_addr *mac_addr;
421 * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
422 * not support the deletion of a given MAC address.
423 * Instead, it imposes to delete all MAC addresses, then to add again
424 * all MAC addresses with the exception of the one to be deleted.
426 (void)txgbevf_set_uc_addr_vf(hw, 0, NULL);
429 * Add again all MAC addresses, with the exception of the deleted one
430 * and of the permanent MAC address.
432 for (i = 0, mac_addr = dev->data->mac_addrs;
433 i < hw->mac.num_rar_entries; i++, mac_addr++) {
434 /* Skip the deleted MAC address */
437 /* Skip NULL MAC addresses */
438 if (rte_is_zero_ether_addr(mac_addr))
440 /* Skip the permanent MAC address */
441 if (memcmp(perm_addr, mac_addr,
442 sizeof(struct rte_ether_addr)) == 0)
444 err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
447 "Adding again MAC address "
448 "%02x:%02x:%02x:%02x:%02x:%02x failed "
450 mac_addr->addr_bytes[0],
451 mac_addr->addr_bytes[1],
452 mac_addr->addr_bytes[2],
453 mac_addr->addr_bytes[3],
454 mac_addr->addr_bytes[4],
455 mac_addr->addr_bytes[5],
461 txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
462 struct rte_ether_addr *addr)
464 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
466 hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);
472 * dev_ops for virtual function, bare necessities for basic vf
473 * operation have been implemented
475 static const struct eth_dev_ops txgbevf_eth_dev_ops = {
476 .dev_infos_get = txgbevf_dev_info_get,
477 .mac_addr_add = txgbevf_add_mac_addr,
478 .mac_addr_remove = txgbevf_remove_mac_addr,
479 .rxq_info_get = txgbe_rxq_info_get,
480 .txq_info_get = txgbe_txq_info_get,
481 .mac_addr_set = txgbevf_set_default_mac_addr,
484 RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
485 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
486 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");