1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
11 #include <ethdev_pci.h>
13 #include "txgbe_logs.h"
14 #include "base/txgbe.h"
15 #include "txgbe_ethdev.h"
16 #include "txgbe_rxtx.h"
/* Forward declarations for ops referenced by the eth_dev_ops table and by
 * eth_txgbevf_dev_init() before their definitions below.
 */
18 static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
19 struct rte_eth_dev_info *dev_info);
20 static int txgbevf_dev_close(struct rte_eth_dev *dev);
21 static void txgbevf_intr_disable(struct rte_eth_dev *dev);
22 static void txgbevf_intr_enable(struct rte_eth_dev *dev);
23 static void txgbevf_configure_msix(struct rte_eth_dev *dev);
24 static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
25 static void txgbevf_dev_interrupt_handler(void *param);
28 * The set of PCI devices this driver supports (for VF)
30 static const struct rte_pci_id pci_id_txgbevf_map[] = {
/* Raptor VF, and the Hyper-V variant of the same function. */
31 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
32 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
/* Zero vendor_id entry terminates the table for the PCI bus scan. */
33 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits reported to applications via dev_info. */
36 static const struct rte_eth_desc_lim rx_desc_lim = {
37 .nb_max = TXGBE_RING_DESC_MAX,
38 .nb_min = TXGBE_RING_DESC_MIN,
39 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring limits, including the per-packet segment caps. */
42 static const struct rte_eth_desc_lim tx_desc_lim = {
43 .nb_max = TXGBE_RING_DESC_MAX,
44 .nb_min = TXGBE_RING_DESC_MIN,
45 .nb_align = TXGBE_TXD_ALIGN,
46 .nb_seg_max = TXGBE_TX_MAX_SEG,
47 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
/* Forward declaration; the ops table is defined at the bottom of the file. */
50 static const struct eth_dev_ops txgbevf_eth_dev_ops;
53 * Negotiate mailbox API version with the PF.
54 * After reset API version is always set to the basic one (txgbe_mbox_api_10).
55 * Then we try to negotiate starting with the most recent one.
56 * If all negotiation attempts fail, then we will proceed with
57 * the default one (txgbe_mbox_api_10).
60 txgbevf_negotiate_api(struct txgbe_hw *hw)
64 /* start with highest supported, proceed down */
65 static const int sup_ver[] = {
/* Try each supported version in descending order; stop at the first
 * the PF accepts (negotiate returns 0 on agreement).
 */
72 for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
73 if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
79 generate_random_mac_addr(struct rte_ether_addr *mac_addr)
83 /* Set Organizationally Unique Identifier (OUI) prefix. */
84 mac_addr->addr_bytes[0] = 0x00;
85 mac_addr->addr_bytes[1] = 0x09;
86 mac_addr->addr_bytes[2] = 0xC0;
87 /* Force indication of locally assigned MAC address. */
88 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
89 /* Generate the last 3 bytes of the MAC address with a random number. */
91 memcpy(&mac_addr->addr_bytes[3], &random, 3);
95 * Virtual Function device init
98 eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
102 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
103 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
104 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
105 struct rte_ether_addr *perm_addr =
106 (struct rte_ether_addr *)hw->mac.perm_addr;
108 PMD_INIT_FUNC_TRACE();
110 eth_dev->dev_ops = &txgbevf_eth_dev_ops;
112 /* for secondary processes, we don't initialise any further as primary
113 * has already done this work. Only check we don't need a different
116 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
117 struct txgbe_tx_queue *txq;
118 uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
119 /* TX queue function in primary, set by last queue initialized
120 * Tx queue may not initialized by primary process
122 if (eth_dev->data->tx_queues) {
123 txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
124 txgbe_set_tx_function(eth_dev, txq);
126 /* Use default TX function if we get here */
128 "No TX queues configured yet. Using default TX function.");
131 txgbe_set_rx_function(eth_dev);
136 rte_eth_copy_pci_info(eth_dev, pci_dev);
/* Cache the PCI identity and BAR0 address for the base driver. */
138 hw->device_id = pci_dev->id.device_id;
139 hw->vendor_id = pci_dev->id.vendor_id;
140 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
141 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
142 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
144 /* Initialize the shared code (base driver) */
145 err = txgbe_init_shared_code(hw);
148 "Shared code init failed for txgbevf: %d", err);
152 /* init_mailbox_params */
153 hw->mbx.init_params(hw);
155 /* Disable the interrupts for VF */
156 txgbevf_intr_disable(eth_dev);
158 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
159 err = hw->mac.reset_hw(hw);
162 * The VF reset operation returns the TXGBE_ERR_INVALID_MAC_ADDR when
163 * the underlying PF driver has not assigned a MAC address to the VF.
164 * In this case, assign a random MAC address.
166 if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
167 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
169 * This error code will be propagated to the app by
170 * rte_eth_dev_reset, so use a public error code rather than
171 * the internal-only TXGBE_ERR_RESET_FAILED
176 /* negotiate mailbox API version to use with the PF. */
177 txgbevf_negotiate_api(hw);
179 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
180 txgbevf_get_queues(hw, &tcs, &tc);
182 /* Allocate memory for storing MAC addresses */
183 eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
184 hw->mac.num_rar_entries, 0);
185 if (eth_dev->data->mac_addrs == NULL) {
187 "Failed to allocate %u bytes needed to store "
189 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
193 /* Generate a random MAC address, if none was assigned by PF. */
194 if (rte_is_zero_ether_addr(perm_addr)) {
195 generate_random_mac_addr(perm_addr);
196 err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
/* On RAR programming failure, release the address table before bailing. */
198 rte_free(eth_dev->data->mac_addrs);
199 eth_dev->data->mac_addrs = NULL;
202 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
203 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
204 "%02x:%02x:%02x:%02x:%02x:%02x",
205 perm_addr->addr_bytes[0],
206 perm_addr->addr_bytes[1],
207 perm_addr->addr_bytes[2],
208 perm_addr->addr_bytes[3],
209 perm_addr->addr_bytes[4],
210 perm_addr->addr_bytes[5]);
213 /* Copy the permanent MAC address */
/* FIX(review): original text had mis-encoded "ð_dev" (mojibake of "&eth"). */
214 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
216 /* reset the hardware with the new settings */
217 err = hw->mac.start_hw(hw);
219 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
/* Hook and enable the misc interrupt (mailbox) last, once HW is up. */
223 rte_intr_callback_register(intr_handle,
224 txgbevf_dev_interrupt_handler, eth_dev);
225 rte_intr_enable(intr_handle);
226 txgbevf_intr_enable(eth_dev);
228 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
229 eth_dev->data->port_id, pci_dev->id.vendor_id,
230 pci_dev->id.device_id, "txgbe_mac_raptor_vf");
235 /* Virtual Function device uninit */
237 eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
239 PMD_INIT_FUNC_TRACE();
/* Secondary processes did no per-device init; nothing to tear down. */
241 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* Full teardown is delegated to the close routine. */
244 txgbevf_dev_close(eth_dev);
/* PCI probe: allocate an ethdev with a txgbe_adapter private area and run
 * the VF-specific init on it.
 */
249 static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
250 struct rte_pci_device *pci_dev)
252 return rte_eth_dev_pci_generic_probe(pci_dev,
253 sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
/* PCI remove: generic teardown that invokes the VF uninit callback. */
256 static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
258 return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
262 * virtual function driver struct
264 static struct rte_pci_driver rte_txgbevf_pmd = {
265 .id_table = pci_id_txgbevf_map,
266 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
267 .probe = eth_txgbevf_pci_probe,
268 .remove = eth_txgbevf_pci_remove,
/* Report VF device capabilities: queue counts, offloads, RSS parameters
 * and default Rx/Tx ring configuration.
 */
272 txgbevf_dev_info_get(struct rte_eth_dev *dev,
273 struct rte_eth_dev_info *dev_info)
275 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
276 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
278 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
279 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
280 dev_info->min_rx_bufsize = 1024;
281 dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
282 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
283 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
284 dev_info->max_vfs = pci_dev->max_vfs;
285 dev_info->max_vmdq_pools = ETH_64_POOLS;
/* Port-level Rx capability includes everything each queue can do. */
286 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
287 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
288 dev_info->rx_queue_offload_capa);
289 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
290 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
291 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
292 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
293 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
/* Defaults applications receive when they pass NULL rx/tx conf. */
295 dev_info->default_rxconf = (struct rte_eth_rxconf) {
297 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
298 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
299 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
301 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
306 dev_info->default_txconf = (struct rte_eth_txconf) {
308 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
309 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
310 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
312 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
316 dev_info->rx_desc_lim = rx_desc_lim;
317 dev_info->tx_desc_lim = tx_desc_lim;
323 * Virtual Function operations
/* Mask all VF interrupt causes and record the mask in the intr state. */
326 txgbevf_intr_disable(struct rte_eth_dev *dev)
328 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
329 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
331 PMD_INIT_FUNC_TRACE();
333 /* Clear interrupt mask to stop from interrupts being generated */
334 wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);
338 /* Clear mask value. */
339 intr->mask_misc = TXGBE_VFIMS_MASK;
/* Unmask VF interrupts by clearing all bits in the mask-clear register. */
343 txgbevf_intr_enable(struct rte_eth_dev *dev)
345 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
346 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
348 PMD_INIT_FUNC_TRACE();
350 /* VF enable interrupt autoclean */
351 wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
/* Close the VF port: reset HW, free queues, drop the VF MAC so traffic
 * reverts to the PF, and unhook the interrupt callback.
 */
359 txgbevf_dev_close(struct rte_eth_dev *dev)
361 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
362 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
363 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364 PMD_INIT_FUNC_TRACE();
365 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
368 hw->mac.reset_hw(hw);
370 txgbe_dev_free_queues(dev);
373 * Remove the VF MAC address to ensure
374 * that the VF traffic goes to the PF
375 * after stop, close and detach of the VF
377 txgbevf_remove_mac_addr(dev, 0);
379 /* Disable the interrupts for VF */
380 txgbevf_intr_disable(dev);
382 rte_free(dev->data->mac_addrs);
383 dev->data->mac_addrs = NULL;
385 rte_intr_disable(intr_handle);
386 rte_intr_callback_unregister(intr_handle,
387 txgbevf_dev_interrupt_handler, dev);
/* Enable the Rx interrupt for @queue_id. All Rx queues share one vector
 * (queue_id is unused); only the shared vector bit is unmasked.
 */
393 txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
395 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
396 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
397 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
398 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
399 uint32_t vec = TXGBE_MISC_VEC_ID;
/* With a dedicated misc vector available, Rx queues start at RX_VEC_START. */
401 if (rte_intr_allow_others(intr_handle))
402 vec = TXGBE_RX_VEC_START;
403 intr->mask_misc &= ~(1 << vec);
404 RTE_SET_USED(queue_id);
/* Writing the complement of the mask unmasks the selected vector. */
405 wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);
407 rte_intr_enable(intr_handle);
/* Disable the Rx interrupt for @queue_id by masking the shared Rx vector
 * (queue_id itself is unused, mirroring the enable path).
 */
413 txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
415 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
416 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
417 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
418 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
419 uint32_t vec = TXGBE_MISC_VEC_ID;
421 if (rte_intr_allow_others(intr_handle))
422 vec = TXGBE_RX_VEC_START;
423 intr->mask_misc |= (1 << vec);
424 RTE_SET_USED(queue_id);
425 wr32(hw, TXGBE_VFIMS, intr->mask_misc);
/* Map a queue (or, with direction == -1, the misc/other cause) to an MSI-X
 * vector in the VF IVAR registers.
 */
431 txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
432 uint8_t queue, uint8_t msix_vector)
436 if (direction == -1) {
/* Other-causes entry lives in its own register; mark the vector valid. */
438 msix_vector |= TXGBE_VFIVAR_VLD;
439 tmp = rd32(hw, TXGBE_VFIVARMISC);
442 wr32(hw, TXGBE_VFIVARMISC, tmp);
445 /* Workaround for ICR lost */
/* Each VFIVAR register packs entries for two queues; select the byte
 * lane from the queue parity and Rx/Tx direction, then read-modify-write.
 */
446 idx = ((16 * (queue & 1)) + (8 * direction));
447 tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
448 tmp &= ~(0xFF << idx);
449 tmp |= (msix_vector << idx);
450 wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
/* Program the VF MSI-X mapping: misc causes on their own vector, then all
 * Rx queues onto the (single) data vector, and set the default ITR.
 */
455 txgbevf_configure_msix(struct rte_eth_dev *dev)
457 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
458 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
459 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
461 uint32_t vector_idx = TXGBE_MISC_VEC_ID;
462 uint32_t base = TXGBE_MISC_VEC_ID;
464 /* Configure VF other cause ivar */
465 txgbevf_set_ivar_map(hw, -1, 1, vector_idx);
467 /* won't configure msix register if no mapping is done
468 * between intr vector and event fd.
470 if (!rte_intr_dp_is_en(intr_handle))
473 if (rte_intr_allow_others(intr_handle)) {
474 base = TXGBE_RX_VEC_START;
475 vector_idx = TXGBE_RX_VEC_START;
478 /* Configure all RX queues of VF */
479 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
480 /* Force all queue use vector 0,
481 * as TXGBE_VF_MAXMSIVECOTR = 1
483 txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
484 intr_handle->intr_vec[q_idx] = vector_idx;
485 if (vector_idx < base + intr_handle->nb_efd - 1)
489 /* As RX queue setting above show, all queues use the vector 0.
490 * Set only the ITR value of TXGBE_MISC_VEC_ID.
492 wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
493 TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
/* Add a unicast MAC address on the VF via the PF mailbox. The rar index
 * and pool arguments are unused: the PF manages the actual RAR slots.
 */
498 txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
499 __rte_unused uint32_t index,
500 __rte_unused uint32_t pool)
502 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
506 * On a VF, adding again the same MAC addr is not an idempotent
507 * operation. Trap this case to avoid exhausting the [very limited]
508 * set of PF resources used to store VF MAC addresses.
510 if (memcmp(hw->mac.perm_addr, mac_addr,
511 sizeof(struct rte_ether_addr)) == 0)
513 err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
515 PMD_DRV_LOG(ERR, "Unable to add MAC address "
516 "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
517 mac_addr->addr_bytes[0],
518 mac_addr->addr_bytes[1],
519 mac_addr->addr_bytes[2],
520 mac_addr->addr_bytes[3],
521 mac_addr->addr_bytes[4],
522 mac_addr->addr_bytes[5],
/* Remove the MAC address at @index. Because the PF mailbox cannot delete a
 * single address, wipe the whole set and re-add everything except the
 * removed entry and the permanent address.
 */
528 txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
530 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
531 struct rte_ether_addr *perm_addr =
532 (struct rte_ether_addr *)hw->mac.perm_addr;
533 struct rte_ether_addr *mac_addr;
538 * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
539 * not support the deletion of a given MAC address.
540 * Instead, it imposes to delete all MAC addresses, then to add again
541 * all MAC addresses with the exception of the one to be deleted.
543 (void)txgbevf_set_uc_addr_vf(hw, 0, NULL);
546 * Add again all MAC addresses, with the exception of the deleted one
547 * and of the permanent MAC address.
549 for (i = 0, mac_addr = dev->data->mac_addrs;
550 i < hw->mac.num_rar_entries; i++, mac_addr++) {
551 /* Skip the deleted MAC address */
554 /* Skip NULL MAC addresses */
555 if (rte_is_zero_ether_addr(mac_addr))
557 /* Skip the permanent MAC address */
558 if (memcmp(perm_addr, mac_addr,
559 sizeof(struct rte_ether_addr)) == 0)
561 err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
564 "Adding again MAC address "
565 "%02x:%02x:%02x:%02x:%02x:%02x failed "
567 mac_addr->addr_bytes[0],
568 mac_addr->addr_bytes[1],
569 mac_addr->addr_bytes[2],
570 mac_addr->addr_bytes[3],
571 mac_addr->addr_bytes[4],
572 mac_addr->addr_bytes[5],
/* Set the device's default (RAR 0) MAC address via the base driver hook. */
578 txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
579 struct rte_ether_addr *addr)
581 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
583 hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);
/* Handle a pending PF->VF mailbox message; on a PF control (reset) message,
 * ack it and notify the application via RTE_ETH_EVENT_INTR_RESET.
 */
588 static void txgbevf_mbx_process(struct rte_eth_dev *dev)
590 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
593 /* peek the message first */
594 in_msg = rd32(hw, TXGBE_VFMBX);
596 /* PF reset VF event */
597 if (in_msg == TXGBE_PF_CONTROL_MSG) {
598 /* dummy mbx read to ack pf */
599 if (txgbe_read_mbx(hw, &in_msg, 1, 0))
601 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
/* Read and latch the VF interrupt cause. Interrupts are masked here and
 * re-enabled later by the action routine.
 */
607 txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
610 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
611 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
612 txgbevf_intr_disable(dev);
614 /* read-on-clear nic registers here */
615 eicr = rd32(hw, TXGBE_VFICR);
618 /* only one misc vector supported - mailbox */
619 eicr &= TXGBE_VFICR_MASK;
620 /* Workaround for ICR lost */
/* Always flag the mailbox; the mailbox is the only misc cause on a VF. */
621 intr->flags |= TXGBE_FLAG_MAILBOX;
/* Act on the causes latched by get_status: process mailbox traffic, clear
 * the flag, then re-enable interrupts.
 */
627 txgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
629 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
631 if (intr->flags & TXGBE_FLAG_MAILBOX) {
632 txgbevf_mbx_process(dev);
633 intr->flags &= ~TXGBE_FLAG_MAILBOX;
636 txgbevf_intr_enable(dev);
/* Top-level interrupt callback registered with the EAL: fetch the cause,
 * then dispatch handling. @param is the rte_eth_dev registered at init.
 */
642 txgbevf_dev_interrupt_handler(void *param)
644 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
646 txgbevf_dev_interrupt_get_status(dev);
647 txgbevf_dev_interrupt_action(dev);
651 * dev_ops for virtual function, bare necessities for basic vf
652 * operation have been implemented
654 static const struct eth_dev_ops txgbevf_eth_dev_ops = {
655 .dev_infos_get = txgbevf_dev_info_get,
656 .rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,
657 .rx_queue_intr_disable = txgbevf_dev_rx_queue_intr_disable,
658 .mac_addr_add = txgbevf_add_mac_addr,
659 .mac_addr_remove = txgbevf_remove_mac_addr,
660 .rxq_info_get = txgbe_rxq_info_get,
661 .txq_info_get = txgbe_txq_info_get,
662 .mac_addr_set = txgbevf_set_default_mac_addr,
/* Register the VF PMD with the EAL: driver, its PCI ID table, and the
 * kernel modules (igb_uio or vfio-pci) it depends on.
 */
665 RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
666 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
667 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");