/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */
#include <ethdev_pci.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
static int txgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned int n);
static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int txgbevf_dev_configure(struct rte_eth_dev *dev);
static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int txgbevf_dev_close(struct rte_eth_dev *dev);
static void txgbevf_intr_disable(struct rte_eth_dev *dev);
static void txgbevf_intr_enable(struct rte_eth_dev *dev);
static int txgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void txgbevf_configure_msix(struct rte_eth_dev *dev);
static int txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void txgbevf_dev_interrupt_handler(void *param);
/*
 * The set of PCI devices this driver supports (for VF)
 */
static const struct rte_pci_id pci_id_txgbevf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops txgbevf_eth_dev_ops;
static const struct rte_txgbe_xstats_name_off rte_txgbevf_stats_strings[] = {
	{"rx_multicast_packets_0",
			offsetof(struct txgbevf_hw_stats, qp[0].vfmprc)},
	{"rx_multicast_packets_1",
			offsetof(struct txgbevf_hw_stats, qp[1].vfmprc)},
	{"rx_multicast_packets_2",
			offsetof(struct txgbevf_hw_stats, qp[2].vfmprc)},
	{"rx_multicast_packets_3",
			offsetof(struct txgbevf_hw_stats, qp[3].vfmprc)},
	{"rx_multicast_packets_4",
			offsetof(struct txgbevf_hw_stats, qp[4].vfmprc)},
	{"rx_multicast_packets_5",
			offsetof(struct txgbevf_hw_stats, qp[5].vfmprc)},
	{"rx_multicast_packets_6",
			offsetof(struct txgbevf_hw_stats, qp[6].vfmprc)},
	{"rx_multicast_packets_7",
			offsetof(struct txgbevf_hw_stats, qp[7].vfmprc)}
};
#define TXGBEVF_NB_XSTATS (sizeof(rte_txgbevf_stats_strings) / \
		sizeof(rte_txgbevf_stats_strings[0]))
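/*
 * With the table above, TXGBEVF_NB_XSTATS evaluates to 8: one
 * "rx_multicast_packets_N" counter per VF queue pair.
 */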
/*
 * Negotiate mailbox API version with the PF. After reset, the API version
 * is always set to the basic one (txgbe_mbox_api_10). We then try to
 * negotiate, starting with the most recent version; if all attempts fail,
 * we fall back to the default (txgbe_mbox_api_10).
 */
static void
txgbevf_negotiate_api(struct txgbe_hw *hw)
{
	unsigned int i;

	/* start with highest supported, proceed down */
	static const int sup_ver[] = {
		txgbe_mbox_api_13,
		txgbe_mbox_api_12,
		txgbe_mbox_api_11,
		txgbe_mbox_api_10,
	};

	for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
		if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
			break;
	}
}
static void
generate_random_mac_addr(struct rte_ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
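/*
 * The generated address is a locally administered unicast address of the
 * form 02:09:c0:xx:xx:xx (RTE_ETHER_LOCAL_ADMIN_ADDR sets bit 1 of the
 * first octet), so it cannot collide with a globally unique PF-assigned
 * address.
 */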
/*
 * Virtual Function device init
 */
static int
eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;
	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbevf_eth_dev_ops;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	/* For secondary processes, we don't initialise any further because the
	 * primary has already done this work. Only check that we don't need a
	 * different RX/TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
		/* The TX function in the primary process is set by the last
		 * queue initialized; the TX queues may not have been set up
		 * by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No TX queues configured yet. Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));
	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR,
			"Shared code init failed for txgbevf: %d", err);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.init_params(hw);

	/* Reset the hw statistics */
	txgbevf_dev_stats_reset(eth_dev);

	/* Disable the interrupts for VF */
	txgbevf_intr_disable(eth_dev);
	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	err = hw->mac.reset_hw(hw);

	/*
	 * The VF reset operation returns TXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
		/*
		 * This error code will be propagated to the app by
		 * rte_eth_dev_reset, so use a public error code rather than
		 * the internal-only TXGBE_ERR_RESET_FAILED
		 */
		return -EAGAIN;
	}
	/* negotiate mailbox API version to use with the PF */
	txgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	txgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store "
			"MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Generate a random MAC address, if none was assigned by the PF */
	if (rte_is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (err) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return err;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	err = hw->mac.start_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
		return -EIO;
	}
	/* enter promiscuous mode */
	txgbevf_dev_promiscuous_enable(eth_dev);

	rte_intr_callback_register(intr_handle,
				   txgbevf_dev_interrupt_handler, eth_dev);
	rte_intr_enable(intr_handle);
	txgbevf_intr_enable(eth_dev);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "txgbe_mac_raptor_vf");

	return 0;
}
/* Virtual Function device uninit */
static int
eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbevf_dev_close(eth_dev);

	return 0;
}
static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
}

static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
}
/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_txgbevf_pmd = {
	.id_table = pci_id_txgbevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_txgbevf_pci_probe,
	.remove = eth_txgbevf_pci_remove,
};
static int txgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i;

	if (limit < TXGBEVF_NB_XSTATS && xstats_names != NULL)
		return -ENOMEM;

	if (xstats_names != NULL)
		for (i = 0; i < TXGBEVF_NB_XSTATS; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s", rte_txgbevf_stats_strings[i].name);
	return TXGBEVF_NB_XSTATS;
}
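/*
 * Read the per-queue-pair counters from hardware and fold them into the
 * driver's shadow statistics. The TXGBE_UPDCNT32/36 macros are assumed to
 * accumulate the delta against the last snapshot, so a 32/36-bit counter
 * wrap in hardware does not corrupt the 64-bit software totals.
 */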
static void
txgbevf_update_stats(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		/* Good Rx packets, include VF loopback */
		TXGBE_UPDCNT32(TXGBE_QPRXPKT(i),
		hw_stats->qp[i].last_vfgprc, hw_stats->qp[i].vfgprc);

		/* Good Rx octets, include VF loopback */
		TXGBE_UPDCNT36(TXGBE_QPRXOCTL(i),
		hw_stats->qp[i].last_vfgorc, hw_stats->qp[i].vfgorc);

		/* Rx Multicast packets */
		TXGBE_UPDCNT32(TXGBE_QPRXMPKT(i),
		hw_stats->qp[i].last_vfmprc, hw_stats->qp[i].vfmprc);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		/* Good Tx packets, include VF loopback */
		TXGBE_UPDCNT32(TXGBE_QPTXPKT(i),
		hw_stats->qp[i].last_vfgptc, hw_stats->qp[i].vfgptc);

		/* Good Tx octets, include VF loopback */
		TXGBE_UPDCNT36(TXGBE_QPTXOCTL(i),
		hw_stats->qp[i].last_vfgotc, hw_stats->qp[i].vfgotc);
	}
	hw->offset_loaded = 0;
}
static int
txgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned int n)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	if (n < TXGBEVF_NB_XSTATS)
		return TXGBEVF_NB_XSTATS;

	txgbevf_update_stats(dev);

	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < TXGBEVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_txgbevf_stats_strings[i].offset);
	}

	return TXGBEVF_NB_XSTATS;
}
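/*
 * Basic statistics: fold the eight queue-pair counters into the aggregate
 * rte_eth_stats fields. Only "good" packet/octet counters are visible to a
 * VF, so no error counters are filled in here.
 */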
static int
txgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	txgbevf_update_stats(dev);

	if (stats == NULL)
		return -EINVAL;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;

	for (i = 0; i < 8; i++) {
		stats->ipackets += hw_stats->qp[i].vfgprc;
		stats->ibytes += hw_stats->qp[i].vfgorc;
		stats->opackets += hw_stats->qp[i].vfgptc;
		stats->obytes += hw_stats->qp[i].vfgotc;
	}

	return 0;
}
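/*
 * Statistics reset: latch the current hardware counters (so the next update
 * starts from a fresh baseline), then clear the accumulated software copies.
 */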
static int
txgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	/* Sync HW register to the last stats */
	txgbevf_dev_stats_get(dev, NULL);

	/* reset HW current stats */
	for (i = 0; i < 8; i++) {
		hw_stats->qp[i].vfgprc = 0;
		hw_stats->qp[i].vfgorc = 0;
		hw_stats->qp[i].vfgptc = 0;
		hw_stats->qp[i].vfgotc = 0;
	}

	return 0;
}
static int
txgbevf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	return 0;
}
static int
txgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
/*
 * Virtual Function operations
 */
static void
txgbevf_intr_disable(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	/* Set the interrupt mask to stop interrupts from being generated */
	wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);

	txgbe_flush(hw);

	/* Update the cached mask value. */
	intr->mask_misc = TXGBE_VFIMS_MASK;
}
static void
txgbevf_intr_enable(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	/* VF enable interrupt autoclean */
	wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);

	txgbe_flush(hw);

	intr->mask_misc = 0;
}
static int
txgbevf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/*
	 * The VF has no ability to enable/disable HW CRC stripping;
	 * keep the behavior consistent with the host PF.
	 */
#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
	}
#else
	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	}
#endif

	/*
	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}
static int
txgbevf_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->mac.reset_hw(hw);

	txgbe_dev_free_queues(dev);

	/*
	 * Remove the VF MAC address to ensure
	 * that the VF traffic goes to the PF
	 * after stop, close and detach of the VF.
	 */
	txgbevf_remove_mac_addr(dev, 0);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	txgbevf_intr_disable(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     txgbevf_dev_interrupt_handler, dev);

	return 0;
}
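/*
 * Replay the shadow VFTA into hardware: walk each 32-bit word of the shadow
 * table and program every VLAN id whose bit is set. This is what allows the
 * VLAN filters saved in txgbevf_vlan_filter_set() to be restored after a
 * device reset.
 */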
static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < TXGBE_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					txgbe_set_vfta(hw, (i << 5) + j, 0,
						       on, false);
				mask <<= 1;
			}
		}
	}
}
static int
txgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check txgbe_set_vfta_vf */
	ret = hw->mac.set_vfta(hw, vlan_id, 0, !!on, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF VLAN");
		return ret;
	}
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}
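/*
 * Toggle VLAN stripping for one Rx queue. The queue config is saved and the
 * live register cleared (stopping the queue) before the strip bit change;
 * the config is then stored back and the strip/enable fields rewritten with
 * wr32m().
 */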
static void
txgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (queue >= hw->mac.max_rx_queues)
		return;

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	txgbe_dev_save_rx_queue(hw, queue);
	if (on)
		ctrl |= TXGBE_RXCFG_VLAN;
	else
		ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), 0);
	txgbe_dev_store_rx_queue(hw, queue);
	wr32m(hw, TXGBE_RXCFG(queue),
	      TXGBE_RXCFG_VLAN | TXGBE_RXCFG_ENA, ctrl);

	txgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static int
txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;
	int on = 0;

	/* The VF only supports the HW VLAN strip feature; other VLAN
	 * offloads are not supported.
	 */
	if (mask & ETH_VLAN_STRIP_MASK) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
			txgbevf_vlan_strip_queue_set(dev, i, on);
		}
	}

	return 0;
}
static int
txgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbevf_vlan_offload_config(dev, mask);

	return 0;
}
static int
txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vec = TXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = TXGBE_RX_VEC_START;
	intr->mask_misc &= ~(1 << vec);
	RTE_SET_USED(queue_id);
	wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);

	rte_intr_enable(intr_handle);

	return 0;
}
static int
txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = TXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = TXGBE_RX_VEC_START;
	intr->mask_misc |= (1 << vec);
	RTE_SET_USED(queue_id);
	wr32(hw, TXGBE_VFIMS, intr->mask_misc);

	return 0;
}
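/*
 * Program one IVAR entry. direction == -1 selects the misc-cause IVAR
 * (TXGBE_VFIVARMISC); otherwise each TXGBE_VFIVAR register packs four 8-bit
 * entries covering two queues, and idx picks the byte from the queue parity
 * and the Rx/Tx direction.
 */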
static void
txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		     uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_VFIVAR_VLD;
		tmp = rd32(hw, TXGBE_VFIVARMISC);
		tmp &= ~0xFF;
		tmp |= msix_vector;
		wr32(hw, TXGBE_VFIVARMISC, tmp);
	} else {
		/* rx or tx cause */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
	}
}
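/*
 * Set up the MSI-X mappings: the mailbox/misc cause gets its own IVAR
 * entry, and every Rx queue is mapped onto the single queue vector the VF
 * has available (see the comment in the queue loop below).
 */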
static void
txgbevf_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t q_idx;
	uint32_t vector_idx = TXGBE_MISC_VEC_ID;
	uint32_t base = TXGBE_MISC_VEC_ID;

	/* Configure VF other cause ivar */
	txgbevf_set_ivar_map(hw, -1, 1, vector_idx);

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vector_idx = TXGBE_RX_VEC_START;
	}

	/* Configure all RX queues of VF */
	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
		/* Force all queues to use vector 0,
		 * as TXGBE_VF_MAXMSIVECOTR = 1
		 */
		txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
		intr_handle->intr_vec[q_idx] = vector_idx;
		if (vector_idx < base + intr_handle->nb_efd - 1)
			vector_idx++;
	}

	/* As the RX queue setting above shows, all queues use vector 0.
	 * Set only the ITR value of TXGBE_MISC_VEC_ID.
	 */
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
	     TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | TXGBE_ITR_WRDSA);
}
static int
txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int err;

	/*
	 * On a VF, adding the same MAC address again is not an idempotent
	 * operation. Trap this case to avoid exhausting the [very limited]
	 * set of PF resources used to store VF MAC addresses.
	 */
	if (memcmp(hw->mac.perm_addr, mac_addr,
		   sizeof(struct rte_ether_addr)) == 0)
		return -1;
	err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
	if (err != 0)
		PMD_DRV_LOG(ERR, "Unable to add MAC address "
			    "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5],
			    err);
	return err;
}
static void
txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;
	struct rte_ether_addr *mac_addr;
	uint32_t i;
	int err;

	/*
	 * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
	 * not support the deletion of a given MAC address.
	 * Instead, it requires deleting all MAC addresses and then adding
	 * back all MAC addresses except the one to be deleted.
	 */
	(void)txgbevf_set_uc_addr_vf(hw, 0, NULL);

	/*
	 * Add again all MAC addresses, with the exception of the deleted one
	 * and of the permanent MAC address.
	 */
	for (i = 0, mac_addr = dev->data->mac_addrs;
	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
		/* Skip the deleted MAC address */
		if (i == index)
			continue;
		/* Skip NULL MAC addresses */
		if (rte_is_zero_ether_addr(mac_addr))
			continue;
		/* Skip the permanent MAC address */
		if (memcmp(perm_addr, mac_addr,
			   sizeof(struct rte_ether_addr)) == 0)
			continue;
		err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
		if (err != 0)
			PMD_DRV_LOG(ERR,
				    "Adding again MAC address "
				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
				    "err=%d",
				    mac_addr->addr_bytes[0],
				    mac_addr->addr_bytes[1],
				    mac_addr->addr_bytes[2],
				    mac_addr->addr_bytes[3],
				    mac_addr->addr_bytes[4],
				    mac_addr->addr_bytes[5],
				    err);
	}
}
static int
txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *addr)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);

	return 0;
}
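/*
 * MTU update: max_frame below is the worst-case frame size on the wire
 * (MTU plus Ethernet header and CRC). A larger MTU is refused when
 * scattered Rx is disabled and such a frame would no longer fit into a
 * single Rx buffer.
 */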
static int
txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct txgbe_hw *hw;
	uint32_t max_frame = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

	hw = TXGBE_DEV_HW(dev);

	if (mtu < RTE_ETHER_MIN_MTU ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -EINVAL;

	/* refuse an mtu that requires the support of scattered packets when
	 * this feature has not been enabled before
	 */
	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
	    (max_frame + 2 * TXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
		return -EINVAL;

	/*
	 * When supported by the underlying PF driver, use the TXGBE_VF_SET_MTU
	 * request of version 2.0 of the mailbox API.
	 * For now, use the TXGBE_VF_SET_LPE request of version 1.0
	 * of the mailbox API.
	 */
	if (txgbevf_rlpml_set_vf(hw, max_frame))
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;

	return 0;
}
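/*
 * Promiscuous/allmulticast control is a request to the PF through the
 * mailbox "xcast mode" message; the PF may refuse it (for example when the
 * negotiated mailbox API is too old), which is reported as -ENOTSUP.
 */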
static int
txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_PROMISC)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}
static int
txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}
static int
txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}
static int
txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}
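/*
 * Handle a message pending in the mailbox. Only the PF reset notification
 * matters here: it is acknowledged with a dummy mailbox read and propagated
 * to the application as RTE_ETH_EVENT_INTR_RESET.
 */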
static void txgbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	u32 in_msg = 0;

	/* peek at the message first */
	in_msg = rd32(hw, TXGBE_VFMBX);

	/* PF reset VF event */
	if (in_msg == TXGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (txgbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}
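/*
 * Interrupt handling is split in two steps: *_get_status() masks the VF
 * interrupts and latches the cause from the read-on-clear VFICR register,
 * and *_action() then processes the mailbox and re-enables interrupts.
 */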
static int
txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbevf_intr_disable(dev);

	/* read-on-clear nic registers here */
	eicr = rd32(hw, TXGBE_VFICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= TXGBE_VFICR_MASK;
	/* Workaround for ICR lost */
	intr->flags |= TXGBE_FLAG_MAILBOX;

	return 0;
}
static int
txgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbevf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	txgbevf_intr_enable(dev);

	return 0;
}
static void
txgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbevf_dev_interrupt_get_status(dev);
	txgbevf_dev_interrupt_action(dev);
}
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops txgbevf_eth_dev_ops = {
	.dev_configure = txgbevf_dev_configure,
	.link_update = txgbevf_dev_link_update,
	.stats_get = txgbevf_dev_stats_get,
	.xstats_get = txgbevf_dev_xstats_get,
	.stats_reset = txgbevf_dev_stats_reset,
	.xstats_reset = txgbevf_dev_stats_reset,
	.xstats_get_names = txgbevf_dev_xstats_get_names,
	.promiscuous_enable = txgbevf_dev_promiscuous_enable,
	.promiscuous_disable = txgbevf_dev_promiscuous_disable,
	.allmulticast_enable = txgbevf_dev_allmulticast_enable,
	.allmulticast_disable = txgbevf_dev_allmulticast_disable,
	.dev_infos_get = txgbevf_dev_info_get,
	.mtu_set = txgbevf_dev_set_mtu,
	.vlan_filter_set = txgbevf_vlan_filter_set,
	.vlan_strip_queue_set = txgbevf_vlan_strip_queue_set,
	.vlan_offload_set = txgbevf_vlan_offload_set,
	.rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add = txgbevf_add_mac_addr,
	.mac_addr_remove = txgbevf_remove_mac_addr,
	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
	.rxq_info_get = txgbe_rxq_info_get,
	.txq_info_get = txgbe_txq_info_get,
	.mac_addr_set = txgbevf_set_default_mac_addr,
	.reta_update = txgbe_dev_rss_reta_update,
	.reta_query = txgbe_dev_rss_reta_query,
	.rss_hash_update = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
};
RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");