/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>

#include "base/vmxnet3_defs.h"

#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define PROCESS_SYS_EVENTS 0

#define VMXNET3_TX_MAX_SEG UINT8_MAX

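/*
 * Tx/Rx offload capabilities advertised to the application through
 * dev_infos_get(); see vmxnet3_dev_info_get() below.
 */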
#define VMXNET3_TX_OFFLOAD_CAP		\
	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
	 DEV_TX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_TSO |	\
	 DEV_TX_OFFLOAD_MULTI_SEGS)

#define VMXNET3_RX_OFFLOAD_CAP		\
	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
	 DEV_RX_OFFLOAD_SCATTER |	\
	 DEV_RX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_LRO |	\
	 DEV_RX_OFFLOAD_JUMBO_FRAME |	\
	 DEV_RX_OFFLOAD_CRC_STRIP)

static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				     int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
					struct rte_eth_xstat_name *xstats,
					unsigned int n);
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);

int vmxnet3_logtype_init;
int vmxnet3_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure = vmxnet3_dev_configure,
	.dev_start = vmxnet3_dev_start,
	.dev_stop = vmxnet3_dev_stop,
	.dev_close = vmxnet3_dev_close,
	.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update = vmxnet3_dev_link_update,
	.stats_get = vmxnet3_dev_stats_get,
	.xstats_get_names = vmxnet3_dev_xstats_get_names,
	.xstats_get = vmxnet3_dev_xstats_get,
	.mac_addr_set = vmxnet3_mac_addr_set,
	.dev_infos_get = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release = vmxnet3_dev_tx_queue_release,
};

struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
};

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
};

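/*
 * Reserve a memzone named "<driver>_<port>_<post_string>" backed by
 * IOVA-contiguous memory. When reuse is false, any zone left over from a
 * previous configuration is freed and re-reserved so its size can change;
 * when reuse is true, an already existing zone is returned as is.
 */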
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id,
		 uint16_t align, bool reuse)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%d_%s",
		 dev->device->driver->name, dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (!reuse) {
		if (mz)
			rte_memzone_free(mz);
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
				RTE_MEMZONE_IOVA_CONTIG, align);
	}

	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

/*
 * This function is based on vmxnet3_disable_intr()
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}

static void
vmxnet3_enable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
}

/*
 * Gets tx data ring descriptor size.
 */
static uint16_t
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
{
	uint16 txdata_desc_size;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
}

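/*
 * Per-port initialization: hook up the Rx/Tx burst functions, negotiate the
 * device and UPT revisions through BAR1, read the permanent MAC address and
 * record the initial link state.
 */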
/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));

	/* set the initial link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}

static int
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (hw->adapter_stopped == 0)
		vmxnet3_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
}

static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
}

static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
};

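/*
 * Reserve the shared memory the device reads at activation time: the
 * Vmxnet3_DriverShared area, the Tx/Rx queue descriptors and, when RSS is
 * requested, the UPT1_RSSConf structure.
 */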
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
		return -EINVAL;
	}

	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->iova;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket.
	 *
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->iova;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->iova;
	}

	return 0;
}

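/*
 * Program the unicast MAC address: the low four bytes go into the MACL
 * register and the remaining two bytes into MACH.
 */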
static void
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
{
	uint32_t val;

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     addr[0], addr[1], addr[2],
		     addr[3], addr[4], addr[5]);

	memcpy(&val, addr, 4);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	memcpy(&val, addr + 4, 2);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}

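/*
 * Describe the Rx mempool memory to the device as a set of memory regions.
 * Mempools shared by several Rx queues are reported only once, with a queue
 * bitmask recording every user of each region.
 */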
static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->iova;
	}

	num = hw->num_rx_queues;

	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}

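/*
 * Fill the Vmxnet3_DriverShared/Vmxnet3_DSDevRead area consumed by the
 * device on VMXNET3_CMD_ACTIVATE_DEV: driver and guest information, MTU,
 * per-queue ring addresses and sizes, interrupt configuration and the
 * requested offload features (checksum, LRO, RSS, VLAN).
 */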
static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	uint32_t i;
	int ret;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;

	/*
	 * Set number of interrupts to 1
	 * PMD by default disables all the interrupts but this is MUST
	 * to activate device. It needs at least one interrupt for
	 * link events to handle
	 */
	hw->num_intrs = devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];

		txq->shared = &hw->tqd_start[i];

		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx = txq->comp_ring.intr_idx;
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		rxq->shared = &hw->rqd_start[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		}
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* RxMode set to 0 of VMXNET3_RXM_xxx */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;
	}

	ret = vmxnet3_dev_vlan_offload_set(dev,
			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);

	return VMXNET3_SUCCESS;
}

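/*
 * Start sequence: hand the shared-area address to the device through
 * DSAL/DSAH, issue VMXNET3_CMD_ACTIVATE_DEV, register the Rx memory
 * regions, fill the Rx rings and finally program the Rx mode.
 */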
/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init. Other wise it might fail
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		/* Setup interrupt callback  */
		rte_intr_callback_register(&pci_dev->intr_handle,
					   vmxnet3_interrupt_handler, dev);

		if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
			PMD_INIT_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -EINVAL;
	}

	/* Setup memory region for rx buffers */
	ret = vmxnet3_dev_setup_memreg(dev);
	if (ret == 0) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		if (ret != 0)
			PMD_INIT_LOG(DEBUG,
				     "Failed in setup memory region cmd\n");
		ret = 0;
	} else {
		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
		return ret;
	}

	hw->adapter_stopped = FALSE;

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	if (dev->data->dev_conf.intr_conf.lsc) {
		vmxnet3_enable_intr(hw);

		/*
		 * Update link state from device since this won't be
		 * done upon starting with lsc in use. This is done
		 * only after enabling interrupts to avoid any race
		 * where the link state could change without an
		 * interrupt being fired.
		 */
		__vmxnet3_dev_link_update(dev, 0);
	}

	return VMXNET3_SUCCESS;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		rte_intr_disable(&pci_dev->intr_handle);

		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     vmxnet3_interrupt_handler, dev);
	}

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(dev, &link);
}

/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	hw->adapter_stopped = 1;
}

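/*
 * Device counters are reset by VMXNET3_CMD_ACTIVATE_DEV, so the counters
 * captured in saved_tx_stats/saved_rx_stats before the last activation are
 * added back by the helpers below.
 */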
static void
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_TxStats *res)
{
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
		((r)->f = (h)->tqd_start[(i)].stats.f +	\
			(h)->saved_tx_stats[(i)].f)

	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_UPDATE_TX_STAT
}

static void
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_RxStats *res)
{
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
		((r)->f = (h)->rqd_start[(i)].stats.f +	\
			(h)->saved_rx_stats[(i)].f)

	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_UPDATE_RX_STAT
}

static void
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
{
	unsigned int i;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
}

static int
vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
			     struct rte_eth_xstat_name *xstats_names,
			     unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (!xstats_names || n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!dev->data->rx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", i,
				 vmxnet3_rxq_stat_strings[t].name);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (!dev->data->tx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "tx_q%u_%s", i,
				 vmxnet3_txq_stat_strings[t].name);
			count++;
		}
	}

	return count;
}

static int
vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
				vmxnet3_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
				vmxnet3_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}

static int
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_hw_tx_stats_get(hw, i, &txStats);

		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;

		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);

		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		stats->imissed += rxStats.pktsRxOutOfBuf;
	}

	return 0;
}

static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
		     struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
		.nb_align = 1,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_align = 1,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
	};

	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
	dev_info->tx_queue_offload_capa = 0;
}

static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
		return ptypes;
	return NULL;
}

static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
	vmxnet3_write_mac(hw, mac_addr->addr_bytes);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			  __rte_unused int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t ret;

	memset(&link, 0, sizeof(link));

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1)
		link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;

	return rte_eth_linkstatus_set(dev, &link);
}

static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)
		return -1;

	return __vmxnet3_dev_link_update(dev, wait_to_complete);
}

/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
{
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

	if (set)
		rxConf->rxMode = rxConf->rxMode | feature;
	else
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	else
		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}

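/*
 * The VLAN filter table is mirrored in hw->shadow_vfta so the active table
 * can be restored when promiscuous mode is turned off.
 */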
/* Enable/disable filter on vlan */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}

static int
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
		else
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		else
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	return 0;
}

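/*
 * Handle the events reported in the ECR register: link change, Tx/Rx queue
 * errors, device implementation change and debug events.
 */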
static void
vmxnet3_process_events(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t events = hw->shared->ecr;

	if (!events)
		return;

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK) {
		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
		if (vmxnet3_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
	}

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "tq error 0x%x",
				    hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "rq error 0x%x",
				    hw->rqd_start->status.error);

		/* Have to reset the device */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_DRV_LOG(DEBUG, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
}

static void
vmxnet3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	vmxnet3_process_events(dev);

	if (rte_intr_enable(&pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(vmxnet3_init_log)
{
	vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
	if (vmxnet3_logtype_init >= 0)
		rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
	vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
	if (vmxnet3_logtype_driver >= 0)
		rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
}