 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define PROCESS_SYS_EVENTS 0

#define VMXNET3_TX_MAX_SEG	UINT8_MAX
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
	int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
	struct rte_eth_stats *stats);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats,
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
	struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
	uint16_t vid, int on);
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
	struct ether_addr *mac_addr);

#if PROCESS_SYS_EVENTS == 1
static void vmxnet3_process_events(struct vmxnet3_hw *);
 * The set of PCI devices this driver supports
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0

static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure = vmxnet3_dev_configure,
	.dev_start = vmxnet3_dev_start,
	.dev_stop = vmxnet3_dev_stop,
	.dev_close = vmxnet3_dev_close,
	.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update = vmxnet3_dev_link_update,
	.stats_get = vmxnet3_dev_stats_get,
	.xstats_get_names = vmxnet3_dev_xstats_get_names,
	.xstats_get = vmxnet3_dev_xstats_get,
	.mac_addr_set = vmxnet3_mac_addr_set,
	.dev_infos_get = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release = vmxnet3_dev_tx_queue_release,
struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
	const char *post_string, int socket_id,
	uint16_t align, bool reuse)
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%d_%s",
		dev->device->driver->name, dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	rte_memzone_free(mz);
	return rte_memzone_reserve_aligned(z_name, size, socket_id,

	return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *   - On success, zero.
 *   - On failure, negative value.
vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
	struct rte_eth_link *link)
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)

 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer to be saved with the link status.
 *   - On success, zero.
 *   - On failure, negative value.
vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
	struct rte_eth_link *link)
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
 * This function is based on vmxnet3_disable_intr()
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);

 * Gets tx data ring descriptor size.
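 * The size reported by the device through VMXNET3_CMD_GET_TXDATA_DESC_SIZE is
 * validated against the supported range; if it is out of range or misaligned,
 * the default sizeof(struct Vmxnet3_TxDataDesc) is used instead.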
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
	uint16 txdata_desc_size;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
		VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;

 * It returns 0 on success.
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
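	/*
	 * Pick the highest device revision supported by both the device and
	 * this driver, and acknowledge it back to the device through VRRS.
	 */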
317 if (ver & (1 << VMXNET3_REV_3)) {
318 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
320 hw->version = VMXNET3_REV_3 + 1;
321 } else if (ver & (1 << VMXNET3_REV_2)) {
322 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
324 hw->version = VMXNET3_REV_2 + 1;
325 } else if (ver & (1 << VMXNET3_REV_1)) {
326 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
328 hw->version = VMXNET3_REV_1 + 1;
330 PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
334 PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
336 /* Check UPT version compatibility with driver. */
337 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
338 PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
340 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
342 PMD_INIT_LOG(ERR, "Incompatible UPT version.");
346 /* Getting MAC Address */
347 mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
348 mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
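	/* MACL holds the low four bytes of the permanent MAC, MACH the last two. */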
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
		VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
			"Failed to allocate %d bytes needed to store MAC addresses",
			ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->perm_addr,
		&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	if (hw->adapter_stopped == 0)
		vmxnet3_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);

static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);

static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
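/*
 * Validate the requested queue configuration and reserve the DMA zones the
 * device reads at activation time: the Vmxnet3_DriverShared area, the Tx/Rx
 * queue descriptor array and, when RSS is requested, the UPT1_RSSConf block.
 */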
vmxnet3_dev_configure(struct rte_eth_dev *dev)
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
	if (size > UINT16_MAX)

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
		"shared", rte_socket_id(), 8, 1);
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->phys_addr;

	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
		VMXNET3_QUEUE_DESC_ALIGN, 0);
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->phys_addr;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
			"rss_conf", rte_socket_id(),
			RTE_CACHE_LINE_SIZE, 1);
				"ERROR: Creating rss_conf structure zone");
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->phys_addr;
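/*
 * Program the unicast MAC address into the device: the first four bytes go
 * to the MACL register and the remaining two to MACH.
 */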
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
		"Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		addr[0], addr[1], addr[2],
		addr[3], addr[4], addr[5]);

	val = *(const uint32_t *)addr;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	val = (addr[5] << 8) | addr[4];
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
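/*
 * Describe the RX mempools to the device as a Vmxnet3_MemRegs list so that
 * buffer memory can be referenced by region index. Duplicate mempools shared
 * by several queues are collapsed into a single region whose queue bitmasks
 * cover all users, and the resulting list is handed to the device through
 * the variable-length command configuration (varConf).
 */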
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->phys_addr;

	num = hw->num_rx_queues;

	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {

	for (i = 0; i < num; i++) {
			Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

				(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
			mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
				STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
			mr->txQueueBits = index[i];
			mr->rxQueueBits = index[i];

				"index: %u startPA: %" PRIu64 " length: %u, "
				j, mr->startPA, mr->length, mr->rxQueueBits);

	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;
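/*
 * Populate the Vmxnet3_DriverShared area consumed by the device at
 * activation: driver and guest OS information, MTU, queue descriptor
 * location, interrupt configuration, per-queue ring addresses and sizes,
 * and the offload features (checksum, LRO, RSS, VLAN) derived from the
 * port configuration.
 */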
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;
	 * Set the number of interrupts to 1. The PMD disables all interrupts,
	 * but at least one must be advertised to activate the device and to
	 * handle link events; it is disabled again after activation if it is
	 * not needed.
	devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];

		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx = txq->comp_ring.intr_idx;
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));

	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	/* rxMode starts cleared; VMXNET3_RXM_xxx bits are set later */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (dev->data->dev_conf.rxmode.enable_lro) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;

	vmxnet3_dev_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);

	vmxnet3_write_mac(hw, hw->perm_addr);

	return VMXNET3_SUCCESS;
 * Configure device link speed and set up the link.
 * Must be called after eth_vmxnet3_dev_init(); otherwise it might fail.
 * It returns 0 on success.
vmxnet3_dev_start(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
		VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
		VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");

	/* Setup memory region for rx buffers */
	ret = vmxnet3_dev_setup_memreg(dev);
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
773 "Failed in setup memory region cmd\n");
776 PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
779 /* Disable interrupts */
780 vmxnet3_disable_intr(hw);
783 * Load RX queues with blank mbufs and update next2fill index for device
784 * Update RxMode of the device
786 ret = vmxnet3_dev_rxtx_init(dev);
787 if (ret != VMXNET3_SUCCESS) {
788 PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
792 hw->adapter_stopped = FALSE;
794 /* Setting proper Rx Mode and issue Rx Mode Update command */
795 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
798 * Don't need to handle events for now
800 #if PROCESS_SYS_EVENTS == 1
801 events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
802 PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
803 vmxnet3_process_events(hw);
805 return VMXNET3_SUCCESS;
809 * Stop device: disable rx and tx functions to allow for reconfiguring.
812 vmxnet3_dev_stop(struct rte_eth_dev *dev)
814 struct rte_eth_link link;
815 struct vmxnet3_hw *hw = dev->data->dev_private;
817 PMD_INIT_FUNC_TRACE();
819 if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already stopped.");
	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);

 * Reset and stop device.
vmxnet3_dev_close(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	hw->adapter_stopped = 1;
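/*
 * Per-queue TX counters: add the values currently reported in the queue
 * descriptor to the snapshot saved before the last device activation, since
 * CMD_ACTIVATE_DEV resets the hardware counters.
 */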
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
	struct UPT1_TxStats *res)
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
	((r)->f = (h)->tqd_start[(i)].stats.f +		\
		(h)->saved_tx_stats[(i)].f)

	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_UPDATE_TX_STAT
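/* RX counterpart: current queue descriptor counters plus the saved snapshot. */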
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
	struct UPT1_RxStats *res)
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
	((r)->f = (h)->rqd_start[(i)].stats.f +		\
		(h)->saved_rx_stats[(i)].f)

	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_UPDATE_RX_STAT
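/*
 * Snapshot all per-queue hardware counters. Called from vmxnet3_dev_start()
 * right before CMD_ACTIVATE_DEV resets them, so that the totals reported by
 * the stats callbacks keep accumulating across restarts.
 */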
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);

vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (!xstats_names || n < nstats)

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!dev->data->rx_queues[i])

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				vmxnet3_rxq_stat_strings[t].name);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (!dev->data->tx_queues[i])

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				vmxnet3_txq_stat_strings[t].name);

vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
				vmxnet3_rxq_stat_strings[t].offset);
			xstats[count].id = count;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
				vmxnet3_txq_stat_strings[t].offset);
			xstats[count].id = count;
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_hw_tx_stats_get(hw, i, &txStats);

		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;

		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);

		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,

	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)

vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
/* return 0 means link status changed, -1 means not changed */
vmxnet3_dev_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link old = { 0 }, link;

	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)

	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_read_link_status(dev, &old);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

		link.link_status = ETH_LINK_UP;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_10G;
		link.link_autoneg = ETH_LINK_FIXED;
	vmxnet3_dev_atomic_write_link_status(dev, &link);

	return (old.link_status == link.link_status) ? -1 : 0;
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

		rxConf->rxMode = rxConf->rxMode | feature;
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
		VMXNET3_CMD_UPDATE_VLAN_FILTERS);

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
		VMXNET3_CMD_UPDATE_VLAN_FILTERS);

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);

/* Enable/disable filter on vlan */
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)

	/* set in hardware */
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
		VMXNET3_CMD_UPDATE_VLAN_FILTERS);
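/*
 * Apply VLAN offload changes: toggle hardware VLAN stripping through the
 * UPT1_F_RXVLAN feature bit and reload (or open up) the VLAN filter table,
 * then notify the device with the corresponding update commands.
 */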
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			VMXNET3_CMD_UPDATE_FEATURE);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			VMXNET3_CMD_UPDATE_VLAN_FILTERS);
#if PROCESS_SYS_EVENTS == 1
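/*
 * Drain and acknowledge pending device events (compiled in only when
 * PROCESS_SYS_EVENTS is enabled): link changes, Tx/Rx queue errors, device
 * implementation change and debug events are read from ECR and logged.
 */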
vmxnet3_process_events(struct vmxnet3_hw *hw)
	uint32_t events = hw->shared->ecr;

		PMD_INIT_LOG(ERR, "No events to process");

	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
			"Process events in %s(): VMXNET3_ECR_LINK event",

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "tq error 0x%x",
				hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "rq error 0x%x",
				hw->rqd_start->status.error);
		/* Reset the device */
1298 if (events & VMXNET3_ECR_DIC)
1299 PMD_INIT_LOG(ERR, "Device implementation change event.");
1301 if (events & VMXNET3_ECR_DEBUG)
1302 PMD_INIT_LOG(ERR, "Debug event generated by device.");
1306 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
1307 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
1308 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");