/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
#define VMXNET3_TX_MAX_SEG UINT8_MAX
#define VMXNET3_TX_OFFLOAD_CAP \
	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
	 RTE_ETH_TX_OFFLOAD_TCP_TSO | \
	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define VMXNET3_RX_OFFLOAD_CAP \
	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
	 RTE_ETH_RX_OFFLOAD_SCATTER | \
	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
	 RTE_ETH_RX_OFFLOAD_TCP_LRO | \
	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
static int vmxnet3_dev_close(struct rte_eth_dev *dev);
static int vmxnet3_dev_reset(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				     int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
					struct rte_eth_xstat_name *xstats,
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned int n);
static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
				struct rte_ether_addr *mac_addr);
static void vmxnet3_process_events(struct rte_eth_dev *dev);
static void vmxnet3_interrupt_handler(void *param);
vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 * The set of PCI devices this driver supports
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure = vmxnet3_dev_configure,
	.dev_start = vmxnet3_dev_start,
	.dev_stop = vmxnet3_dev_stop,
	.dev_close = vmxnet3_dev_close,
	.dev_reset = vmxnet3_dev_reset,
	.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update = vmxnet3_dev_link_update,
	.stats_get = vmxnet3_dev_stats_get,
	.xstats_get_names = vmxnet3_dev_xstats_get_names,
	.xstats_get = vmxnet3_dev_xstats_get,
	.stats_reset = vmxnet3_dev_stats_reset,
	.mac_addr_set = vmxnet3_mac_addr_set,
	.dev_infos_get = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.mtu_set = vmxnet3_dev_mtu_set,
	.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release = vmxnet3_dev_tx_queue_release,
	.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
	.reta_update = vmxnet3_rss_reta_update,
	.reta_query = vmxnet3_rss_reta_query,
struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id,
		 uint16_t align, bool reuse)
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
		 dev->data->port_id, post_string);
	mz = rte_memzone_lookup(z_name);
			rte_memzone_free(mz);
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
				RTE_MEMZONE_IOVA_CONTIG, align);
	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
 * Enable the given interrupt
vmxnet3_enable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
	PMD_INIT_FUNC_TRACE();
	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 0);
 * Disable the given interrupt
vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
	PMD_INIT_FUNC_TRACE();
	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1);
 * Enable all intrs used by the device
vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	PMD_INIT_FUNC_TRACE();
	devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
	if (hw->intr.lsc_only) {
		vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
		for (i = 0; i < hw->intr.num_intrs; i++)
			vmxnet3_enable_intr(hw, i);
 * Disable all intrs used by the device
vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
	PMD_INIT_FUNC_TRACE();
	hw->shared->devRead.intrConf.intrCtrl |=
		rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < hw->intr.num_intrs; i++)
		vmxnet3_disable_intr(hw, i);
 * Gets tx data ring descriptor size.
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
	uint16 txdata_desc_size;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
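	/*
	 * Fall back to the default data ring descriptor size if the value
	 * reported by the device is out of range or not size-aligned.
	 */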
	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
 * It returns 0 on success.
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;
	struct rte_eth_link link;
	static const struct rte_mbuf_dynfield vmxnet3_segs_dynfield_desc = {
		.name = VMXNET3_SEGS_DYNFIELD_NAME,
		.size = sizeof(vmxnet3_segs_dynfield_t),
		.align = __alignof__(vmxnet3_segs_dynfield_t),
	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* extra mbuf field is required to guess MSS */
	vmxnet3_segs_dynfield_offset =
		rte_mbuf_dynfield_register(&vmxnet3_segs_dynfield_desc);
	if (vmxnet3_segs_dynfield_offset < 0) {
		PMD_INIT_LOG(ERR, "Cannot register mbuf field.");
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;
	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
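	/*
	 * VRRS exposes one bit per hardware revision the device supports;
	 * pick the highest revision the driver also knows and write it back
	 * to select it.
	 */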
	if (ver & (1 << VMXNET3_REV_5)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
		hw->version = VMXNET3_REV_5 + 1;
	} else if (ver & (1 << VMXNET3_REV_4)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
		hw->version = VMXNET3_REV_4 + 1;
	} else if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
		hw->version = VMXNET3_REV_1 + 1;
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
	PMD_INIT_LOG(INFO, "Using device v%d", hw->version);
	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);
	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
			    &eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);
	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
	/* clear snapshot stats */
	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
	/* set the initial link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_10G;
	link.link_autoneg = RTE_ETH_LINK_FIXED;
	rte_eth_linkstatus_set(eth_dev, &link);
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	if (hw->adapter_stopped == 0) {
		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	int nvec = 1; /* for link event */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
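	/* GET_CONF_INTR reports the interrupt type in bits 0-1 and the
	 * interrupt mask mode in bits 2-3.
	 */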
	hw->intr.type = cfg & 0x3;
	hw->intr.mask_mode = (cfg >> 2) & 0x3;
	if (hw->intr.type == VMXNET3_IT_AUTO)
		hw->intr.type = VMXNET3_IT_MSIX;
	if (hw->intr.type == VMXNET3_IT_MSIX) {
		/* only support shared tx/rx intr */
		if (hw->num_tx_queues != hw->num_rx_queues)
		nvec += hw->num_rx_queues;
		hw->intr.num_intrs = nvec;
	/* the tx/rx queue interrupt will be disabled */
	hw->intr.num_intrs = 2;
	hw->intr.lsc_only = TRUE;
	PMD_INIT_LOG(INFO, "Enabled MSI-X with %d vectors", hw->intr.num_intrs);
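/*
 * Set up the shared memory the device reads at activation time: the
 * Vmxnet3_DriverShared area, the Tx/Rx queue descriptors and, when RSS is
 * requested, the UPT1_RSSConf block.
 */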
vmxnet3_dev_configure(struct rte_eth_dev *dev)
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	PMD_INIT_FUNC_TRACE();
	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
	if (size > UINT16_MAX)
	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
	memset(mz->addr, 0, mz->len);
	hw->shared = mz->addr;
	hw->sharedPA = mz->iova;
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
	memset(mz->addr, 0, mz->len);
	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
	hw->queueDescPA = mz->iova;
	hw->queue_desc_len = (uint16_t)size;
	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
				     "ERROR: Creating rss_conf structure zone");
		memset(mz->addr, 0, mz->len);
		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->iova;
	vmxnet3_alloc_intr_resources(dev);
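/*
 * Program the unicast MAC address: the low four bytes go into the MACL
 * register, the remaining two into MACH.
 */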
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
		  "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
		  addr[0], addr[1], addr[2],
		  addr[3], addr[4], addr[5]);
	memcpy(&val, addr, 4);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
	memcpy(&val, addr + 4, 2);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
 * Configure the hardware to generate MSI-X interrupts.
 * If setting up MSIx fails, try setting up MSI (only 1 interrupt vector
 * which will be disabled to allow lsc to work).
 * Returns 0 on success and -1 otherwise.
vmxnet3_configure_msix(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t intr_vector;
	hw->intr.event_intr_idx = 0;
	/* only vfio-pci driver can support interrupt mode. */
	if (!rte_intr_cap_multiple(intr_handle) ||
	    dev->data->dev_conf.intr_conf.rxq == 0)
	intr_vector = dev->data->nb_rx_queues;
	if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "At most %d intr queues supported",
			     VMXNET3_MAX_RX_QUEUES);
	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
		PMD_INIT_LOG(ERR, "Failed to enable fastpath event fd");
	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
				     dev->data->nb_rx_queues);
			rte_intr_efd_disable(intr_handle);
	if (!rte_intr_allow_others(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
		rte_intr_vec_list_free(intr_handle);
		rte_intr_efd_disable(intr_handle);
	/* if we cannot allocate one MSI-X vector per queue, don't enable
	if (hw->intr.num_intrs !=
	    (rte_intr_nb_efd_get(intr_handle) + 1)) {
		PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
			     rte_intr_nb_efd_get(intr_handle) + 1);
		rte_intr_vec_list_free(intr_handle);
		rte_intr_efd_disable(intr_handle);
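	/* Vector 0 is reserved for device events (event_intr_idx above),
	 * so Rx queue i is mapped to MSI-X vector i + 1.
	 */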
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
	for (i = 0; i < hw->intr.num_intrs; i++)
		hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
	PMD_INIT_LOG(INFO, "intr type %u, mode %u, %u vectors allocated",
		     hw->intr.type, hw->intr.mask_mode, hw->intr.num_intrs);
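/*
 * Build the Vmxnet3_MemRegs list passed to the device with
 * VMXNET3_CMD_REGISTER_MEMREGS: one region per distinct mbuf mempool
 * backing the Rx queues, with bitmasks of the queues that use it.
 */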
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;
	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;
		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);
		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->iova;
	num = hw->num_rx_queues;
	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
	for (i = 0; i < num; i++) {
		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     j, mr->startPA, mr->length, mr->rxQueueBits);
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);
	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);
	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;
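/*
 * Populate the Vmxnet3_DriverShared area (driver info, per-queue
 * descriptors, interrupt and Rx filter configuration) that the device
 * reads when VMXNET3_CMD_ACTIVATE_DEV is issued.
 */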
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;
	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;
	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
		txq->shared = &hw->tqd_start[i];
		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;
		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		if (hw->intr.lsc_only)
			tqd->conf.intrIdx = 1;
				rte_intr_vec_list_index_get(intr_handle,
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
		rxq->shared = &hw->rqd_start[i];
		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		if (hw->intr.lsc_only)
			rqd->conf.intrIdx = 1;
				rte_intr_vec_list_index_get(intr_handle,
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = hw->intr.num_intrs;
	for (i = 0; i < hw->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
	devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
	/* RxMode set to 0 of VMXNET3_RXM_xxx */
	devRead->rxFilterConf.rxMode = 0;
	/* Setting up feature flags */
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;
	ret = vmxnet3_dev_vlan_offload_set(dev,
			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
	return VMXNET3_SUCCESS;
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init(); otherwise it might fail.
 * It returns 0 on success.
vmxnet3_dev_start(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	PMD_INIT_FUNC_TRACE();
	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);
	/* configure MSI-X */
	ret = vmxnet3_configure_msix(dev);
		/* revert to lsc only */
		hw->intr.num_intrs = 2;
		hw->intr.lsc_only = TRUE;
	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
	/* Setup memory region for rx buffers */
	ret = vmxnet3_dev_setup_memreg(dev);
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
				     "Failed in setup memory region cmd\n");
		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
	if (VMXNET3_VERSION_GE_4(hw) &&
	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		/* Check for additional RSS */
		ret = vmxnet3_v4_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS) {
			PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
	hw->adapter_stopped = FALSE;
	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
	/* Setup interrupt callback */
	rte_intr_callback_register(dev->intr_handle,
				   vmxnet3_interrupt_handler, dev);
	if (rte_intr_enable(dev->intr_handle) < 0) {
		PMD_INIT_LOG(ERR, "interrupt enable failed");
	/* enable all intrs */
	vmxnet3_enable_all_intrs(hw);
	vmxnet3_process_events(dev);
	 * Update link state from device since this won't be
	 * done upon starting with lsc in use. This is done
	 * only after enabling interrupts to avoid any race
	 * where the link state could change without an
	 * interrupt being fired.
	__vmxnet3_dev_link_update(dev, 0);
	return VMXNET3_SUCCESS;
 * Stop device: disable rx and tx functions to allow for reconfiguring.
vmxnet3_dev_stop(struct rte_eth_dev *dev)
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	PMD_INIT_FUNC_TRACE();
	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already stopped.");
		/* Unregister has lock to make sure there is no running cb.
		 * This has to happen first since vmxnet3_interrupt_handler
		 * reenables interrupts by calling vmxnet3_enable_intr
		ret = rte_intr_callback_unregister(intr_handle,
						   vmxnet3_interrupt_handler,
	} while (ret == -EAGAIN);
		PMD_DRV_LOG(ERR, "Error attempting to unregister intr cb: %d",
	PMD_INIT_LOG(DEBUG, "Disabled %d intr callbacks", ret);
	/* disable interrupts */
	vmxnet3_disable_all_intrs(hw);
	rte_intr_disable(intr_handle);
	/* Clean datapath event and queue/vector mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	vmxnet3_dev_clear_queues(dev);
	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_10G;
	link.link_autoneg = RTE_ETH_LINK_FIXED;
	rte_eth_linkstatus_set(dev, &link);
	hw->adapter_stopped = 1;
	dev->data->dev_started = 0;
vmxnet3_free_queues(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		vmxnet3_dev_rx_queue_release(dev, i);
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		vmxnet3_dev_tx_queue_release(dev, i);
	dev->data->nb_tx_queues = 0;
 * Reset and stop device.
vmxnet3_dev_close(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	ret = vmxnet3_dev_stop(dev);
	vmxnet3_free_queues(dev);
vmxnet3_dev_reset(struct rte_eth_dev *dev)
	ret = eth_vmxnet3_dev_uninit(dev);
	ret = eth_vmxnet3_dev_init(dev);
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_TxStats *res)
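/*
 * The device clears its counters on VMXNET3_CMD_ACTIVATE_DEV, so add the
 * totals saved just before the last (re)activation to the live counters.
 */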
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r) \
	((r)->f = (h)->tqd_start[(i)].stats.f + \
		(h)->saved_tx_stats[(i)].f)
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
#undef VMXNET3_UPDATE_TX_STAT
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_RxStats *res)
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r) \
	((r)->f = (h)->rqd_start[(i)].stats.f + \
		(h)->saved_rx_stats[(i)].f)
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
#undef VMXNET3_UPDATE_RX_STAT
vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_TxStats *res)
	vmxnet3_hw_tx_stats_get(hw, q, res);
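	/*
	 * Subtract the totals snapshotted at the last stats_reset so the
	 * counters are reported relative to that reset.
	 */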
#define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r) \
	((r)->f -= (h)->snapshot_tx_stats[(i)].f)
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
#undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_RxStats *res)
	vmxnet3_hw_rx_stats_get(hw, q, res);
#define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r) \
	((r)->f -= (h)->snapshot_rx_stats[(i)].f)
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
#undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
			     struct rte_eth_xstat_name *xstats_names,
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
	if (!xstats_names || n < nstats)
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!dev->data->rx_queues[i])
		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 vmxnet3_rxq_stat_strings[t].name);
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (!dev->data->tx_queues[i])
		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 vmxnet3_txq_stat_strings[t].name);
vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
				vmxnet3_rxq_stat_strings[t].offset);
			xstats[count].id = count;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
				vmxnet3_txq_stat_strings[t].offset);
			xstats[count].id = count;
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_tx_stats_get(hw, i, &txStats);
		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;
		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;
		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_rx_stats_get(hw, i, &rxStats);
		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;
		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;
		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		stats->imissed += rxStats.pktsRxOutOfBuf;
vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats = {0};
	struct UPT1_RxStats rxStats = {0};
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
		memcpy(&hw->snapshot_tx_stats[i], &txStats,
		       sizeof(hw->snapshot_tx_stats[0]));
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
		       sizeof(hw->snapshot_rx_stats[0]));
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->min_mtu = VMXNET3_MIN_MTU;
	dev_info->max_mtu = VMXNET3_MAX_MTU;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
	if (VMXNET3_VERSION_GE_4(hw)) {
		dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
	dev_info->tx_queue_offload_capa = 0;
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
			    dev->data->port_id);
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
/* return 0 means link status changed, -1 means not changed */
__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			  __rte_unused int wait_to_complete)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	memset(&link, 0, sizeof(link));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		link.link_status = RTE_ETH_LINK_UP;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		link.link_speed = RTE_ETH_SPEED_NUM_10G;
		link.link_autoneg = RTE_ETH_LINK_FIXED;
	return rte_eth_linkstatus_set(dev, &link);
vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)
	return __vmxnet3_dev_link_update(dev, wait_to_complete);
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
		rxConf->rxMode = rxConf->rxMode | feature;
		rxConf->rxMode = rxConf->rxMode & (~feature);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
/* Enable/disable filter on vlan */
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;
	/* save state for restore */
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
	/* set in hardware */
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
vmxnet3_process_events(struct rte_eth_dev *dev)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t events = hw->shared->ecr;
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK) {
		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
		if (vmxnet3_dev_link_update(dev, 0) == 0)
			rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		if (hw->tqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "tq error 0x%x",
				    hw->tqd_start->status.error);
		if (hw->rqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "rq error 0x%x",
				    hw->rqd_start->status.error);
		/* Have to reset the device */
	if (events & VMXNET3_ECR_DIC)
		PMD_DRV_LOG(DEBUG, "Device implementation change event.");
	if (events & VMXNET3_ECR_DEBUG)
		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
vmxnet3_interrupt_handler(void *param)
	struct rte_eth_dev *dev = param;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	PMD_INIT_FUNC_TRACE();
	vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
	RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
	vmxnet3_process_events(dev);
	vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	vmxnet3_enable_intr(hw,
			    rte_intr_vec_list_index_get(dev->intr_handle,
vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
	struct vmxnet3_hw *hw = dev->data->dev_private;
	vmxnet3_disable_intr(hw,
			     rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
	if (reta_size != dev_rss_conf->indTableSize) {
			"The size of hash lookup table configured (%d) doesn't match "
			"the supported number (%d)",
			reta_size, dev_rss_conf->indTableSize);
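	/*
	 * reta_conf is organized as groups of RTE_ETH_RETA_GROUP_SIZE (64)
	 * entries; only entries whose mask bit is set are applied.
	 */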
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & RTE_BIT64(shift))
			dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_RSSIDT);
vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
	if (reta_size != dev_rss_conf->indTableSize) {
			"Size of requested hash lookup table (%d) doesn't "
			"match the configured size (%d)",
			reta_size, dev_rss_conf->indTableSize);
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & RTE_BIT64(shift))
			reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];