net/vmxnet3: implement extended stats
[dpdk.git] / drivers / net / vmxnet3 / vmxnet3_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_ethdev_pci.h>
60 #include <rte_atomic.h>
61 #include <rte_string_fns.h>
62 #include <rte_malloc.h>
63 #include <rte_dev.h>
64
65 #include "base/vmxnet3_defs.h"
66
67 #include "vmxnet3_ring.h"
68 #include "vmxnet3_logs.h"
69 #include "vmxnet3_ethdev.h"
70
71 #define PROCESS_SYS_EVENTS 0
72
73 #define VMXNET3_TX_MAX_SEG      UINT8_MAX
74
75 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
76 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
77 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
78 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
79 static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
80 static void vmxnet3_dev_close(struct rte_eth_dev *dev);
81 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
82 static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
83 static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
84 static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
85 static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
86 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
87                                    int wait_to_complete);
88 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
89 static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
90                                   struct rte_eth_stats *stats);
91 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
92                                         struct rte_eth_xstat_name *xstats,
93                                         unsigned int n);
94 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
95                                   struct rte_eth_xstat *xstats, unsigned int n);
96 static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
97                                  struct rte_eth_dev_info *dev_info);
98 static const uint32_t *
99 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
100 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
101                                        uint16_t vid, int on);
102 static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
103 static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
104                                  struct ether_addr *mac_addr);
105
106 #if PROCESS_SYS_EVENTS == 1
107 static void vmxnet3_process_events(struct vmxnet3_hw *);
108 #endif
/*
 * The set of PCI devices this driver supports:
 * VMware's PCI vendor ID plus the vmxnet3 paravirtual NIC device ID.
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};
118
/*
 * Ethdev callback table for this PMD.  The rx/tx queue setup/release
 * handlers are not defined in this file; they come from the included
 * driver headers (presumably the rx/tx code — vmxnet3_rxtx).
 */
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure        = vmxnet3_dev_configure,
	.dev_start            = vmxnet3_dev_start,
	.dev_stop             = vmxnet3_dev_stop,
	.dev_close            = vmxnet3_dev_close,
	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update          = vmxnet3_dev_link_update,
	.stats_get            = vmxnet3_dev_stats_get,
	.xstats_get_names     = vmxnet3_dev_xstats_get_names,
	.xstats_get           = vmxnet3_dev_xstats_get,
	.mac_addr_set         = vmxnet3_mac_addr_set,
	.dev_infos_get        = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
};
142
/*
 * Maps an xstat display name to the byte offset of the corresponding
 * counter inside the per-queue stats struct (vmxnet3_txq_stats or
 * vmxnet3_rxq_stats), so xstats can be read generically by offset.
 */
struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total",         offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso",           offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full",       offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
};

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total",           offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err",             offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs",             offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
};
163
164 static const struct rte_memzone *
165 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
166                  const char *post_string, int socket_id,
167                  uint16_t align, bool reuse)
168 {
169         char z_name[RTE_MEMZONE_NAMESIZE];
170         const struct rte_memzone *mz;
171
172         snprintf(z_name, sizeof(z_name), "%s_%d_%s",
173                  dev->device->driver->name, dev->data->port_id, post_string);
174
175         mz = rte_memzone_lookup(z_name);
176         if (!reuse) {
177                 if (mz)
178                         rte_memzone_free(mz);
179                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
180                                                    0, align);
181         }
182
183         if (mz)
184                 return mz;
185
186         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
187 }
188
189 /**
190  * Atomically reads the link status information from global
191  * structure rte_eth_dev.
192  *
193  * @param dev
194  *   - Pointer to the structure rte_eth_dev to read from.
195  *   - Pointer to the buffer to be saved with the link status.
196  *
197  * @return
198  *   - On success, zero.
199  *   - On failure, negative value.
200  */
201
202 static int
203 vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
204                                     struct rte_eth_link *link)
205 {
206         struct rte_eth_link *dst = link;
207         struct rte_eth_link *src = &(dev->data->dev_link);
208
209         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
210                                 *(uint64_t *)src) == 0)
211                 return -1;
212
213         return 0;
214 }
215
216 /**
217  * Atomically writes the link status information into global
218  * structure rte_eth_dev.
219  *
220  * @param dev
221  *   - Pointer to the structure rte_eth_dev to write to.
222  *   - Pointer to the buffer to be saved with the link status.
223  *
224  * @return
225  *   - On success, zero.
226  *   - On failure, negative value.
227  */
228 static int
229 vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
230                                      struct rte_eth_link *link)
231 {
232         struct rte_eth_link *dst = &(dev->data->dev_link);
233         struct rte_eth_link *src = link;
234
235         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
236                                 *(uint64_t *)src) == 0)
237                 return -1;
238
239         return 0;
240 }
241
242 /*
243  * This function is based on vmxnet3_disable_intr()
244  */
245 static void
246 vmxnet3_disable_intr(struct vmxnet3_hw *hw)
247 {
248         int i;
249
250         PMD_INIT_FUNC_TRACE();
251
252         hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
253         for (i = 0; i < VMXNET3_MAX_INTRS; i++)
254                 VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
255 }
256
257 /*
258  * Gets tx data ring descriptor size.
259  */
260 static uint16_t
261 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
262 {
263         uint16 txdata_desc_size;
264
265         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
266                                VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
267         txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
268
269         return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
270                 txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
271                 txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
272                 sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
273 }
274
/*
 * Per-port initialization, invoked at PCI probe time.
 * Installs the ethdev ops and burst handlers, negotiates the device and
 * UPT revisions, reads the permanent MAC address, quiesces the device
 * and derives the tx/rx data ring descriptor sizes.
 * It returns 0 on success, negative errno on failure.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	/* Single queue defaults; vmxnet3_dev_configure() sets real counts. */
	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	/*
	 * Pick the highest device revision both sides support (3, 2, 1)
	 * and acknowledge it back to the device via the VRRS register.
	 */
	if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address: low register holds bytes 0-3, high 4-5. */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	/* v3+ devices report the tx data ring desc size; older use default */
	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	/* rx data ring only exists on v3+ devices */
	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));

	return 0;
}
389
390 static int
391 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
392 {
393         struct vmxnet3_hw *hw = eth_dev->data->dev_private;
394
395         PMD_INIT_FUNC_TRACE();
396
397         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
398                 return 0;
399
400         if (hw->adapter_stopped == 0)
401                 vmxnet3_dev_close(eth_dev);
402
403         eth_dev->dev_ops = NULL;
404         eth_dev->rx_pkt_burst = NULL;
405         eth_dev->tx_pkt_burst = NULL;
406         eth_dev->tx_pkt_prepare = NULL;
407
408         rte_free(eth_dev->data->mac_addrs);
409         eth_dev->data->mac_addrs = NULL;
410
411         return 0;
412 }
413
414 static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
415         struct rte_pci_device *pci_dev)
416 {
417         return rte_eth_dev_pci_generic_probe(pci_dev,
418                 sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
419 }
420
421 static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
422 {
423         return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
424 }
425
/* PCI driver registration: id table plus probe/remove entry points;
 * NEED_MAPPING requests BAR mapping before probe is called. */
static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
};
432
433 static int
434 vmxnet3_dev_configure(struct rte_eth_dev *dev)
435 {
436         const struct rte_memzone *mz;
437         struct vmxnet3_hw *hw = dev->data->dev_private;
438         size_t size;
439
440         PMD_INIT_FUNC_TRACE();
441
442         if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
443             dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
444                 PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
445                 return -EINVAL;
446         }
447
448         if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
449                 PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
450                 return -EINVAL;
451         }
452
453         size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
454                 dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
455
456         if (size > UINT16_MAX)
457                 return -EINVAL;
458
459         hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
460         hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
461
462         /*
463          * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
464          * on current socket
465          */
466         mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
467                               "shared", rte_socket_id(), 8, 1);
468
469         if (mz == NULL) {
470                 PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
471                 return -ENOMEM;
472         }
473         memset(mz->addr, 0, mz->len);
474
475         hw->shared = mz->addr;
476         hw->sharedPA = mz->phys_addr;
477
478         /*
479          * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
480          * on current socket.
481          *
482          * We cannot reuse this memzone from previous allocation as its size
483          * depends on the number of tx and rx queues, which could be different
484          * from one config to another.
485          */
486         mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
487                               VMXNET3_QUEUE_DESC_ALIGN, 0);
488         if (mz == NULL) {
489                 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
490                 return -ENOMEM;
491         }
492         memset(mz->addr, 0, mz->len);
493
494         hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
495         hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
496
497         hw->queueDescPA = mz->phys_addr;
498         hw->queue_desc_len = (uint16_t)size;
499
500         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
501                 /* Allocate memory structure for UPT1_RSSConf and configure */
502                 mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
503                                       "rss_conf", rte_socket_id(),
504                                       RTE_CACHE_LINE_SIZE, 1);
505                 if (mz == NULL) {
506                         PMD_INIT_LOG(ERR,
507                                      "ERROR: Creating rss_conf structure zone");
508                         return -ENOMEM;
509                 }
510                 memset(mz->addr, 0, mz->len);
511
512                 hw->rss_conf = mz->addr;
513                 hw->rss_confPA = mz->phys_addr;
514         }
515
516         return 0;
517 }
518
519 static void
520 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
521 {
522         uint32_t val;
523
524         PMD_INIT_LOG(DEBUG,
525                      "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
526                      addr[0], addr[1], addr[2],
527                      addr[3], addr[4], addr[5]);
528
529         val = *(const uint32_t *)addr;
530         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
531
532         val = (addr[5] << 8) | addr[4];
533         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
534 }
535
/*
 * Describe the rx mempool memory regions to the device.
 * Builds one Vmxnet3_MemoryRegion per distinct rx mempool (with a
 * bitmask of the queues sharing it) in the lazily allocated memRegs
 * zone, and points the shared-area cmdInfo at the result.  The caller
 * (vmxnet3_dev_start) issues VMXNET3_CMD_REGISTER_MEMREGS afterwards.
 * Returns 0 on success, -ENOMEM if the memRegs zone cannot be created.
 */
static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	/* Allocate the region table once; reuse it on later starts. */
	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		/* Worst case: one region per rx and tx queue. */
		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->phys_addr;
	}

	num = hw->num_rx_queues;

	/* Record each rx queue's mempool with its own queue bit. */
	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	/* Compact the surviving (deduplicated) mempools into memRegs. */
	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		/* NOTE(review): only the first chunk of the mempool's
		 * mem_list is registered, with its length clamped to
		 * INT32_MAX — confirm mempools here are single-chunk. */
		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	/* confLen covers only the j regions actually filled in
	 * (Vmxnet3_MemRegs already embeds the first region). */
	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}
621
/*
 * Populate the Vmxnet3_DriverShared area consumed by the device at
 * activation: magic/driver info, MTU and queue descriptor location,
 * per-queue ring configuration, interrupt setup, rx filter/feature
 * flags and the optional RSS configuration; finally programs the
 * permanent MAC address.  Called from vmxnet3_dev_start() before the
 * shared-area address is written to the device.
 * Returns VMXNET3_SUCCESS, or the error from vmxnet3_rss_configure().
 */
static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint32_t i;
	int ret;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt     = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA  = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues  = hw->num_tx_queues;
	devRead->misc.numRxQueues  = hw->num_rx_queues;

	/*
	 * Set number of interrupts to 1
	 * PMD disables all the interrupts but this is MUST to activate device
	 * It needs at least one interrupt for link events to handle
	 * So we'll disable it later after device activation if needed
	 */
	devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	/* Describe each tx queue's rings to the device; clear its stats. */
	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];

		tqd->ctrl.txNumDeferred  = 0;
		tqd->ctrl.txThreshold    = 1;
		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize   = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx      = txq->comp_ring.intr_idx;
		tqd->status.stopped    = TRUE;
		tqd->status.error      = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	/* Likewise for rx queues; the rx data ring is v3+ only. */
	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize    = rxq->comp_ring.size;
		rqd->conf.intrIdx         = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		}
		rqd->status.stopped       = TRUE;
		rqd->status.error         = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* RxMode set to 0 of VMXNET3_RXM_xxx */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (dev->data->dev_conf.rxmode.enable_lro) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA  = hw->rss_confPA;
	}

	vmxnet3_dev_vlan_offload_set(dev,
				     ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);

	vmxnet3_write_mac(hw, hw->perm_addr);

	return VMXNET3_SUCCESS;
}
729
/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init. Other wise it might fail
 * It returns 0 on success.
 *
 * Sequence: save HW stats (activation resets them), fill the shared
 * area, hand its address to the device, activate, optionally register
 * memory regions, disable interrupts, init rx/tx rings, set rx mode.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -EINVAL;
	}

	/* Setup memory region for rx buffers.
	 * Failures here are logged but deliberately non-fatal: ret is
	 * reset to 0 and start continues without registered regions. */
	ret = vmxnet3_dev_setup_memreg(dev);
	if (ret == 0) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		if (ret != 0)
			PMD_INIT_LOG(DEBUG,
				     "Failed in setup memory region cmd\n");
		ret = 0;
	} else {
		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
		return ret;
	}

	hw->adapter_stopped = FALSE;

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	/*
	 * Don't need to handle events for now
	 */
#if PROCESS_SYS_EVENTS == 1
	/* NOTE(review): 'events' is not declared anywhere visible; this
	 * branch would not compile if PROCESS_SYS_EVENTS were set to 1. */
	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
	vmxnet3_process_events(hw);
#endif
	return VMXNET3_SUCCESS;
}
807
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct vmxnet3_hw *hw = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        /* adapter_stopped is set to 1 by vmxnet3_dev_close(); guard
         * against stopping a device that was already closed.
         */
        if (hw->adapter_stopped == 1) {
                PMD_INIT_LOG(DEBUG, "Device already closed.");
                return;
        }

        /* disable interrupts */
        vmxnet3_disable_intr(hw);

        /* quiesce the device first */
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
        /* Detach the driver-shared memory area from the device. */
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

        /* reset the device */
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
        PMD_INIT_LOG(DEBUG, "Device reset.");
        /* NOTE(review): 0 here appears to mean "stopped but not closed";
         * only vmxnet3_dev_close() sets 1 — confirm intended semantics.
         */
        hw->adapter_stopped = 0;

        /* Free mbufs still held by the rx/tx rings. */
        vmxnet3_dev_clear_queues(dev);

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        vmxnet3_dev_atomic_write_link_status(dev, &link);
}
843
/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        /* Stop (quiesce + reset) the device, then mark it fully closed so
         * a subsequent vmxnet3_dev_stop() becomes a no-op.
         */
        vmxnet3_dev_stop(dev);
        hw->adapter_stopped = 1;
}
857
858 static void
859 vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
860                         struct UPT1_TxStats *res)
861 {
862 #define VMXNET3_UPDATE_TX_STAT(h, i, f, r)              \
863                 ((r)->f = (h)->tqd_start[(i)].stats.f + \
864                         (h)->saved_tx_stats[(i)].f)
865
866         VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
867         VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
868         VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
869         VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
870         VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
871         VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
872         VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
873         VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
874
875 #undef VMXNET3_UPDATE_TX_STAT
876 }
877
878 static void
879 vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
880                         struct UPT1_RxStats *res)
881 {
882 #define VMXNET3_UPDATE_RX_STAT(h, i, f, r)              \
883                 ((r)->f = (h)->rqd_start[(i)].stats.f + \
884                         (h)->saved_rx_stats[(i)].f)
885
886         VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
887         VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
888         VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
889         VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
890         VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
891         VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
892         VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
893         VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
894
895 #undef VMXNET3_UPDATE_RX_STATS
896 }
897
898 static void
899 vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
900 {
901         unsigned int i;
902
903         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
904
905         RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
906
907         for (i = 0; i < hw->num_tx_queues; i++)
908                 vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
909         for (i = 0; i < hw->num_rx_queues; i++)
910                 vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
911 }
912
913 static int
914 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
915                              struct rte_eth_xstat_name *xstats_names,
916                              unsigned int n)
917 {
918         unsigned int i, t, count = 0;
919         unsigned int nstats =
920                 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
921                 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
922
923         if (!xstats_names || n < nstats)
924                 return nstats;
925
926         for (i = 0; i < dev->data->nb_rx_queues; i++) {
927                 if (!dev->data->rx_queues[i])
928                         continue;
929
930                 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
931                         snprintf(xstats_names[count].name,
932                                  sizeof(xstats_names[count].name),
933                                  "rx_q%u_%s", i,
934                                  vmxnet3_rxq_stat_strings[t].name);
935                         count++;
936                 }
937         }
938
939         for (i = 0; i < dev->data->nb_tx_queues; i++) {
940                 if (!dev->data->tx_queues[i])
941                         continue;
942
943                 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
944                         snprintf(xstats_names[count].name,
945                                  sizeof(xstats_names[count].name),
946                                  "tx_q%u_%s", i,
947                                  vmxnet3_txq_stat_strings[t].name);
948                         count++;
949                 }
950         }
951
952         return count;
953 }
954
955 static int
956 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
957                        unsigned int n)
958 {
959         unsigned int i, t, count = 0;
960         unsigned int nstats =
961                 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
962                 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
963
964         if (n < nstats)
965                 return nstats;
966
967         for (i = 0; i < dev->data->nb_rx_queues; i++) {
968                 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
969
970                 if (rxq == NULL)
971                         continue;
972
973                 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
974                         xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
975                                 vmxnet3_rxq_stat_strings[t].offset);
976                         xstats[count].id = count;
977                         count++;
978                 }
979         }
980
981         for (i = 0; i < dev->data->nb_tx_queues; i++) {
982                 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
983
984                 if (txq == NULL)
985                         continue;
986
987                 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
988                         xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
989                                 vmxnet3_txq_stat_strings[t].offset);
990                         xstats[count].id = count;
991                         count++;
992                 }
993         }
994
995         return count;
996 }
997
998 static void
999 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1000 {
1001         unsigned int i;
1002         struct vmxnet3_hw *hw = dev->data->dev_private;
1003         struct UPT1_TxStats txStats;
1004         struct UPT1_RxStats rxStats;
1005
1006         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1007
1008         RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1009         for (i = 0; i < hw->num_tx_queues; i++) {
1010                 vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1011
1012                 stats->q_opackets[i] = txStats.ucastPktsTxOK +
1013                         txStats.mcastPktsTxOK +
1014                         txStats.bcastPktsTxOK;
1015
1016                 stats->q_obytes[i] = txStats.ucastBytesTxOK +
1017                         txStats.mcastBytesTxOK +
1018                         txStats.bcastBytesTxOK;
1019
1020                 stats->opackets += stats->q_opackets[i];
1021                 stats->obytes += stats->q_obytes[i];
1022                 stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
1023         }
1024
1025         RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
1026         for (i = 0; i < hw->num_rx_queues; i++) {
1027                 vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1028
1029                 stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
1030                         rxStats.mcastPktsRxOK +
1031                         rxStats.bcastPktsRxOK;
1032
1033                 stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
1034                         rxStats.mcastBytesRxOK +
1035                         rxStats.bcastBytesRxOK;
1036
1037                 stats->ipackets += stats->q_ipackets[i];
1038                 stats->ibytes += stats->q_ibytes[i];
1039
1040                 stats->q_errors[i] = rxStats.pktsRxError;
1041                 stats->ierrors += rxStats.pktsRxError;
1042                 stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
1043         }
1044 }
1045
1046 static void
1047 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1048                      struct rte_eth_dev_info *dev_info)
1049 {
1050         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1051
1052         dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1053         dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1054         dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1055         dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1056         dev_info->speed_capa = ETH_LINK_SPEED_10G;
1057         dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1058
1059         dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
1060         dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1061
1062         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1063                 .nb_max = VMXNET3_RX_RING_MAX_SIZE,
1064                 .nb_min = VMXNET3_DEF_RX_RING_SIZE,
1065                 .nb_align = 1,
1066         };
1067
1068         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1069                 .nb_max = VMXNET3_TX_RING_MAX_SIZE,
1070                 .nb_min = VMXNET3_DEF_TX_RING_SIZE,
1071                 .nb_align = 1,
1072                 .nb_seg_max = VMXNET3_TX_MAX_SEG,
1073                 .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1074         };
1075
1076         dev_info->rx_offload_capa =
1077                 DEV_RX_OFFLOAD_VLAN_STRIP |
1078                 DEV_RX_OFFLOAD_UDP_CKSUM |
1079                 DEV_RX_OFFLOAD_TCP_CKSUM |
1080                 DEV_RX_OFFLOAD_TCP_LRO;
1081
1082         dev_info->tx_offload_capa =
1083                 DEV_TX_OFFLOAD_VLAN_INSERT |
1084                 DEV_TX_OFFLOAD_TCP_CKSUM |
1085                 DEV_TX_OFFLOAD_UDP_CKSUM |
1086                 DEV_TX_OFFLOAD_TCP_TSO;
1087 }
1088
1089 static const uint32_t *
1090 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1091 {
1092         static const uint32_t ptypes[] = {
1093                 RTE_PTYPE_L3_IPV4_EXT,
1094                 RTE_PTYPE_L3_IPV4,
1095                 RTE_PTYPE_UNKNOWN
1096         };
1097
1098         if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1099                 return ptypes;
1100         return NULL;
1101 }
1102
/* Set the device's primary MAC address (ethdev .mac_addr_set). */
static void
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;

        /* Program the new address into the device. */
        vmxnet3_write_mac(hw, mac_addr->addr_bytes);
}
1110
1111 /* return 0 means link status changed, -1 means not changed */
1112 static int
1113 vmxnet3_dev_link_update(struct rte_eth_dev *dev,
1114                         __rte_unused int wait_to_complete)
1115 {
1116         struct vmxnet3_hw *hw = dev->data->dev_private;
1117         struct rte_eth_link old = { 0 }, link;
1118         uint32_t ret;
1119
1120         /* Link status doesn't change for stopped dev */
1121         if (dev->data->dev_started == 0)
1122                 return -1;
1123
1124         memset(&link, 0, sizeof(link));
1125         vmxnet3_dev_atomic_read_link_status(dev, &old);
1126
1127         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
1128         ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1129
1130         if (ret & 0x1) {
1131                 link.link_status = ETH_LINK_UP;
1132                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1133                 link.link_speed = ETH_SPEED_NUM_10G;
1134                 link.link_autoneg = ETH_LINK_SPEED_FIXED;
1135         }
1136
1137         vmxnet3_dev_atomic_write_link_status(dev, &link);
1138
1139         return (old.link_status == link.link_status) ? -1 : 0;
1140 }
1141
1142 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
1143 static void
1144 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1145 {
1146         struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1147
1148         if (set)
1149                 rxConf->rxMode = rxConf->rxMode | feature;
1150         else
1151                 rxConf->rxMode = rxConf->rxMode & (~feature);
1152
1153         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1154 }
1155
1156 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1157 static void
1158 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1159 {
1160         struct vmxnet3_hw *hw = dev->data->dev_private;
1161         uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1162
1163         memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1164         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1165
1166         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1167                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1168 }
1169
1170 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1171 static void
1172 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1173 {
1174         struct vmxnet3_hw *hw = dev->data->dev_private;
1175         uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1176
1177         memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1178         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1179         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1180                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1181 }
1182
1183 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1184 static void
1185 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1186 {
1187         struct vmxnet3_hw *hw = dev->data->dev_private;
1188
1189         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1190 }
1191
1192 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1193 static void
1194 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1195 {
1196         struct vmxnet3_hw *hw = dev->data->dev_private;
1197
1198         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1199 }
1200
1201 /* Enable/disable filter on vlan */
1202 static int
1203 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1204 {
1205         struct vmxnet3_hw *hw = dev->data->dev_private;
1206         struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1207         uint32_t *vf_table = rxConf->vfTable;
1208
1209         /* save state for restore */
1210         if (on)
1211                 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1212         else
1213                 VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1214
1215         /* don't change active filter if in promiscuous mode */
1216         if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
1217                 return 0;
1218
1219         /* set in hardware */
1220         if (on)
1221                 VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
1222         else
1223                 VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
1224
1225         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1226                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1227         return 0;
1228 }
1229
1230 static void
1231 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1232 {
1233         struct vmxnet3_hw *hw = dev->data->dev_private;
1234         Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1235         uint32_t *vf_table = devRead->rxFilterConf.vfTable;
1236
1237         if (mask & ETH_VLAN_STRIP_MASK) {
1238                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1239                         devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1240                 else
1241                         devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1242
1243                 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1244                                        VMXNET3_CMD_UPDATE_FEATURE);
1245         }
1246
1247         if (mask & ETH_VLAN_FILTER_MASK) {
1248                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1249                         memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1250                 else
1251                         memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1252
1253                 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1254                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1255         }
1256 }
1257
#if PROCESS_SYS_EVENTS == 1
/*
 * Drain and report pending device events from the Event Change Register
 * (ECR). Events are currently only logged; no recovery action is taken.
 */
static void
vmxnet3_process_events(struct vmxnet3_hw *hw)
{
        uint32_t events = hw->shared->ecr;

        if (!events) {
                PMD_INIT_LOG(ERR, "No events to process");
                return;
        }

        /*
         * ECR bits when written with 1b are cleared. Hence write
         * events back to ECR so that the bits which were set will be reset.
         */
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

        /* Check if link state has changed */
        if (events & VMXNET3_ECR_LINK)
                PMD_INIT_LOG(ERR,
                             "Process events in %s(): VMXNET3_ECR_LINK event",
                             __func__);

        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
                VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);

                if (hw->tqd_start->status.stopped)
                        PMD_INIT_LOG(ERR, "tq error 0x%x",
                                     hw->tqd_start->status.error);

                if (hw->rqd_start->status.stopped)
                        PMD_INIT_LOG(ERR, "rq error 0x%x",
                                     hw->rqd_start->status.error);

                /* TODO: the device should be reset here, but no reset is
                 * performed yet.
                 */
        }

        if (events & VMXNET3_ECR_DIC)
                PMD_INIT_LOG(ERR, "Device implementation change event.");

        if (events & VMXNET3_ECR_DEBUG)
                PMD_INIT_LOG(ERR, "Debug event generated by device.");
}
#endif
1305
/* Register the driver with the EAL PCI infrastructure: PMD entry point,
 * matching PCI id table, and the kernel modules it can bind through.
 */
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");