ethdev: add namespace
[dpdk.git] / drivers / net / vmxnet3 / vmxnet3_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <fcntl.h>
13 #include <inttypes.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <ethdev_driver.h>
30 #include <ethdev_pci.h>
31 #include <rte_string_fns.h>
32 #include <rte_malloc.h>
33 #include <rte_dev.h>
34
35 #include "base/vmxnet3_defs.h"
36
37 #include "vmxnet3_ring.h"
38 #include "vmxnet3_logs.h"
39 #include "vmxnet3_ethdev.h"
40
41 #define VMXNET3_TX_MAX_SEG      UINT8_MAX
42
43 #define VMXNET3_TX_OFFLOAD_CAP          \
44         (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |       \
45          RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
46          RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
47          RTE_ETH_TX_OFFLOAD_TCP_TSO |   \
48          RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
49
50 #define VMXNET3_RX_OFFLOAD_CAP          \
51         (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |        \
52          RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
53          RTE_ETH_RX_OFFLOAD_SCATTER |   \
54          RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
55          RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
56          RTE_ETH_RX_OFFLOAD_TCP_LRO |   \
57          RTE_ETH_RX_OFFLOAD_RSS_HASH)
58
59 int vmxnet3_segs_dynfield_offset = -1;
60
61 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
62 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
63 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
64 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
65 static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
66 static int vmxnet3_dev_close(struct rte_eth_dev *dev);
67 static int vmxnet3_dev_reset(struct rte_eth_dev *dev);
68 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
69 static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
71 static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
72 static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
73 static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
74                                      int wait_to_complete);
75 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
76                                    int wait_to_complete);
77 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
78 static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
79                                   struct rte_eth_stats *stats);
80 static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
81 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
82                                         struct rte_eth_xstat_name *xstats,
83                                         unsigned int n);
84 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
85                                   struct rte_eth_xstat *xstats, unsigned int n);
86 static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
87                                 struct rte_eth_dev_info *dev_info);
88 static const uint32_t *
89 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
90 static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
91 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
92                                        uint16_t vid, int on);
93 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
94 static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
95                                  struct rte_ether_addr *mac_addr);
96 static void vmxnet3_process_events(struct rte_eth_dev *dev);
97 static void vmxnet3_interrupt_handler(void *param);
98 static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
99                                                 uint16_t queue_id);
100 static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
101                                                 uint16_t queue_id);
102
/*
 * The set of PCI devices this driver supports: a single VMware
 * vendor/device pair, terminated by a zeroed sentinel entry.
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};
112
/*
 * ethdev operations implemented by this PMD.  Queue setup/release
 * callbacks are implemented in the companion rxtx file; everything
 * else is defined in this file.
 */
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure        = vmxnet3_dev_configure,
	.dev_start            = vmxnet3_dev_start,
	.dev_stop             = vmxnet3_dev_stop,
	.dev_close            = vmxnet3_dev_close,
	.dev_reset            = vmxnet3_dev_reset,
	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update          = vmxnet3_dev_link_update,
	.stats_get            = vmxnet3_dev_stats_get,
	.xstats_get_names     = vmxnet3_dev_xstats_get_names,
	.xstats_get           = vmxnet3_dev_xstats_get,
	.stats_reset          = vmxnet3_dev_stats_reset,
	.mac_addr_set         = vmxnet3_mac_addr_set,
	.dev_infos_get        = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.mtu_set              = vmxnet3_dev_mtu_set,
	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
	.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
};
141
/*
 * Maps an xstats display name to the byte offset of the corresponding
 * counter inside the per-queue stats structure.
 */
struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total",         offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso",           offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full",       offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
};

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total",           offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err",             offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs",             offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
};
162
163 static const struct rte_memzone *
164 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
165                  const char *post_string, int socket_id,
166                  uint16_t align, bool reuse)
167 {
168         char z_name[RTE_MEMZONE_NAMESIZE];
169         const struct rte_memzone *mz;
170
171         snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
172                         dev->data->port_id, post_string);
173
174         mz = rte_memzone_lookup(z_name);
175         if (!reuse) {
176                 if (mz)
177                         rte_memzone_free(mz);
178                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
179                                 RTE_MEMZONE_IOVA_CONTIG, align);
180         }
181
182         if (mz)
183                 return mz;
184
185         return rte_memzone_reserve_aligned(z_name, size, socket_id,
186                         RTE_MEMZONE_IOVA_CONTIG, align);
187 }
188
/*
 * Enable (unmask) the given interrupt vector.
 *
 * IMR registers are laid out 8 bytes apart per vector; writing 0
 * unmasks the vector (see vmxnet3_disable_intr, which writes 1).
 */
static void
vmxnet3_enable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
{
	PMD_INIT_FUNC_TRACE();
	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
198
/*
 * Disable (mask) the given interrupt vector.
 *
 * Mirror of vmxnet3_enable_intr(): writing 1 to the vector's IMR slot
 * masks it.
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
{
	PMD_INIT_FUNC_TRACE();
	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
208
209 /*
210  * Enable all intrs used by the device
211  */
212 static void
213 vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
214 {
215         Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
216
217         PMD_INIT_FUNC_TRACE();
218
219         devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
220
221         if (hw->intr.lsc_only) {
222                 vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
223         } else {
224                 int i;
225
226                 for (i = 0; i < hw->intr.num_intrs; i++)
227                         vmxnet3_enable_intr(hw, i);
228         }
229 }
230
/*
 * Disable all interrupts used by the device: set the global
 * "disable all" control bit, then mask every vector individually.
 *
 * NOTE(review): this loop bounds on hw->num_intrs while
 * vmxnet3_enable_all_intrs() uses hw->intr.num_intrs -- presumably
 * these are kept equal, but confirm the two fields cannot diverge.
 */
static void
vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |=
		rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < hw->num_intrs; i++)
		vmxnet3_disable_intr(hw, i);
}
246
247 /*
248  * Gets tx data ring descriptor size.
249  */
250 static uint16_t
251 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
252 {
253         uint16 txdata_desc_size;
254
255         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
256                                VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
257         txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
258
259         return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
260                 txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
261                 txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
262                 sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
263 }
264
/*
 * Per-device initialization, invoked from the PCI probe path.
 *
 * Registers the mbuf dynfield used to carry segment counts, negotiates
 * the hardware and UPT versions with the device, reads the permanent
 * MAC address, and seeds driver-private state (data ring sizes, stats
 * shadows, initial link status).  It returns 0 on success, a negative
 * errno on failure.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;
	struct rte_eth_link link;
	static const struct rte_mbuf_dynfield vmxnet3_segs_dynfield_desc = {
		.name = VMXNET3_SEGS_DYNFIELD_NAME,
		.size = sizeof(vmxnet3_segs_dynfield_t),
		.align = __alignof__(vmxnet3_segs_dynfield_t),
	};

	PMD_INIT_FUNC_TRACE();

	/* Wire up the ops table and fast-path burst functions. */
	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* extra mbuf field is required to guess MSS */
	vmxnet3_segs_dynfield_offset =
		rte_mbuf_dynfield_register(&vmxnet3_segs_dynfield_desc);
	if (vmxnet3_segs_dynfield_offset < 0) {
		PMD_INIT_LOG(ERR, "Cannot register mbuf field.");
		return -rte_errno;
	}

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	/* BAR0 carries the interrupt mask registers, BAR1 the command regs. */
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	/* Single-queue defaults; dev_configure may raise these later. */
	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	/*
	 * Negotiate the highest device revision both sides support by
	 * writing the chosen revision bit back to VRRS.  hw->version is
	 * stored 1-based (REV_x + 1).
	 */
	if (ver & (1 << VMXNET3_REV_4)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_4);
		hw->version = VMXNET3_REV_4 + 1;
	} else if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(INFO, "Using device v%d", hw->version);

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address: low 4 bytes in MACL, high 2 in MACH. */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	/* Rev >= 3 devices report their Tx data descriptor size. */
	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));

	/* clear snapshot stats */
	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));

	/* set the initial link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_10G;
	link.link_autoneg = RTE_ETH_LINK_FIXED;
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}
408
409 static int
410 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
411 {
412         struct vmxnet3_hw *hw = eth_dev->data->dev_private;
413
414         PMD_INIT_FUNC_TRACE();
415
416         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
417                 return 0;
418
419         if (hw->adapter_stopped == 0) {
420                 PMD_INIT_LOG(DEBUG, "Device has not been closed.");
421                 return -EBUSY;
422         }
423
424         return 0;
425 }
426
/*
 * PCI probe callback: allocate an ethdev with a vmxnet3_hw private
 * area and run eth_vmxnet3_dev_init() on it.
 */
static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
}
433
/* PCI remove callback: tear the ethdev down via eth_vmxnet3_dev_uninit(). */
static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
}
438
/*
 * PCI driver descriptor.  NEED_MAPPING requests BAR mapping from the
 * bus; INTR_LSC advertises link-state-change interrupt support.
 */
static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
};
445
/*
 * Query the device's interrupt configuration and decide how many
 * vectors to use.
 *
 * With MSI-X and matching tx/rx queue counts, one vector per Rx queue
 * plus one for link events is used.  Otherwise the driver falls back
 * to two vectors with only link-state (lsc) interrupts enabled.
 */
static void
vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t cfg;
	int nvec = 1; /* for link event */

	/* intr settings */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
	/* Low 2 bits: interrupt type; next 2 bits: mask mode. */
	hw->intr.type = cfg & 0x3;
	hw->intr.mask_mode = (cfg >> 2) & 0x3;

	if (hw->intr.type == VMXNET3_IT_AUTO)
		hw->intr.type = VMXNET3_IT_MSIX;

	if (hw->intr.type == VMXNET3_IT_MSIX) {
		/* only support shared tx/rx intr */
		if (hw->num_tx_queues != hw->num_rx_queues)
			goto msix_err;

		nvec += hw->num_rx_queues;
		hw->intr.num_intrs = nvec;
		return;
	}

msix_err:
	/* the tx/rx queue interrupt will be disabled */
	hw->intr.num_intrs = 2;
	hw->intr.lsc_only = TRUE;
	/*
	 * NOTE(review): this log runs only on the fallback path, yet
	 * still reads "Enabled MSI-X" -- presumably intentional wording
	 * for the 2-vector lsc-only mode; confirm before changing.
	 */
	PMD_INIT_LOG(INFO, "Enabled MSI-X with %d vectors", hw->intr.num_intrs);
}
479
/*
 * ethdev dev_configure callback.
 *
 * Validates the requested queue counts, then reserves the memzones the
 * device reads via DMA: the Vmxnet3_DriverShared area, the per-queue
 * descriptor block, and (for RSS mode) the UPT1_RSSConf structure.
 * Finally sizes the interrupt vectors.  Returns 0 on success or a
 * negative errno.
 */
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	/* RSS implies delivering the RSS hash in the mbuf. */
	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
		return -EINVAL;
	}

	/*
	 * NOTE(review): the sizeofs look swapped relative to the queue
	 * counts (rx count * TxQueueDesc + tx count * RxQueueDesc);
	 * presumably both descriptor structs have the same padded size,
	 * making the sum correct either way -- confirm against
	 * base/vmxnet3_defs.h before "fixing".
	 */
	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	/* queue_desc_len below is a uint16_t. */
	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->iova;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket.
	 *
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	/* Tx queue descriptors first, Rx descriptors immediately after. */
	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->iova;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->iova;
	}

	vmxnet3_alloc_intr_resources(dev);

	return 0;
}
570
571 static void
572 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
573 {
574         uint32_t val;
575
576         PMD_INIT_LOG(DEBUG,
577                      "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
578                      addr[0], addr[1], addr[2],
579                      addr[3], addr[4], addr[5]);
580
581         memcpy(&val, addr, 4);
582         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
583
584         memcpy(&val, addr + 4, 2);
585         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
586 }
587
588 /*
589  * Configure the hardware to generate MSI-X interrupts.
590  * If setting up MSIx fails, try setting up MSI (only 1 interrupt vector
591  * which will be disabled to allow lsc to work).
592  *
593  * Returns 0 on success and -1 otherwise.
594  */
595 static int
596 vmxnet3_configure_msix(struct rte_eth_dev *dev)
597 {
598         struct vmxnet3_hw *hw = dev->data->dev_private;
599         struct rte_intr_handle *intr_handle = dev->intr_handle;
600         uint16_t intr_vector;
601         int i;
602
603         hw->intr.event_intr_idx = 0;
604
605         /* only vfio-pci driver can support interrupt mode. */
606         if (!rte_intr_cap_multiple(intr_handle) ||
607             dev->data->dev_conf.intr_conf.rxq == 0)
608                 return -1;
609
610         intr_vector = dev->data->nb_rx_queues;
611         if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
612                 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
613                              VMXNET3_MAX_RX_QUEUES);
614                 return -ENOTSUP;
615         }
616
617         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
618                 PMD_INIT_LOG(ERR, "Failed to enable fastpath event fd");
619                 return -1;
620         }
621
622         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
623                 intr_handle->intr_vec =
624                         rte_zmalloc("intr_vec",
625                                     dev->data->nb_rx_queues * sizeof(int), 0);
626                 if (intr_handle->intr_vec == NULL) {
627                         PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
628                                         dev->data->nb_rx_queues);
629                         rte_intr_efd_disable(intr_handle);
630                         return -ENOMEM;
631                 }
632         }
633
634         if (!rte_intr_allow_others(intr_handle) &&
635             dev->data->dev_conf.intr_conf.lsc != 0) {
636                 PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
637                 rte_free(intr_handle->intr_vec);
638                 intr_handle->intr_vec = NULL;
639                 rte_intr_efd_disable(intr_handle);
640                 return -1;
641         }
642
643         /* if we cannot allocate one MSI-X vector per queue, don't enable
644          * interrupt mode.
645          */
646         if (hw->intr.num_intrs != (intr_handle->nb_efd + 1)) {
647                 PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
648                                 hw->intr.num_intrs, intr_handle->nb_efd + 1);
649                 rte_free(intr_handle->intr_vec);
650                 intr_handle->intr_vec = NULL;
651                 rte_intr_efd_disable(intr_handle);
652                 return -1;
653         }
654
655         for (i = 0; i < dev->data->nb_rx_queues; i++)
656                 intr_handle->intr_vec[i] = i + 1;
657
658         for (i = 0; i < hw->intr.num_intrs; i++)
659                 hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
660
661         PMD_INIT_LOG(INFO, "intr type %u, mode %u, %u vectors allocated",
662                     hw->intr.type, hw->intr.mask_mode, hw->intr.num_intrs);
663
664         return 0;
665 }
666
/*
 * Describe the Rx mempool memory to the device via the memory-regions
 * variable configuration (VMXNET3_CMD_REGISTER_MEMREGS payload).
 *
 * One Vmxnet3_MemoryRegion is emitted per distinct mempool; when
 * several queues share a mempool, a single region is emitted whose
 * tx/rxQueueBits bitmask records all queues using it.  Returns 0 on
 * success, -ENOMEM if the backing memzone cannot be reserved.
 */
static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	/* Reserve the memRegs zone once; reused on later calls. */
	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		/* Worst case: one region per Rx and Tx queue. */
		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->iova;
	}

	num = hw->num_rx_queues;

	/* Collect each Rx queue's mempool; bit i marks queue i. */
	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	/* Emit one memory region per surviving (unique) mempool. */
	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		/*
		 * Only the first chunk of the mempool is described;
		 * length is clamped to INT32_MAX.
		 */
		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	/* Actual payload size: header plus (j - 1) extra regions. */
	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	/* Publish the variable-config pointer in the shared area. */
	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}
752
/*
 * Populate the Vmxnet3_DriverShared area that the device reads at
 * activation time: driver/GOS identification, MTU, per-queue ring
 * addresses and sizes, interrupt configuration, offload feature flags
 * and (optionally) the RSS configuration descriptor.
 *
 * Must run before VMXNET3_CMD_ACTIVATE_DEV is issued; the device
 * snapshots this structure when it is activated.
 *
 * Returns VMXNET3_SUCCESS, or a non-zero error from RSS/VLAN setup.
 */
static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	uint32_t i;
	int ret;

	hw->mtu = mtu;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt     = 1;

	/* NOTE(review): rte_le_to_cpu_32() looks inverted here; a CPU value
	 * stored into a device-read field would normally use cpu_to_le.
	 * On little-endian hosts both are identity — confirm before changing.
	 */
	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA  = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues  = hw->num_tx_queues;
	devRead->misc.numRxQueues  = hw->num_rx_queues;

	/* Publish each Tx queue's ring addresses/sizes and bind its
	 * interrupt vector; also link the queue back to its shared desc.
	 */
	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];

		txq->shared = &hw->tqd_start[i];

		tqd->ctrl.txNumDeferred  = 0;
		tqd->ctrl.txThreshold    = 1;
		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize   = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;

		/* With lsc_only, vector 0 is the event/LSC interrupt and all
		 * queues share vector 1; otherwise use the per-queue mapping.
		 */
		if (hw->intr.lsc_only)
			tqd->conf.intrIdx = 1;
		else
			tqd->conf.intrIdx = intr_handle->intr_vec[i];
		tqd->status.stopped = TRUE;
		tqd->status.error   = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	/* Same publication for the Rx queues (two command rings each). */
	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];

		rxq->shared = &hw->rqd_start[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize    = rxq->comp_ring.size;

		if (hw->intr.lsc_only)
			rqd->conf.intrIdx = 1;
		else
			rqd->conf.intrIdx = intr_handle->intr_vec[i];
		rqd->status.stopped = TRUE;
		rqd->status.error   = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* intr settings */
	devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = hw->intr.num_intrs;
	for (i = 0; i < hw->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
	/* Start with all interrupts masked; they are enabled in dev_start. */
	devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);

	/* RxMode set to 0 of VMXNET3_RXM_xxx */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		/* maxNumRxSG = 0: presumably "no SG limit" — TODO confirm
		 * against the vmxnet3 device spec.
		 */
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA  = hw->rss_confPA;
	}

	/* Sync VLAN strip/filter settings into the shared area. */
	ret = vmxnet3_dev_vlan_offload_set(dev,
			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);

	return VMXNET3_SUCCESS;
}
874
/*
 * Configure device link speed and set up link.
 * Must be called after eth_vmxnet3_dev_init; otherwise it might fail.
 * Returns 0 on success.
 */
880 static int
881 vmxnet3_dev_start(struct rte_eth_dev *dev)
882 {
883         int ret;
884         struct vmxnet3_hw *hw = dev->data->dev_private;
885
886         PMD_INIT_FUNC_TRACE();
887
888         /* Save stats before it is reset by CMD_ACTIVATE */
889         vmxnet3_hw_stats_save(hw);
890
891         /* configure MSI-X */
892         ret = vmxnet3_configure_msix(dev);
893         if (ret < 0) {
894                 /* revert to lsc only */
895                 hw->intr.num_intrs = 2;
896                 hw->intr.lsc_only = TRUE;
897         }
898
899         ret = vmxnet3_setup_driver_shared(dev);
900         if (ret != VMXNET3_SUCCESS)
901                 return ret;
902
903         /* Exchange shared data with device */
904         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
905                                VMXNET3_GET_ADDR_LO(hw->sharedPA));
906         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
907                                VMXNET3_GET_ADDR_HI(hw->sharedPA));
908
909         /* Activate device by register write */
910         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
911         ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
912
913         if (ret != 0) {
914                 PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
915                 return -EINVAL;
916         }
917
918         /* Setup memory region for rx buffers */
919         ret = vmxnet3_dev_setup_memreg(dev);
920         if (ret == 0) {
921                 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
922                                        VMXNET3_CMD_REGISTER_MEMREGS);
923                 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
924                 if (ret != 0)
925                         PMD_INIT_LOG(DEBUG,
926                                      "Failed in setup memory region cmd\n");
927                 ret = 0;
928         } else {
929                 PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
930         }
931
932         if (VMXNET3_VERSION_GE_4(hw) &&
933             dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
934                 /* Check for additional RSS  */
935                 ret = vmxnet3_v4_rss_configure(dev);
936                 if (ret != VMXNET3_SUCCESS) {
937                         PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
938                         return ret;
939                 }
940         }
941
942         /*
943          * Load RX queues with blank mbufs and update next2fill index for device
944          * Update RxMode of the device
945          */
946         ret = vmxnet3_dev_rxtx_init(dev);
947         if (ret != VMXNET3_SUCCESS) {
948                 PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
949                 return ret;
950         }
951
952         hw->adapter_stopped = FALSE;
953
954         /* Setting proper Rx Mode and issue Rx Mode Update command */
955         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
956
957         /* Setup interrupt callback  */
958         rte_intr_callback_register(dev->intr_handle,
959                                    vmxnet3_interrupt_handler, dev);
960
961         if (rte_intr_enable(dev->intr_handle) < 0) {
962                 PMD_INIT_LOG(ERR, "interrupt enable failed");
963                 return -EIO;
964         }
965
966         /* enable all intrs */
967         vmxnet3_enable_all_intrs(hw);
968
969         vmxnet3_process_events(dev);
970
971         /*
972          * Update link state from device since this won't be
973          * done upon starting with lsc in use. This is done
974          * only after enabling interrupts to avoid any race
975          * where the link state could change without an
976          * interrupt being fired.
977          */
978         __vmxnet3_dev_link_update(dev, 0);
979
980         return VMXNET3_SUCCESS;
981 }
982
983 /*
984  * Stop device: disable rx and tx functions to allow for reconfiguring.
985  */
static int
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Idempotent: a second stop is a no-op. */
	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already stopped.");
		return 0;
	}

	do {
		/* Unregister has lock to make sure there is no running cb.
		 * This has to happen first since vmxnet3_interrupt_handler
		 * reenables interrupts by calling vmxnet3_enable_intr
		 */
		/* (void *)-1 matches any cb_arg, per the
		 * rte_intr_callback_unregister convention; -EAGAIN means a
		 * callback is still executing, so retry.
		 */
		ret = rte_intr_callback_unregister(intr_handle,
						   vmxnet3_interrupt_handler,
						   (void *)-1);
	} while (ret == -EAGAIN);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "Error attempting to unregister intr cb: %d",
			    ret);

	/* On success, ret is the number of callbacks removed. */
	PMD_INIT_LOG(DEBUG, "Disabled %d intr callbacks", ret);

	/* disable interrupts */
	vmxnet3_disable_all_intrs(hw);

	rte_intr_disable(intr_handle);

	/* Clean datapath event and queue/vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* quiesce the device first, then unmap the shared area (DSAL/DSAH
	 * cleared to 0 so the device no longer references it).
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_10G;
	link.link_autoneg = RTE_ETH_LINK_FIXED;
	rte_eth_linkstatus_set(dev, &link);

	hw->adapter_stopped = 1;
	dev->data->dev_started = 0;

	return 0;
}
1052
1053 static void
1054 vmxnet3_free_queues(struct rte_eth_dev *dev)
1055 {
1056         int i;
1057
1058         PMD_INIT_FUNC_TRACE();
1059
1060         for (i = 0; i < dev->data->nb_rx_queues; i++)
1061                 vmxnet3_dev_rx_queue_release(dev, i);
1062         dev->data->nb_rx_queues = 0;
1063
1064         for (i = 0; i < dev->data->nb_tx_queues; i++)
1065                 vmxnet3_dev_tx_queue_release(dev, i);
1066         dev->data->nb_tx_queues = 0;
1067 }
1068
1069 /*
1070  * Reset and stop device.
1071  */
1072 static int
1073 vmxnet3_dev_close(struct rte_eth_dev *dev)
1074 {
1075         int ret;
1076         PMD_INIT_FUNC_TRACE();
1077         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1078                 return 0;
1079
1080         ret = vmxnet3_dev_stop(dev);
1081         vmxnet3_free_queues(dev);
1082
1083         return ret;
1084 }
1085
/* Reset the port by tearing the driver down and re-initializing it. */
static int
vmxnet3_dev_reset(struct rte_eth_dev *dev)
{
	int ret = eth_vmxnet3_dev_uninit(dev);

	if (ret != 0)
		return ret;

	return eth_vmxnet3_dev_init(dev);
}
1097
/*
 * Read cumulative Tx statistics for queue @q into @res: the device's
 * current counters (tqd_start[q].stats) plus the counters saved before
 * the last CMD_ACTIVATE reset them (saved_tx_stats[q]).
 */
static void
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_TxStats *res)
{
/* Sum one field from the live queue descriptor and the saved snapshot. */
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
		((r)->f = (h)->tqd_start[(i)].stats.f + \
			(h)->saved_tx_stats[(i)].f)

	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_UPDATE_TX_STAT
}
1117
/*
 * Read cumulative Rx statistics for queue @q into @res: the device's
 * current counters (rqd_start[q].stats) plus the counters saved before
 * the last CMD_ACTIVATE reset them (saved_rx_stats[q]).
 */
static void
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_RxStats *res)
{
/* Sum one field from the live queue descriptor and the saved snapshot. */
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
		((r)->f = (h)->rqd_start[(i)].stats.f + \
			(h)->saved_rx_stats[(i)].f)

	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_UPDATE_RX_STAT
}
1137
1138 static void
1139 vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1140                                         struct UPT1_TxStats *res)
1141 {
1142                 vmxnet3_hw_tx_stats_get(hw, q, res);
1143
1144 #define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)     \
1145                 ((r)->f -= (h)->snapshot_tx_stats[(i)].f)
1146
1147         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
1148         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
1149         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
1150         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
1151         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
1152         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
1153         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
1154         VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
1155
1156 #undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
1157 }
1158
1159 static void
1160 vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1161                                         struct UPT1_RxStats *res)
1162 {
1163                 vmxnet3_hw_rx_stats_get(hw, q, res);
1164
1165 #define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)     \
1166                 ((r)->f -= (h)->snapshot_rx_stats[(i)].f)
1167
1168         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
1169         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
1170         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
1171         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
1172         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
1173         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
1174         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
1175         VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1176
1177 #undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
1178 }
1179
/*
 * Save the current cumulative per-queue statistics into
 * saved_tx_stats/saved_rx_stats. Called before CMD_ACTIVATE, which
 * resets the device-side counters, so totals survive a restart.
 */
static void
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
{
	unsigned int i;

	/* Ask the device to refresh the stats in the shared queue descs. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
}
1194
1195 static int
1196 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
1197                              struct rte_eth_xstat_name *xstats_names,
1198                              unsigned int n)
1199 {
1200         unsigned int i, t, count = 0;
1201         unsigned int nstats =
1202                 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1203                 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1204
1205         if (!xstats_names || n < nstats)
1206                 return nstats;
1207
1208         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1209                 if (!dev->data->rx_queues[i])
1210                         continue;
1211
1212                 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1213                         snprintf(xstats_names[count].name,
1214                                  sizeof(xstats_names[count].name),
1215                                  "rx_q%u_%s", i,
1216                                  vmxnet3_rxq_stat_strings[t].name);
1217                         count++;
1218                 }
1219         }
1220
1221         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1222                 if (!dev->data->tx_queues[i])
1223                         continue;
1224
1225                 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1226                         snprintf(xstats_names[count].name,
1227                                  sizeof(xstats_names[count].name),
1228                                  "tx_q%u_%s", i,
1229                                  vmxnet3_txq_stat_strings[t].name);
1230                         count++;
1231                 }
1232         }
1233
1234         return count;
1235 }
1236
1237 static int
1238 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1239                        unsigned int n)
1240 {
1241         unsigned int i, t, count = 0;
1242         unsigned int nstats =
1243                 dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1244                 dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1245
1246         if (n < nstats)
1247                 return nstats;
1248
1249         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1250                 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
1251
1252                 if (rxq == NULL)
1253                         continue;
1254
1255                 for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1256                         xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
1257                                 vmxnet3_rxq_stat_strings[t].offset);
1258                         xstats[count].id = count;
1259                         count++;
1260                 }
1261         }
1262
1263         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1264                 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1265
1266                 if (txq == NULL)
1267                         continue;
1268
1269                 for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1270                         xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
1271                                 vmxnet3_txq_stat_strings[t].offset);
1272                         xstats[count].id = count;
1273                         count++;
1274                 }
1275         }
1276
1277         return count;
1278 }
1279
/*
 * ethdev stats_get callback: aggregate per-queue Tx/Rx counters
 * (relative to the last stats reset) into the rte_eth_stats totals and
 * per-queue arrays.
 */
static int
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	/* Ask the device to refresh the stats in shared memory first. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	/* The q_* arrays are RTE_ETHDEV_QUEUE_STAT_CNTRS long; make sure
	 * every possible queue index fits at compile time.
	 */
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_tx_stats_get(hw, i, &txStats);

		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;

		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_rx_stats_get(hw, i, &rxStats);

		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		/* Out-of-buffer drops count as missed, not errored. */
		stats->imissed += rxStats.pktsRxOutOfBuf;
	}

	return 0;
}
1329
1330 static int
1331 vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
1332 {
1333         unsigned int i;
1334         struct vmxnet3_hw *hw = dev->data->dev_private;
1335         struct UPT1_TxStats txStats = {0};
1336         struct UPT1_RxStats rxStats = {0};
1337
1338         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1339
1340         RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1341
1342         for (i = 0; i < hw->num_tx_queues; i++) {
1343                 vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1344                 memcpy(&hw->snapshot_tx_stats[i], &txStats,
1345                         sizeof(hw->snapshot_tx_stats[0]));
1346         }
1347         for (i = 0; i < hw->num_rx_queues; i++) {
1348                 vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1349                 memcpy(&hw->snapshot_rx_stats[i], &rxStats,
1350                         sizeof(hw->snapshot_rx_stats[0]));
1351         }
1352
1353         return 0;
1354 }
1355
1356 static int
1357 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1358                      struct rte_eth_dev_info *dev_info)
1359 {
1360         struct vmxnet3_hw *hw = dev->data->dev_private;
1361
1362         dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1363         dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1364         dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1365         dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1366         dev_info->min_mtu = VMXNET3_MIN_MTU;
1367         dev_info->max_mtu = VMXNET3_MAX_MTU;
1368         dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
1369         dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1370
1371         dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1372
1373         if (VMXNET3_VERSION_GE_4(hw)) {
1374                 dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
1375         }
1376
1377         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1378                 .nb_max = VMXNET3_RX_RING_MAX_SIZE,
1379                 .nb_min = VMXNET3_DEF_RX_RING_SIZE,
1380                 .nb_align = 1,
1381         };
1382
1383         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1384                 .nb_max = VMXNET3_TX_RING_MAX_SIZE,
1385                 .nb_min = VMXNET3_DEF_TX_RING_SIZE,
1386                 .nb_align = 1,
1387                 .nb_seg_max = VMXNET3_TX_MAX_SEG,
1388                 .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1389         };
1390
1391         dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
1392         dev_info->rx_queue_offload_capa = 0;
1393         dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
1394         dev_info->tx_queue_offload_capa = 0;
1395
1396         return 0;
1397 }
1398
1399 static const uint32_t *
1400 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1401 {
1402         static const uint32_t ptypes[] = {
1403                 RTE_PTYPE_L3_IPV4_EXT,
1404                 RTE_PTYPE_L3_IPV4,
1405                 RTE_PTYPE_UNKNOWN
1406         };
1407
1408         if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1409                 return ptypes;
1410         return NULL;
1411 }
1412
1413 static int
1414 vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
1415 {
1416         if (dev->data->dev_started) {
1417                 PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
1418                             dev->data->port_id);
1419                 return -EBUSY;
1420         }
1421
1422         return 0;
1423 }
1424
1425 static int
1426 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1427 {
1428         struct vmxnet3_hw *hw = dev->data->dev_private;
1429
1430         rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
1431         vmxnet3_write_mac(hw, mac_addr->addr_bytes);
1432         return 0;
1433 }
1434
/*
 * Query the device for the current link state and record it.
 * Return value follows rte_eth_linkstatus_set():
 * 0 means link status changed, -1 means not changed.
 */
static int
__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			  __rte_unused int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t ret;

	memset(&link, 0, sizeof(link));

	/* GET_LINK: reading CMD back returns the link word; bit 0 is "up". */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1)
		link.link_status = RTE_ETH_LINK_UP;
	/* vmxnet3 always reports fixed 10G full duplex. */
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_10G;
	link.link_autoneg = RTE_ETH_LINK_FIXED;

	return rte_eth_linkstatus_set(dev, &link);
}
1457
1458 static int
1459 vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1460 {
1461         /* Link status doesn't change for stopped dev */
1462         if (dev->data->dev_started == 0)
1463                 return -1;
1464
1465         return __vmxnet3_dev_link_update(dev, wait_to_complete);
1466 }
1467
1468 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
1469 static void
1470 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1471 {
1472         struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1473
1474         if (set)
1475                 rxConf->rxMode = rxConf->rxMode | feature;
1476         else
1477                 rxConf->rxMode = rxConf->rxMode & (~feature);
1478
1479         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1480 }
1481
1482 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1483 static int
1484 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1485 {
1486         struct vmxnet3_hw *hw = dev->data->dev_private;
1487         uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1488
1489         memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1490         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1491
1492         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1493                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1494
1495         return 0;
1496 }
1497
1498 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1499 static int
1500 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1501 {
1502         struct vmxnet3_hw *hw = dev->data->dev_private;
1503         uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1504         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1505
1506         if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1507                 memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1508         else
1509                 memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1510         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1511         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1512                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1513
1514         return 0;
1515 }
1516
1517 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1518 static int
1519 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1520 {
1521         struct vmxnet3_hw *hw = dev->data->dev_private;
1522
1523         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1524
1525         return 0;
1526 }
1527
1528 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1529 static int
1530 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1531 {
1532         struct vmxnet3_hw *hw = dev->data->dev_private;
1533
1534         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1535
1536         return 0;
1537 }
1538
/*
 * Enable/disable the filter for VLAN id @vid. The shadow table always
 * tracks the requested state; the active device table is only touched
 * when not in promiscuous mode (promiscuous mode owns it, and the
 * shadow is restored when promiscuous mode is disabled).
 */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	/* Tell the device to re-read the VLAN filter table. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}
1567
/*
 * ethdev vlan_offload_set callback: apply the VLAN strip and/or VLAN
 * filter offload settings (selected by @mask) from the current port
 * configuration to the device's shared area.
 */
static int
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Toggle hardware VLAN stripping via the UPT feature bit. */
		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
		else
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	}

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* Filtering on: load the shadow table; off: pass all VLANs. */
		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		else
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	return 0;
}
1598
/*
 * Handle pending device events from the event-cause register (ECR):
 * link-state changes, Tx/Rx queue errors, and informational events.
 */
static void
vmxnet3_process_events(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t events = hw->shared->ecr;

	if (!events)
		return;

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK) {
		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
		/* 0 return means the recorded state changed: notify apps. */
		if (vmxnet3_dev_link_update(dev, 0) == 0)
			rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);
	}

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "tq error 0x%x",
				    hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "rq error 0x%x",
				     hw->rqd_start->status.error);

		/* TODO: the device ought to be reset here; currently the
		 * error is only logged.
		 */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_DRV_LOG(DEBUG, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
}
1646
1647 static void
1648 vmxnet3_interrupt_handler(void *param)
1649 {
1650         struct rte_eth_dev *dev = param;
1651         struct vmxnet3_hw *hw = dev->data->dev_private;
1652         Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1653         uint32_t events;
1654
1655         PMD_INIT_FUNC_TRACE();
1656         vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
1657
1658         events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
1659         if (events == 0)
1660                 goto done;
1661
1662         RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
1663         vmxnet3_process_events(dev);
1664 done:
1665         vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
1666 }
1667
1668 static int
1669 vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1670 {
1671         struct vmxnet3_hw *hw = dev->data->dev_private;
1672
1673         vmxnet3_enable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
1674
1675         return 0;
1676 }
1677
1678 static int
1679 vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1680 {
1681         struct vmxnet3_hw *hw = dev->data->dev_private;
1682
1683         vmxnet3_disable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
1684
1685         return 0;
1686 }
1687
/* Register the vmxnet3 PMD with the PCI bus framework. */
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
/* Advertise the PCI device ID table used for probing. */
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
/* Kernel modules the device may be bound to for this PMD to work. */
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
/* Dynamic log types for init and datapath/driver messages, default NOTICE. */
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);