vmxnet3: fix VLAN filtering
[dpdk.git] / drivers / net / vmxnet3 / vmxnet3_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_string_fns.h>
61 #include <rte_malloc.h>
62 #include <rte_dev.h>
63
64 #include "base/vmxnet3_defs.h"
65
66 #include "vmxnet3_ring.h"
67 #include "vmxnet3_logs.h"
68 #include "vmxnet3_ethdev.h"
69
70 #define PROCESS_SYS_EVENTS 0
71
/* Device init/teardown (hooked into struct eth_driver below). */
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
/* Device lifecycle callbacks. */
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
/* Rx filter / mode control. */
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* Link, statistics and capability reporting. */
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
/* VLAN filtering and offload control. */
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);

#if PROCESS_SYS_EVENTS == 1
static void vmxnet3_process_events(struct vmxnet3_hw *);
#endif
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_vmxnet3_map[] = {

/* The macro below is consumed by rte_pci_dev_ids.h: including that header
 * expands to one {RTE_PCI_DEVICE(vend, dev)} table entry per VMXNET3 id
 * it declares. */
#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};
106
/*
 * ethdev callback table exported by this PMD.  Lifecycle, rx-mode, link,
 * stats and VLAN handlers are defined in this file; the queue setup and
 * release callbacks come from the vmxnet3 rx/tx code (see vmxnet3_ethdev.h).
 */
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure        = vmxnet3_dev_configure,
	.dev_start            = vmxnet3_dev_start,
	.dev_stop             = vmxnet3_dev_stop,
	.dev_close            = vmxnet3_dev_close,
	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update          = vmxnet3_dev_link_update,
	.stats_get            = vmxnet3_dev_stats_get,
	.dev_infos_get        = vmxnet3_dev_info_get,
	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
};
126
127 static const struct rte_memzone *
128 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
129                 const char *post_string, int socket_id, uint16_t align)
130 {
131         char z_name[RTE_MEMZONE_NAMESIZE];
132         const struct rte_memzone *mz;
133
134         snprintf(z_name, sizeof(z_name), "%s_%d_%s",
135                                         dev->driver->pci_drv.name, dev->data->port_id, post_string);
136
137         mz = rte_memzone_lookup(z_name);
138         if (mz)
139                 return mz;
140
141         return rte_memzone_reserve_aligned(z_name, size,
142                         socket_id, 0, align);
143 }
144
145 /**
146  * Atomically reads the link status information from global
147  * structure rte_eth_dev.
148  *
149  * @param dev
150  *   - Pointer to the structure rte_eth_dev to read from.
151  *   - Pointer to the buffer to be saved with the link status.
152  *
153  * @return
154  *   - On success, zero.
155  *   - On failure, negative value.
156  */
157
158 static int
159 vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
160                                     struct rte_eth_link *link)
161 {
162         struct rte_eth_link *dst = link;
163         struct rte_eth_link *src = &(dev->data->dev_link);
164
165         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
166                                 *(uint64_t *)src) == 0)
167                 return -1;
168
169         return 0;
170 }
171
172 /**
173  * Atomically writes the link status information into global
174  * structure rte_eth_dev.
175  *
176  * @param dev
177  *   - Pointer to the structure rte_eth_dev to write to.
178  *   - Pointer to the buffer to be saved with the link status.
179  *
180  * @return
181  *   - On success, zero.
182  *   - On failure, negative value.
183  */
184 static int
185 vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
186                                      struct rte_eth_link *link)
187 {
188         struct rte_eth_link *dst = &(dev->data->dev_link);
189         struct rte_eth_link *src = link;
190
191         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
192                                         *(uint64_t *)src) == 0)
193                 return -1;
194
195         return 0;
196 }
197
/*
 * This function is based on vmxnet3_disable_intr()
 *
 * Disable all device interrupts: first set the global disable bit in the
 * shared area's interrupt config, then mask every individual interrupt
 * via its IMR register (one register every 8 bytes in BAR0).
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
			VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
212
/*
 * Per-device init: hook up the ops/burst functions, map the BARs into the
 * hw struct, verify hardware and UPT version compatibility, read the
 * permanent MAC address and quiesce the device.
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	pci_dev = eth_dev->pci_dev;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;	/* BAR0 */
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;	/* BAR1 */

	/* Single queue pair until dev_configure() overrides these. */
	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1); /* select rev 1 */
	else {
		PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
		return -EIO;
	}

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address: MACL carries bytes 0-3, the low 16 bits of
	 * MACH carry bytes 4-5.  NOTE(review): the memcpy from the register
	 * values assumes a little-endian host — confirm if ported. */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr  , &mac_lo, 4);
	memcpy(hw->perm_addr+4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts: VLAN id 0 entry in the shadow filter table */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	return 0;
}
300
301 static int
302 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
303 {
304         struct vmxnet3_hw *hw = eth_dev->data->dev_private;
305
306         PMD_INIT_FUNC_TRACE();
307
308         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
309                 return 0;
310
311         if (hw->adapter_stopped == 0)
312                 vmxnet3_dev_close(eth_dev);
313
314         eth_dev->dev_ops = NULL;
315         eth_dev->rx_pkt_burst = NULL;
316         eth_dev->tx_pkt_burst = NULL;
317
318         rte_free(eth_dev->data->mac_addrs);
319         eth_dev->data->mac_addrs = NULL;
320
321         return 0;
322 }
323
/*
 * PMD registration record: binds the PCI id table and the per-device
 * init/uninit hooks, and tells the ethdev layer how much private state
 * (struct vmxnet3_hw) to allocate for each port.
 */
static struct eth_driver rte_vmxnet3_pmd = {
	.pci_drv = {
		.name = "rte_vmxnet3_pmd",
		.id_table = pci_id_vmxnet3_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_vmxnet3_dev_init,
	.eth_dev_uninit = eth_vmxnet3_dev_uninit,
	.dev_private_size = sizeof(struct vmxnet3_hw),
};
334
335 /*
336  * Driver initialization routine.
337  * Invoked once at EAL init time.
338  * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
339  */
340 static int
341 rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
342 {
343         PMD_INIT_FUNC_TRACE();
344
345         rte_eth_driver_register(&rte_vmxnet3_pmd);
346         return 0;
347 }
348
349 static int
350 vmxnet3_dev_configure(struct rte_eth_dev *dev)
351 {
352         const struct rte_memzone *mz;
353         struct vmxnet3_hw *hw = dev->data->dev_private;
354         size_t size;
355
356         PMD_INIT_FUNC_TRACE();
357
358         if (dev->data->nb_rx_queues > UINT8_MAX ||
359             dev->data->nb_tx_queues > UINT8_MAX)
360                 return -EINVAL;
361
362         size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
363                 dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
364
365         if (size > UINT16_MAX)
366                 return -EINVAL;
367
368         hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
369         hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
370
371         /*
372          * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
373          * on current socket
374          */
375         mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
376                               "shared", rte_socket_id(), 8);
377
378         if (mz == NULL) {
379                 PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
380                 return -ENOMEM;
381         }
382         memset(mz->addr, 0, mz->len);
383
384         hw->shared = mz->addr;
385         hw->sharedPA = mz->phys_addr;
386
387         /*
388          * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
389          * on current socket
390          */
391         mz = gpa_zone_reserve(dev, size, "queuedesc",
392                               rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
393         if (mz == NULL) {
394                 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
395                 return -ENOMEM;
396         }
397         memset(mz->addr, 0, mz->len);
398
399         hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
400         hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
401
402         hw->queueDescPA = mz->phys_addr;
403         hw->queue_desc_len = (uint16_t)size;
404
405         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
406
407                 /* Allocate memory structure for UPT1_RSSConf and configure */
408                 mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
409                                       rte_socket_id(), RTE_CACHE_LINE_SIZE);
410                 if (mz == NULL) {
411                         PMD_INIT_LOG(ERR,
412                                      "ERROR: Creating rss_conf structure zone");
413                         return -ENOMEM;
414                 }
415                 memset(mz->addr, 0, mz->len);
416
417                 hw->rss_conf = mz->addr;
418                 hw->rss_confPA = mz->phys_addr;
419         }
420
421         return 0;
422 }
423
/*
 * Populate the Vmxnet3_DriverShared area the device reads on activation:
 * magic/version info, guest OS description, MTU, the per-queue descriptor
 * configuration, rx filter mode, feature flags (checksum/RSS/VLAN) and
 * the permanent MAC address.  Returns VMXNET3_SUCCESS, or the error code
 * from vmxnet3_rss_configure() when RSS setup fails.
 */
static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint32_t *mac_ptr;
	uint32_t val, i;
	int ret;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 :
		VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt     = 1;

	/* NOTE(review): rte_le_to_cpu_32() applied to a host-order value —
	 * likely meant rte_cpu_to_le_32(); a no-op on little-endian hosts.
	 * Confirm before changing. */
	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA  = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues  = hw->num_tx_queues;
	devRead->misc.numRxQueues  = hw->num_rx_queues;

	/*
	 * Set number of interrupts to 1
	 * PMD disables all the interrupts but this is MUST to activate device
	 * It needs at least one interrupt for link events to handle
	 * So we'll disable it later after device activation if needed
	 */
	devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	/* Mirror each Tx queue's ring addresses/sizes into its descriptor. */
	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];

		tqd->ctrl.txNumDeferred  = 0;
		tqd->ctrl.txThreshold    = 1;
		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize   = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.intrIdx      = txq->comp_ring.intr_idx;
		tqd->status.stopped    = TRUE;
		tqd->status.error      = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	/* Same for each Rx queue (two command rings + one completion ring). */
	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize    = rxq->comp_ring.size;
		rqd->conf.intrIdx         = rxq->comp_ring.intr_idx;
		rqd->status.stopped       = TRUE;
		rqd->status.error         = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* RxMode set to 0 of VMXNET3_RXM_xxx */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA  = hw->rss_confPA;
	}

	/* Apply configured VLAN strip/filter settings to the device. */
	vmxnet3_dev_vlan_offload_set(dev,
			     ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Write MAC Address back to device */
	mac_ptr = (uint32_t *)hw->perm_addr;
	val = *mac_ptr;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	/* Bytes 4-5 go into the low 16 bits of MACH. */
	val = (hw->perm_addr[5] << 8) | hw->perm_addr[4];
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);

	return VMXNET3_SUCCESS;
}
534
535 /*
536  * Configure device link speed and setup link.
537  * Must be called after eth_vmxnet3_dev_init. Other wise it might fail
538  * It returns 0 on success.
539  */
540 static int
541 vmxnet3_dev_start(struct rte_eth_dev *dev)
542 {
543         int status, ret;
544         struct vmxnet3_hw *hw = dev->data->dev_private;
545
546         PMD_INIT_FUNC_TRACE();
547
548         ret = vmxnet3_setup_driver_shared(dev);
549         if (ret != VMXNET3_SUCCESS)
550                 return ret;
551
552         /* Exchange shared data with device */
553         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
554                                VMXNET3_GET_ADDR_LO(hw->sharedPA));
555         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
556                                VMXNET3_GET_ADDR_HI(hw->sharedPA));
557
558         /* Activate device by register write */
559         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
560         status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
561
562         if (status != 0) {
563                 PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
564                 return -1;
565         }
566
567         /* Disable interrupts */
568         vmxnet3_disable_intr(hw);
569
570         /*
571          * Load RX queues with blank mbufs and update next2fill index for device
572          * Update RxMode of the device
573          */
574         ret = vmxnet3_dev_rxtx_init(dev);
575         if (ret != VMXNET3_SUCCESS) {
576                 PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
577                 return ret;
578         }
579
580         /* Setting proper Rx Mode and issue Rx Mode Update command */
581         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
582
583         /*
584          * Don't need to handle events for now
585          */
586 #if PROCESS_SYS_EVENTS == 1
587         events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
588         PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
589         vmxnet3_process_events(hw);
590 #endif
591         return status;
592 }
593
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Quiesces and resets the device, drains the queues and clears the
 * recorded link status.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	/* NOTE(review): the flag is cleared here and only set to 1 by
	 * vmxnet3_dev_close(), so a bare stop leaves the device marked as
	 * running; dev_start() never clears it either.  Confirm intent
	 * before changing — the close/uninit path relies on this. */
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);
}
629
630 /*
631  * Reset and stop device.
632  */
633 static void
634 vmxnet3_dev_close(struct rte_eth_dev *dev)
635 {
636         struct vmxnet3_hw *hw = dev->data->dev_private;
637
638         PMD_INIT_FUNC_TRACE();
639
640         vmxnet3_dev_stop(dev);
641         hw->adapter_stopped = 1;
642 }
643
/*
 * Collect per-queue and aggregate statistics.  The GET_STATS command
 * makes the device latch its current counters into the Tx/Rx queue
 * descriptors, which are then summed below.
 */
static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	/* Compile-time check: the per-queue stat arrays must be big enough. */
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;

		/* Successful counts are unicast + multicast + broadcast. */
		stats->q_opackets[i] = txStats->ucastPktsTxOK +
			txStats->mcastPktsTxOK +
			txStats->bcastPktsTxOK;
		stats->q_obytes[i] = txStats->ucastBytesTxOK +
			txStats->mcastBytesTxOK +
			txStats->bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats->pktsTxError +
			txStats->pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;

		stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
			rxStats->mcastPktsRxOK +
			rxStats->bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
			rxStats->mcastBytesRxOK +
			rxStats->bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats->pktsRxError;
		stats->ierrors += rxStats->pktsRxError;
		stats->imcasts += rxStats->mcastPktsRxOK;
		/* Packets dropped because no mbufs were available. */
		stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
	}
}
690
691 static void
692 vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
693                      struct rte_eth_dev_info *dev_info)
694 {
695         dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
696         dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
697         dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
698         dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
699         dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
700
701         dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
702                                                 ETH_TXQ_FLAGS_NOOFFLOADS;
703         dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
704
705         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
706                 .nb_max = VMXNET3_RX_RING_MAX_SIZE,
707                 .nb_min = VMXNET3_DEF_RX_RING_SIZE,
708                 .nb_align = 1,
709         };
710
711         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
712                 .nb_max = VMXNET3_TX_RING_MAX_SIZE,
713                 .nb_min = VMXNET3_DEF_TX_RING_SIZE,
714                 .nb_align = 1,
715         };
716
717         dev_info->rx_offload_capa =
718                 DEV_RX_OFFLOAD_VLAN_STRIP |
719                 DEV_RX_OFFLOAD_UDP_CKSUM |
720                 DEV_RX_OFFLOAD_TCP_CKSUM;
721
722         dev_info->tx_offload_capa =
723                 DEV_TX_OFFLOAD_VLAN_INSERT |
724                 DEV_TX_OFFLOAD_TCP_CKSUM |
725                 DEV_TX_OFFLOAD_UDP_CKSUM |
726                 DEV_TX_OFFLOAD_TCP_TSO;
727 }
728
729 /* return 0 means link status changed, -1 means not changed */
730 static int
731 vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
732 {
733         struct vmxnet3_hw *hw = dev->data->dev_private;
734         struct rte_eth_link old, link;
735         uint32_t ret;
736
737         if (dev->data->dev_started == 0)
738                 return -1; /* Link status doesn't change for stopped dev */
739
740         memset(&link, 0, sizeof(link));
741         vmxnet3_dev_atomic_read_link_status(dev, &old);
742
743         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
744         ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
745
746         if (ret & 0x1) {
747                 link.link_status = 1;
748                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
749                 link.link_speed = ETH_LINK_SPEED_10000;
750         }
751
752         vmxnet3_dev_atomic_write_link_status(dev, &link);
753
754         return (old.link_status == link.link_status) ? -1 : 0;
755 }
756
757 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
758 static void
759 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
760
761         struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
762
763         if (set)
764                 rxConf->rxMode = rxConf->rxMode | feature;
765         else
766                 rxConf->rxMode = rxConf->rxMode & (~feature);
767
768         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
769 }
770
/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	/* Clear the active VLAN filter table so all VLANs pass (the
	 * configured state stays in hw->shadow_vfta for restore on
	 * disable), then turn on promiscuous rx mode. */
	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	/* Make the device reload the (now empty) VLAN filter table. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
784
/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	/* Restore the VLAN filter table saved in the shadow copy, drop
	 * promiscuous rx mode and have the device reload its filters. */
	memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
797
798 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
799 static void
800 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
801 {
802         struct vmxnet3_hw *hw = dev->data->dev_private;
803
804         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
805 }
806
807 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
808 static void
809 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
810 {
811         struct vmxnet3_hw *hw = dev->data->dev_private;
812
813         vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
814 }
815
816 /* Enable/disable filter on vlan */
817 static int
818 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
819 {
820         struct vmxnet3_hw *hw = dev->data->dev_private;
821         struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
822         uint32_t *vf_table = rxConf->vfTable;
823
824         /* save state for restore */
825         if (on)
826                 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
827         else
828                 VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
829
830         /* don't change active filter if in promiscuous mode */
831         if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
832                 return 0;
833
834         /* set in hardware */
835         if (on)
836                 VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
837         else
838                 VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
839
840         VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
841                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
842         return 0;
843 }
844
845 static void
846 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
847 {
848         struct vmxnet3_hw *hw = dev->data->dev_private;
849         Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
850         uint32_t *vf_table = devRead->rxFilterConf.vfTable;
851
852         if (mask & ETH_VLAN_STRIP_MASK) {
853                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
854                         devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
855                 else
856                         devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
857
858                 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
859                                        VMXNET3_CMD_UPDATE_FEATURE);
860         }
861
862         if (mask & ETH_VLAN_FILTER_MASK) {
863                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
864                         memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
865                 else
866                         memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
867
868                 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
869                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
870         }
871 }
872
#if PROCESS_SYS_EVENTS == 1
/*
 * Read and acknowledge pending device events from the Event Change
 * Register (ECR).  Events are currently only logged; no recovery action
 * (e.g. device reset on queue error) is taken yet.
 */
static void
vmxnet3_process_events(struct vmxnet3_hw *hw)
{
	uint32_t events = hw->shared->ecr;

	if (!events) {
		PMD_INIT_LOG(ERR, "No events to process");
		return;
	}

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		PMD_INIT_LOG(ERR,
			     "Process events in %s(): VMXNET3_ECR_LINK event", __func__);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		/* Ask the device to refresh the queue status descriptors. */
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "tq error 0x%x",
				     hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "rq error 0x%x",
				     hw->rqd_start->status.error);

		/*
		 * TODO: a queue error requires resetting the device to
		 * recover; the reset is not implemented here yet.
		 */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_INIT_LOG(ERR, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_INIT_LOG(ERR, "Debug event generated by device.");

}
#endif
919
/*
 * Driver registration: exposes rte_vmxnet3_pmd_init (defined earlier in
 * this file) to the EAL as a PCI (PMD_PDEV) device driver.
 */
static struct rte_driver rte_vmxnet3_driver = {
	.type = PMD_PDEV,
	.init = rte_vmxnet3_pmd_init,
};

PMD_REGISTER_DRIVER(rte_vmxnet3_driver);