net/virtio: revert fix restart
[dpdk.git] / drivers/net/virtio/virtio_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35 #include <string.h>
36 #include <stdio.h>
37 #include <errno.h>
38 #include <unistd.h>
39
40 #include <rte_ethdev.h>
41 #include <rte_memcpy.h>
42 #include <rte_string_fns.h>
43 #include <rte_memzone.h>
44 #include <rte_malloc.h>
45 #include <rte_atomic.h>
46 #include <rte_branch_prediction.h>
47 #include <rte_pci.h>
48 #include <rte_ether.h>
49 #include <rte_common.h>
50 #include <rte_errno.h>
51
52 #include <rte_memory.h>
53 #include <rte_eal.h>
54 #include <rte_dev.h>
55
56 #include "virtio_ethdev.h"
57 #include "virtio_pci.h"
58 #include "virtio_logs.h"
59 #include "virtqueue.h"
60 #include "virtio_rxtx.h"
61
62 static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
63 static int  virtio_dev_configure(struct rte_eth_dev *dev);
64 static int  virtio_dev_start(struct rte_eth_dev *dev);
65 static void virtio_dev_stop(struct rte_eth_dev *dev);
66 static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
67 static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
68 static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
69 static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
70 static void virtio_dev_info_get(struct rte_eth_dev *dev,
71                                 struct rte_eth_dev_info *dev_info);
72 static int virtio_dev_link_update(struct rte_eth_dev *dev,
73         __rte_unused int wait_to_complete);
74
75 static void virtio_set_hwaddr(struct virtio_hw *hw);
76 static void virtio_get_hwaddr(struct virtio_hw *hw);
77
78 static void virtio_dev_stats_get(struct rte_eth_dev *dev,
79                                  struct rte_eth_stats *stats);
80 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
81                                  struct rte_eth_xstat *xstats, unsigned n);
82 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
83                                        struct rte_eth_xstat_name *xstats_names,
84                                        unsigned limit);
85 static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
86 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
87 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
88                                 uint16_t vlan_id, int on);
89 static void virtio_mac_addr_add(struct rte_eth_dev *dev,
90                                 struct ether_addr *mac_addr,
91                                 uint32_t index, uint32_t vmdq __rte_unused);
92 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
93 static void virtio_mac_addr_set(struct rte_eth_dev *dev,
94                                 struct ether_addr *mac_addr);
95
96 static int virtio_dev_queue_stats_mapping_set(
97         __rte_unused struct rte_eth_dev *eth_dev,
98         __rte_unused uint16_t queue_id,
99         __rte_unused uint8_t stat_idx,
100         __rte_unused uint8_t is_rx);
101
102 /*
103  * The set of PCI devices this driver supports
104  */
105 static const struct rte_pci_id pci_id_virtio_map[] = {
106         { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
107         { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
108         { .vendor_id = 0, /* sentinel */ },
109 };
110
111 struct rte_virtio_xstats_name_off {
112         char name[RTE_ETH_XSTATS_NAME_SIZE];
113         unsigned offset;
114 };
115
116 /* [rt]x_qX_ is prepended to the name string here */
117 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
118         {"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
119         {"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
120         {"errors",                 offsetof(struct virtnet_rx, stats.errors)},
121         {"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
122         {"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
123         {"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
124         {"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
125         {"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
126         {"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
127         {"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
128         {"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
129         {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
130         {"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
131 };
132
133 /* [rt]x_qX_ is prepended to the name string here */
134 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
135         {"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
136         {"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
137         {"errors",                 offsetof(struct virtnet_tx, stats.errors)},
138         {"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
139         {"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
140         {"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
141         {"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
142         {"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
143         {"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
144         {"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
145         {"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
146         {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
147         {"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
148 };
149
150 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
151                             sizeof(rte_virtio_rxq_stat_strings[0]))
152 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
153                             sizeof(rte_virtio_txq_stat_strings[0]))
154
155 static int
156 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
157                 int *dlen, int pkt_num)
158 {
159         uint32_t head, i;
160         int k, sum = 0;
161         virtio_net_ctrl_ack status = ~0;
162         struct virtio_pmd_ctrl result;
163         struct virtqueue *vq;
164
165         ctrl->status = status;
166
167         if (!cvq || !cvq->vq) {
168                 PMD_INIT_LOG(ERR, "Control queue is not supported.");
169                 return -1;
170         }
171         vq = cvq->vq;
172         head = vq->vq_desc_head_idx;
173
174         PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
175                 "vq->hw->cvq = %p vq = %p",
176                 vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
177
178         if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
179                 return -1;
180
181         memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
182                 sizeof(struct virtio_pmd_ctrl));
183
184         /*
185          * Format is enforced in qemu code:
186          * One TX packet for header;
187          * At least one TX packet per argument;
188          * One RX packet for ACK.
189          */
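        /*
         * Illustrative chain layout for pkt_num == 1 (editor's sketch, not
         * part of the original code): three descriptors linked by NEXT.
         *
         *   desc[head]: ctrl header, hdr_mem + 0,       flags = NEXT
         *   desc[i]:    ctrl data,   len = dlen[0],     flags = NEXT
         *   desc[j]:    status byte (device-writable),  flags = WRITE
         */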
190         vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
191         vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
192         vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
193         vq->vq_free_cnt--;
194         i = vq->vq_ring.desc[head].next;
195
196         for (k = 0; k < pkt_num; k++) {
197                 vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
198                 vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
199                         + sizeof(struct virtio_net_ctrl_hdr)
200                         + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
201                 vq->vq_ring.desc[i].len = dlen[k];
202                 sum += dlen[k];
203                 vq->vq_free_cnt--;
204                 i = vq->vq_ring.desc[i].next;
205         }
206
207         vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
208         vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
209                         + sizeof(struct virtio_net_ctrl_hdr);
210         vq->vq_ring.desc[i].len = sizeof(ctrl->status);
211         vq->vq_free_cnt--;
212
213         vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
214
215         vq_update_avail_ring(vq, head);
216         vq_update_avail_idx(vq);
217
218         PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
219
220         virtqueue_notify(vq);
221
222         rte_rmb();
223         while (VIRTQUEUE_NUSED(vq) == 0) {
224                 rte_rmb();
225                 usleep(100);
226         }
227
228         while (VIRTQUEUE_NUSED(vq)) {
229                 uint32_t idx, desc_idx, used_idx;
230                 struct vring_used_elem *uep;
231
232                 used_idx = (uint32_t)(vq->vq_used_cons_idx
233                                 & (vq->vq_nentries - 1));
234                 uep = &vq->vq_ring.used->ring[used_idx];
235                 idx = (uint32_t) uep->id;
236                 desc_idx = idx;
237
238                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
239                         desc_idx = vq->vq_ring.desc[desc_idx].next;
240                         vq->vq_free_cnt++;
241                 }
242
243                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
244                 vq->vq_desc_head_idx = idx;
245
246                 vq->vq_used_cons_idx++;
247                 vq->vq_free_cnt++;
248         }
249
250         PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
251                         vq->vq_free_cnt, vq->vq_desc_head_idx);
252
253         memcpy(&result, cvq->virtio_net_hdr_mz->addr,
254                         sizeof(struct virtio_pmd_ctrl));
255
256         return result.status;
257 }
258
259 static int
260 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
261 {
262         struct virtio_hw *hw = dev->data->dev_private;
263         struct virtio_pmd_ctrl ctrl;
264         int dlen[1];
265         int ret;
266
267         ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
268         ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
269         memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
270
271         dlen[0] = sizeof(uint16_t);
272
273         ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
274         if (ret) {
275                 PMD_INIT_LOG(ERR, "Multiqueue configured but send "
276                           "command failed");
277                 return -EINVAL;
278         }
279
280         return 0;
281 }
282
283 void
284 virtio_dev_queue_release(struct virtqueue *vq)
285 {
286         struct virtio_hw *hw;
287
288         if (vq) {
289                 hw = vq->hw;
290                 if (vq->configured)
291                         hw->vtpci_ops->del_queue(hw, vq);
292
293                 rte_free(vq->sw_ring);
294                 rte_free(vq);
295         }
296 }
297
298 int virtio_dev_queue_setup(struct rte_eth_dev *dev,
299                         int queue_type,
300                         uint16_t queue_idx,
301                         uint16_t vtpci_queue_idx,
302                         uint16_t nb_desc,
303                         unsigned int socket_id,
304                         void **pvq)
305 {
306         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
307         char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
308         const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
309         unsigned int vq_size, size;
310         struct virtio_hw *hw = dev->data->dev_private;
311         struct virtnet_rx *rxvq = NULL;
312         struct virtnet_tx *txvq = NULL;
313         struct virtnet_ctl *cvq = NULL;
314         struct virtqueue *vq;
315         const char *queue_names[] = {"rvq", "txq", "cvq"};
316         size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
317         void *sw_ring = NULL;
318         int ret;
319
320         PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
321
322         /*
323          * Read the virtqueue size from the Queue Size field
324          * Always a power of 2; if 0, the virtqueue does not exist
325          */
326         vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
327         PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
328         if (vq_size == 0) {
329                 PMD_INIT_LOG(ERR, "virtqueue does not exist");
330                 return -EINVAL;
331         }
332
333         if (!rte_is_power_of_2(vq_size)) {
334                 PMD_INIT_LOG(ERR, "virtqueue size is not a power of 2");
335                 return -EINVAL;
336         }
337
338         snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
339                  dev->data->port_id, queue_names[queue_type], queue_idx);
340
341         sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
342                                 vq_size * sizeof(struct vq_desc_extra),
343                                 RTE_CACHE_LINE_SIZE);
344         if (queue_type == VTNET_RQ) {
345                 sz_q = sz_vq + sizeof(*rxvq);
346         } else if (queue_type == VTNET_TQ) {
347                 sz_q = sz_vq + sizeof(*txvq);
348                 /*
349                  * For each xmit packet, allocate a virtio_net_hdr
350                  * and indirect ring elements
351                  */
352                 sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
353         } else if (queue_type == VTNET_CQ) {
354                 sz_q = sz_vq + sizeof(*cvq);
355                 /* Allocate a page for control vq command, data and status */
356                 sz_hdr_mz = PAGE_SIZE;
357         }
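        /*
         * Layout of the single allocation made below (editor's sketch):
         *
         *   +--------------------------------+ <- vq
         *   | struct virtqueue               |
         *   | vq_descx[vq_size]              |
         *   +--------------------------------+ <- RTE_PTR_ADD(vq, sz_vq)
         *   | virtnet_rx, virtnet_tx or      |
         *   | virtnet_ctl, by queue type     |
         *   +--------------------------------+
         */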
358
359         vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
360         if (vq == NULL) {
361                 PMD_INIT_LOG(ERR, "can not allocate vq");
362                 return -ENOMEM;
363         }
364         vq->hw = hw;
365         vq->vq_queue_index = vtpci_queue_idx;
366         vq->vq_nentries = vq_size;
367
368         if (nb_desc == 0 || nb_desc > vq_size)
369                 nb_desc = vq_size;
370         vq->vq_free_cnt = nb_desc;
371
372         /*
373          * Reserve a memzone for vring elements
374          */
375         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
376         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
377         PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
378                      size, vq->vq_ring_size);
379
380         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
381                                          0, VIRTIO_PCI_VRING_ALIGN);
382         if (mz == NULL) {
383                 if (rte_errno == EEXIST)
384                         mz = rte_memzone_lookup(vq_name);
385                 if (mz == NULL) {
386                         ret = -ENOMEM;
387                         goto fail_q_alloc;
388                 }
389         }
390
391         memset(mz->addr, 0, mz->len);
392
393         vq->vq_ring_mem = mz->phys_addr;
394         vq->vq_ring_virt_mem = mz->addr;
395         PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%" PRIx64,
396                      (uint64_t)mz->phys_addr);
397         PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
398                      (uint64_t)(uintptr_t)mz->addr);
399
400         if (sz_hdr_mz) {
401                 snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
402                          dev->data->port_id, queue_names[queue_type],
403                          queue_idx);
404                 hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
405                                                      socket_id, 0,
406                                                      RTE_CACHE_LINE_SIZE);
407                 if (hdr_mz == NULL) {
408                         if (rte_errno == EEXIST)
409                                 hdr_mz = rte_memzone_lookup(vq_hdr_name);
410                         if (hdr_mz == NULL) {
411                                 ret = -ENOMEM;
412                                 goto fail_q_alloc;
413                         }
414                 }
415         }
416
417         if (queue_type == VTNET_RQ) {
418                 size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
419                                sizeof(vq->sw_ring[0]);
420
421                 sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
422                                              RTE_CACHE_LINE_SIZE, socket_id);
423                 if (!sw_ring) {
424                         PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
425                         ret = -ENOMEM;
426                         goto fail_q_alloc;
427                 }
428
429                 vq->sw_ring = sw_ring;
430                 rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
431                 rxvq->vq = vq;
432                 rxvq->port_id = dev->data->port_id;
433                 rxvq->queue_id = queue_idx;
434                 rxvq->mz = mz;
435                 *pvq = rxvq;
436         } else if (queue_type == VTNET_TQ) {
437                 txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
438                 txvq->vq = vq;
439                 txvq->port_id = dev->data->port_id;
440                 txvq->queue_id = queue_idx;
441                 txvq->mz = mz;
442                 txvq->virtio_net_hdr_mz = hdr_mz;
443                 txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
444
445                 *pvq = txvq;
446         } else if (queue_type == VTNET_CQ) {
447                 cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
448                 cvq->vq = vq;
449                 cvq->mz = mz;
450                 cvq->virtio_net_hdr_mz = hdr_mz;
451                 cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
452                 memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
453                 *pvq = cvq;
454         }
455
456         /* For the virtio_user case (i.e. when dev->pci_dev is NULL), we use
457          * the virtual address, and we need to set _offset_ properly; see
458          * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
459          */
460         if (dev->pci_dev)
461                 vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
462         else {
463                 vq->vq_ring_mem = (uintptr_t)mz->addr;
464                 vq->offset = offsetof(struct rte_mbuf, buf_addr);
465                 if (queue_type == VTNET_TQ)
466                         txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
467                 else if (queue_type == VTNET_CQ)
468                         cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
469         }
470
471         if (queue_type == VTNET_TQ) {
472                 struct virtio_tx_region *txr;
473                 unsigned int i;
474
475                 txr = hdr_mz->addr;
476                 memset(txr, 0, vq_size * sizeof(*txr));
477                 for (i = 0; i < vq_size; i++) {
478                         struct vring_desc *start_dp = txr[i].tx_indir;
479
480                         vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
481
482                         /* first indirect descriptor is always the tx header */
483                         start_dp->addr = txvq->virtio_net_hdr_mem
484                                 + i * sizeof(*txr)
485                                 + offsetof(struct virtio_tx_region, tx_hdr);
486
487                         start_dp->len = hw->vtnet_hdr_size;
488                         start_dp->flags = VRING_DESC_F_NEXT;
489                 }
490         }
491
492         if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
493                 PMD_INIT_LOG(ERR, "setup_queue failed");
494                 virtio_dev_queue_release(vq);
495                 return -EINVAL;
496         }
497
498         vq->configured = 1;
499         return 0;
500
501 fail_q_alloc:
502         rte_free(sw_ring);
503         rte_memzone_free(hdr_mz);
504         rte_memzone_free(mz);
505         rte_free(vq);
506
507         return ret;
508 }
509
510 static int
511 virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
512                 uint32_t socket_id)
513 {
514         struct virtnet_ctl *cvq;
515         int ret;
516         struct virtio_hw *hw = dev->data->dev_private;
517
518         PMD_INIT_FUNC_TRACE();
519         ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
520                         vtpci_queue_idx, 0, socket_id, (void **)&cvq);
521         if (ret < 0) {
522                 PMD_INIT_LOG(ERR, "control vq initialization failed");
523                 return ret;
524         }
525
526         hw->cvq = cvq;
527         return 0;
528 }
529
530 static void
531 virtio_free_queues(struct rte_eth_dev *dev)
532 {
533         unsigned int i;
534
535         for (i = 0; i < dev->data->nb_rx_queues; i++)
536                 virtio_dev_rx_queue_release(dev->data->rx_queues[i]);
537
538         dev->data->nb_rx_queues = 0;
539
540         for (i = 0; i < dev->data->nb_tx_queues; i++)
541                 virtio_dev_tx_queue_release(dev->data->tx_queues[i]);
542
543         dev->data->nb_tx_queues = 0;
544 }
545
546 static void
547 virtio_dev_close(struct rte_eth_dev *dev)
548 {
549         struct virtio_hw *hw = dev->data->dev_private;
550
551         PMD_INIT_LOG(DEBUG, "virtio_dev_close");
552
553         if (hw->cvq)
554                 virtio_dev_queue_release(hw->cvq->vq);
555
556         /* reset the NIC */
557         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
558                 vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
559         vtpci_reset(hw);
560         hw->started = 0;
561         virtio_dev_free_mbufs(dev);
562         virtio_free_queues(dev);
563 }
564
565 static void
566 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
567 {
568         struct virtio_hw *hw = dev->data->dev_private;
569         struct virtio_pmd_ctrl ctrl;
570         int dlen[1];
571         int ret;
572
573         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
574                 PMD_INIT_LOG(INFO, "host does not support rx control");
575                 return;
576         }
577
578         ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
579         ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
580         ctrl.data[0] = 1;
581         dlen[0] = 1;
582
583         ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
584         if (ret)
585                 PMD_INIT_LOG(ERR, "Failed to enable promisc");
586 }
587
588 static void
589 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
590 {
591         struct virtio_hw *hw = dev->data->dev_private;
592         struct virtio_pmd_ctrl ctrl;
593         int dlen[1];
594         int ret;
595
596         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
597                 PMD_INIT_LOG(INFO, "host does not support rx control");
598                 return;
599         }
600
601         ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
602         ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
603         ctrl.data[0] = 0;
604         dlen[0] = 1;
605
606         ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
607         if (ret)
608                 PMD_INIT_LOG(ERR, "Failed to disable promisc");
609 }
610
611 static void
612 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
613 {
614         struct virtio_hw *hw = dev->data->dev_private;
615         struct virtio_pmd_ctrl ctrl;
616         int dlen[1];
617         int ret;
618
619         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
620                 PMD_INIT_LOG(INFO, "host does not support rx control");
621                 return;
622         }
623
624         ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
625         ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
626         ctrl.data[0] = 1;
627         dlen[0] = 1;
628
629         ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
630         if (ret)
631                 PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
632 }
633
634 static void
635 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
636 {
637         struct virtio_hw *hw = dev->data->dev_private;
638         struct virtio_pmd_ctrl ctrl;
639         int dlen[1];
640         int ret;
641
642         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
643                 PMD_INIT_LOG(INFO, "host does not support rx control");
644                 return;
645         }
646
647         ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
648         ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
649         ctrl.data[0] = 0;
650         dlen[0] = 1;
651
652         ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
653         if (ret)
654                 PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
655 }
656
657 #define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
658 static int
659 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
660 {
661         struct virtio_hw *hw = dev->data->dev_private;
662         uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
663                                  hw->vtnet_hdr_size;
664         uint32_t frame_size = mtu + ether_hdr_len;
665
666         if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
667                 PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
668                         ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
669                 return -EINVAL;
670         }
671         return 0;
672 }
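/*
 * Worked example (editor's sketch): with the 12-byte mergeable rx header,
 * ether_hdr_len = 14 (ETHER_HDR_LEN) + 4 (VLAN_TAG_LEN) + 12 = 30 bytes,
 * so the largest MTU accepted above is VIRTIO_MAX_RX_PKTLEN - 30
 * (9698 bytes, assuming the usual VIRTIO_MAX_RX_PKTLEN of 9728).
 */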
673
674 /*
675  * dev_ops for virtio, bare necessities for basic operation
676  */
677 static const struct eth_dev_ops virtio_eth_dev_ops = {
678         .dev_configure           = virtio_dev_configure,
679         .dev_start               = virtio_dev_start,
680         .dev_stop                = virtio_dev_stop,
681         .dev_close               = virtio_dev_close,
682         .promiscuous_enable      = virtio_dev_promiscuous_enable,
683         .promiscuous_disable     = virtio_dev_promiscuous_disable,
684         .allmulticast_enable     = virtio_dev_allmulticast_enable,
685         .allmulticast_disable    = virtio_dev_allmulticast_disable,
686         .mtu_set                 = virtio_mtu_set,
687         .dev_infos_get           = virtio_dev_info_get,
688         .stats_get               = virtio_dev_stats_get,
689         .xstats_get              = virtio_dev_xstats_get,
690         .xstats_get_names        = virtio_dev_xstats_get_names,
691         .stats_reset             = virtio_dev_stats_reset,
692         .xstats_reset            = virtio_dev_stats_reset,
693         .link_update             = virtio_dev_link_update,
694         .rx_queue_setup          = virtio_dev_rx_queue_setup,
695         .rx_queue_release        = virtio_dev_rx_queue_release,
696         .tx_queue_setup          = virtio_dev_tx_queue_setup,
697         .tx_queue_release        = virtio_dev_tx_queue_release,
698         /* collect stats per queue */
699         .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
700         .vlan_filter_set         = virtio_vlan_filter_set,
701         .mac_addr_add            = virtio_mac_addr_add,
702         .mac_addr_remove         = virtio_mac_addr_remove,
703         .mac_addr_set            = virtio_mac_addr_set,
704 };
705
706 static inline int
707 virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
708                                 struct rte_eth_link *link)
709 {
710         struct rte_eth_link *dst = link;
711         struct rte_eth_link *src = &(dev->data->dev_link);
712
713         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
714                         *(uint64_t *)src) == 0)
715                 return -1;
716
717         return 0;
718 }
719
720 /**
721  * Atomically writes the link status information into the global
722  * rte_eth_dev structure.
723  *
724  * @param dev
725  *   Pointer to the structure rte_eth_dev to write to.
726  * @param link  Pointer to the link status to save into the device data.
727  *
728  * @return
729  *   - On success, zero.
730  *   - On failure, negative value.
731  */
732 static inline int
733 virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
734                 struct rte_eth_link *link)
735 {
736         struct rte_eth_link *dst = &(dev->data->dev_link);
737         struct rte_eth_link *src = link;
738
739         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
740                                         *(uint64_t *)src) == 0)
741                 return -1;
742
743         return 0;
744 }
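/*
 * Illustrative use (editor's sketch, hypothetical values): struct
 * rte_eth_link fits in 64 bits, so the cmpset above publishes a complete
 * link state in one atomic store.
 *
 *   struct rte_eth_link link;
 *
 *   memset(&link, 0, sizeof(link));
 *   link.link_status = ETH_LINK_UP;
 *   link.link_speed = ETH_SPEED_NUM_10G;
 *   virtio_dev_atomic_write_link_status(dev, &link);
 */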
745
746 static void
747 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
748 {
749         unsigned i;
750
751         for (i = 0; i < dev->data->nb_tx_queues; i++) {
752                 const struct virtnet_tx *txvq = dev->data->tx_queues[i];
753                 if (txvq == NULL)
754                         continue;
755
756                 stats->opackets += txvq->stats.packets;
757                 stats->obytes += txvq->stats.bytes;
758                 stats->oerrors += txvq->stats.errors;
759
760                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
761                         stats->q_opackets[i] = txvq->stats.packets;
762                         stats->q_obytes[i] = txvq->stats.bytes;
763                 }
764         }
765
766         for (i = 0; i < dev->data->nb_rx_queues; i++) {
767                 const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
768                 if (rxvq == NULL)
769                         continue;
770
771                 stats->ipackets += rxvq->stats.packets;
772                 stats->ibytes += rxvq->stats.bytes;
773                 stats->ierrors += rxvq->stats.errors;
774
775                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
776                         stats->q_ipackets[i] = rxvq->stats.packets;
777                         stats->q_ibytes[i] = rxvq->stats.bytes;
778                 }
779         }
780
781         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
782 }
783
784 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
785                                        struct rte_eth_xstat_name *xstats_names,
786                                        __rte_unused unsigned limit)
787 {
788         unsigned i;
789         unsigned count = 0;
790         unsigned t;
791
792         unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
793                 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
794
795         if (xstats_names != NULL) {
796                 /* Note: limit checked in rte_eth_xstats_get_names() */
797
798                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
799                         struct virtnet_rx *rxvq = dev->data->rx_queues[i];
800                         if (rxvq == NULL)
801                                 continue;
802                         for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
803                                 snprintf(xstats_names[count].name,
804                                         sizeof(xstats_names[count].name),
805                                         "rx_q%u_%s", i,
806                                         rte_virtio_rxq_stat_strings[t].name);
807                                 count++;
808                         }
809                 }
810
811                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
812                         struct virtnet_tx *txvq = dev->data->tx_queues[i];
813                         if (txvq == NULL)
814                                 continue;
815                         for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
816                                 snprintf(xstats_names[count].name,
817                                         sizeof(xstats_names[count].name),
818                                         "tx_q%u_%s", i,
819                                         rte_virtio_txq_stat_strings[t].name);
820                                 count++;
821                         }
822                 }
823                 return count;
824         }
825         return nstats;
826 }
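/*
 * Example of the generated names (editor's sketch): with two rx queues the
 * array starts "rx_q0_good_packets", "rx_q0_good_bytes", ...,
 * "rx_q1_good_packets", ..., followed by the "tx_qN_..." counterparts.
 */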
827
828 static int
829 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
830                       unsigned n)
831 {
832         unsigned i;
833         unsigned count = 0;
834
835         unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
836                 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
837
838         if (n < nstats)
839                 return nstats;
840
841         for (i = 0; i < dev->data->nb_rx_queues; i++) {
842                 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
843
844                 if (rxvq == NULL)
845                         continue;
846
847                 unsigned t;
848
849                 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
850                         xstats[count].value = *(uint64_t *)(((char *)rxvq) +
851                                 rte_virtio_rxq_stat_strings[t].offset);
852                         count++;
853                 }
854         }
855
856         for (i = 0; i < dev->data->nb_tx_queues; i++) {
857                 struct virtnet_tx *txvq = dev->data->tx_queues[i];
858
859                 if (txvq == NULL)
860                         continue;
861
862                 unsigned t;
863
864                 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
865                         xstats[count].value = *(uint64_t *)(((char *)txvq) +
866                                 rte_virtio_txq_stat_strings[t].offset);
867                         count++;
868                 }
869         }
870
871         return count;
872 }
873
874 static void
875 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
876 {
877         virtio_update_stats(dev, stats);
878 }
879
880 static void
881 virtio_dev_stats_reset(struct rte_eth_dev *dev)
882 {
883         unsigned int i;
884
885         for (i = 0; i < dev->data->nb_tx_queues; i++) {
886                 struct virtnet_tx *txvq = dev->data->tx_queues[i];
887                 if (txvq == NULL)
888                         continue;
889
890                 txvq->stats.packets = 0;
891                 txvq->stats.bytes = 0;
892                 txvq->stats.errors = 0;
893                 txvq->stats.multicast = 0;
894                 txvq->stats.broadcast = 0;
895                 memset(txvq->stats.size_bins, 0,
896                        sizeof(txvq->stats.size_bins[0]) * 8);
897         }
898
899         for (i = 0; i < dev->data->nb_rx_queues; i++) {
900                 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
901                 if (rxvq == NULL)
902                         continue;
903
904                 rxvq->stats.packets = 0;
905                 rxvq->stats.bytes = 0;
906                 rxvq->stats.errors = 0;
907                 rxvq->stats.multicast = 0;
908                 rxvq->stats.broadcast = 0;
909                 memset(rxvq->stats.size_bins, 0,
910                        sizeof(rxvq->stats.size_bins[0]) * 8);
911         }
912 }
913
914 static void
915 virtio_set_hwaddr(struct virtio_hw *hw)
916 {
917         vtpci_write_dev_config(hw,
918                         offsetof(struct virtio_net_config, mac),
919                         &hw->mac_addr, ETHER_ADDR_LEN);
920 }
921
922 static void
923 virtio_get_hwaddr(struct virtio_hw *hw)
924 {
925         if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
926                 vtpci_read_dev_config(hw,
927                         offsetof(struct virtio_net_config, mac),
928                         &hw->mac_addr, ETHER_ADDR_LEN);
929         } else {
930                 eth_random_addr(&hw->mac_addr[0]);
931                 virtio_set_hwaddr(hw);
932         }
933 }
934
935 static void
936 virtio_mac_table_set(struct virtio_hw *hw,
937                      const struct virtio_net_ctrl_mac *uc,
938                      const struct virtio_net_ctrl_mac *mc)
939 {
940         struct virtio_pmd_ctrl ctrl;
941         int err, len[2];
942
943         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
944                 PMD_DRV_LOG(INFO, "host does not support mac table");
945                 return;
946         }
947
948         ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
949         ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
950
951         len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
952         memcpy(ctrl.data, uc, len[0]);
953
954         len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
955         memcpy(ctrl.data + len[0], mc, len[1]);
956
957         err = virtio_send_command(hw->cvq, &ctrl, len, 2);
958         if (err != 0)
959                 PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
960 }
961
962 static void
963 virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
964                     uint32_t index, uint32_t vmdq __rte_unused)
965 {
966         struct virtio_hw *hw = dev->data->dev_private;
967         const struct ether_addr *addrs = dev->data->mac_addrs;
968         unsigned int i;
969         struct virtio_net_ctrl_mac *uc, *mc;
970
971         if (index >= VIRTIO_MAX_MAC_ADDRS) {
972                 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
973                 return;
974         }
975
976         uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
977         uc->entries = 0;
978         mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
979         mc->entries = 0;
980
981         for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
982                 const struct ether_addr *addr
983                         = (i == index) ? mac_addr : addrs + i;
984                 struct virtio_net_ctrl_mac *tbl
985                         = is_multicast_ether_addr(addr) ? mc : uc;
986
987                 memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
988         }
989
990         virtio_mac_table_set(hw, uc, mc);
991 }
992
993 static void
994 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
995 {
996         struct virtio_hw *hw = dev->data->dev_private;
997         struct ether_addr *addrs = dev->data->mac_addrs;
998         struct virtio_net_ctrl_mac *uc, *mc;
999         unsigned int i;
1000
1001         if (index >= VIRTIO_MAX_MAC_ADDRS) {
1002                 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
1003                 return;
1004         }
1005
1006         uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
1007         uc->entries = 0;
1008         mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
1009         mc->entries = 0;
1010
1011         for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
1012                 struct virtio_net_ctrl_mac *tbl;
1013
1014                 if (i == index || is_zero_ether_addr(addrs + i))
1015                         continue;
1016
1017                 tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
1018                 memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
1019         }
1020
1021         virtio_mac_table_set(hw, uc, mc);
1022 }
1023
1024 static void
1025 virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1026 {
1027         struct virtio_hw *hw = dev->data->dev_private;
1028
1029         memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
1030
1031         /* Use atomic update if available */
1032         if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1033                 struct virtio_pmd_ctrl ctrl;
1034                 int len = ETHER_ADDR_LEN;
1035
1036                 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
1037                 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
1038
1039                 memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
1040                 virtio_send_command(hw->cvq, &ctrl, &len, 1);
1041         } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
1042                 virtio_set_hwaddr(hw);
1043 }
1044
1045 static int
1046 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1047 {
1048         struct virtio_hw *hw = dev->data->dev_private;
1049         struct virtio_pmd_ctrl ctrl;
1050         int len;
1051
1052         if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1053                 return -ENOTSUP;
1054
1055         ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1056         ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1057         memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1058         len = sizeof(vlan_id);
1059
1060         return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1061 }
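/*
 * Illustrative application-level use (editor's sketch, hypothetical
 * port_id): this callback is reached through the generic ethdev API, e.g.
 *
 *   ret = rte_eth_dev_vlan_filter(port_id, 100, 1);   (add VLAN id 100)
 *
 * which propagates the -ENOTSUP above when VIRTIO_NET_F_CTRL_VLAN was not
 * negotiated.
 */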
1062
1063 static int
1064 virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1065 {
1066         uint64_t host_features;
1067
1068         /* Prepare guest_features: features that the driver wants to support */
1069         PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1070                 req_features);
1071
1072         /* Read device(host) feature bits */
1073         host_features = hw->vtpci_ops->get_features(hw);
1074         PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1075                 host_features);
1076
1077         /*
1078          * Negotiate features: a subset of the device's feature bits is
1079          * written back as the guest feature bits.
1080          */
1081         hw->guest_features = req_features;
1082         hw->guest_features = vtpci_negotiate_features(hw, host_features);
1083         PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1084                 hw->guest_features);
1085
1086         if (hw->modern) {
1087                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
1088                         PMD_INIT_LOG(ERR,
1089                                 "VIRTIO_F_VERSION_1 feature is not enabled.");
1090                         return -1;
1091                 }
1092                 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1093                 if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1094                         PMD_INIT_LOG(ERR,
1095                                 "failed to set FEATURES_OK status!");
1096                         return -1;
1097                 }
1098         }
1099
1100         hw->req_guest_features = req_features;
1101
1102         return 0;
1103 }
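/*
 * Negotiation sketch (editor's illustration): the negotiated set is the
 * intersection of the requested and host masks, e.g.
 *
 *   req_features  = (1ULL << VIRTIO_NET_F_CSUM) | (1ULL << VIRTIO_NET_F_MQ);
 *   host_features = (1ULL << VIRTIO_NET_F_CSUM);
 *   guest_features == req_features & host_features, i.e. CSUM only.
 */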
1104
1105 /*
1106  * Process the Virtio config-changed interrupt and call the callback
1107  * if link state changed.
1108  */
1109 static void
1110 virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
1111                          void *param)
1112 {
1113         struct rte_eth_dev *dev = param;
1114         struct virtio_hw *hw = dev->data->dev_private;
1115         uint8_t isr;
1116
1117         /* Read interrupt status which clears interrupt */
1118         isr = vtpci_isr(hw);
1119         PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
1120
1121         if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
1122                 PMD_DRV_LOG(ERR, "interrupt enable failed");
1123
1124         if (isr & VIRTIO_PCI_ISR_CONFIG) {
1125                 if (virtio_dev_link_update(dev, 0) == 0)
1126                         _rte_eth_dev_callback_process(dev,
1127                                                       RTE_ETH_EVENT_INTR_LSC, NULL);
1128         }
1129
1130 }
1131
1132 static void
1133 rx_func_get(struct rte_eth_dev *eth_dev)
1134 {
1135         struct virtio_hw *hw = eth_dev->data->dev_private;
1136         if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
1137                 eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1138         else
1139                 eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1140 }
1141
1142 /* reset device and renegotiate features if needed */
1143 static int
1144 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1145 {
1146         struct virtio_hw *hw = eth_dev->data->dev_private;
1147         struct virtio_net_config *config;
1148         struct virtio_net_config local_config;
1149         struct rte_pci_device *pci_dev = eth_dev->pci_dev;
1150
1151         /* Reset the device although not necessary at startup */
1152         vtpci_reset(hw);
1153
1154         /* Tell the host we've noticed this device. */
1155         vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1156
1157         /* Tell the host we know how to drive the device. */
1158         vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1159         if (virtio_negotiate_features(hw, req_features) < 0)
1160                 return -1;
1161
1162         /* If host does not support status then disable LSC */
1163         if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
1164                 eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1165         else
1166                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1167
1168         rte_eth_copy_pci_info(eth_dev, pci_dev);
1169
1170         rx_func_get(eth_dev);
1171
1172         /* Setting up rx_header size for the device */
1173         if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1174             vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
1175                 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1176         else
1177                 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1178
1179         /* Copy the permanent MAC address to: virtio_hw */
1180         virtio_get_hwaddr(hw);
1181         ether_addr_copy((struct ether_addr *) hw->mac_addr,
1182                         &eth_dev->data->mac_addrs[0]);
1183         PMD_INIT_LOG(DEBUG,
1184                      "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1185                      hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1186                      hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
1187
1188         if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1189                 config = &local_config;
1190
1191                 vtpci_read_dev_config(hw,
1192                         offsetof(struct virtio_net_config, mac),
1193                         &config->mac, sizeof(config->mac));
1194
1195                 if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1196                         vtpci_read_dev_config(hw,
1197                                 offsetof(struct virtio_net_config, status),
1198                                 &config->status, sizeof(config->status));
1199                 } else {
1200                         PMD_INIT_LOG(DEBUG,
1201                                      "VIRTIO_NET_F_STATUS is not supported");
1202                         config->status = 0;
1203                 }
1204
1205                 if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
1206                         vtpci_read_dev_config(hw,
1207                                 offsetof(struct virtio_net_config, max_virtqueue_pairs),
1208                                 &config->max_virtqueue_pairs,
1209                                 sizeof(config->max_virtqueue_pairs));
1210                 } else {
1211                         PMD_INIT_LOG(DEBUG,
1212                                      "VIRTIO_NET_F_MQ is not supported");
1213                         config->max_virtqueue_pairs = 1;
1214                 }
1215
1216                 hw->max_queue_pairs = config->max_virtqueue_pairs;
1217
1218                 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1219                                 config->max_virtqueue_pairs);
1220                 PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1221                 PMD_INIT_LOG(DEBUG,
1222                                 "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
1223                                 config->mac[0], config->mac[1],
1224                                 config->mac[2], config->mac[3],
1225                                 config->mac[4], config->mac[5]);
1226         } else {
1227                 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1228                 hw->max_queue_pairs = 1;
1229         }
1230
1231         if (pci_dev)
1232                 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1233                         eth_dev->data->port_id, pci_dev->id.vendor_id,
1234                         pci_dev->id.device_id);
1235
1236         return 0;
1237 }
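/*
 * Device status handshake driven above (editor's sketch of the order the
 * virtio spec requires): RESET -> ACK -> DRIVER -> feature negotiation
 * (FEATURES_OK for modern devices) -> config space read. DRIVER_OK is set
 * later, from virtio_dev_start() via vtpci_reinit_complete().
 */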
1238
1239 /*
1240  * This function is based on the probe() function in virtio_pci.c.
1241  * It returns 0 on success.
1242  */
1243 int
1244 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1245 {
1246         struct virtio_hw *hw = eth_dev->data->dev_private;
1247         struct rte_pci_device *pci_dev;
1248         uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
1249         int ret;
1250
1251         RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
1252
1253         eth_dev->dev_ops = &virtio_eth_dev_ops;
1254         eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
1255
1256         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1257                 rx_func_get(eth_dev);
1258                 return 0;
1259         }
1260
1261         /* Allocate memory for storing MAC addresses */
1262         eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
1263         if (eth_dev->data->mac_addrs == NULL) {
1264                 PMD_INIT_LOG(ERR,
1265                         "Failed to allocate %d bytes needed to store MAC addresses",
1266                         VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
1267                 return -ENOMEM;
1268         }
1269
1270         pci_dev = eth_dev->pci_dev;
1271
1272         if (pci_dev) {
1273                 ret = vtpci_init(pci_dev, hw, &dev_flags);
1274                 if (ret)
1275                         return ret;
1276         }
1277
1278         eth_dev->data->dev_flags = dev_flags;
1279
1280         /* reset device and negotiate default features */
1281         ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1282         if (ret < 0)
1283                 return ret;
1284
1285         /* Setup interrupt callback  */
1286         if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1287                 rte_intr_callback_register(&pci_dev->intr_handle,
1288                         virtio_interrupt_handler, eth_dev);
1289
1290         return 0;
1291 }
1292
1293 static int
1294 eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
1295 {
1296         struct rte_pci_device *pci_dev;
1297         struct virtio_hw *hw = eth_dev->data->dev_private;
1298
1299         PMD_INIT_FUNC_TRACE();
1300
1301         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1302                 return -EPERM;
1303
1304         if (hw->started == 1) {
1305                 virtio_dev_stop(eth_dev);
1306                 virtio_dev_close(eth_dev);
1307         }
1308         pci_dev = eth_dev->pci_dev;
1309
1310         eth_dev->dev_ops = NULL;
1311         eth_dev->tx_pkt_burst = NULL;
1312         eth_dev->rx_pkt_burst = NULL;
1313
1314         rte_free(eth_dev->data->mac_addrs);
1315         eth_dev->data->mac_addrs = NULL;
1316
1317         /* reset interrupt callback  */
1318         if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1319                 rte_intr_callback_unregister(&pci_dev->intr_handle,
1320                                                 virtio_interrupt_handler,
1321                                                 eth_dev);
1322         rte_eal_pci_unmap_device(pci_dev);
1323
1324         PMD_INIT_LOG(DEBUG, "dev_uninit completed");
1325
1326         return 0;
1327 }
1328
1329 static struct eth_driver rte_virtio_pmd = {
1330         .pci_drv = {
1331                 .driver = {
1332                         .name = "net_virtio",
1333                 },
1334                 .id_table = pci_id_virtio_map,
1335                 .drv_flags = RTE_PCI_DRV_DETACHABLE,
1336                 .probe = rte_eth_dev_pci_probe,
1337                 .remove = rte_eth_dev_pci_remove,
1338         },
1339         .eth_dev_init = eth_virtio_dev_init,
1340         .eth_dev_uninit = eth_virtio_dev_uninit,
1341         .dev_private_size = sizeof(struct virtio_hw),
1342 };
1343
1344 RTE_INIT(rte_virtio_pmd_init);
1345 static void
1346 rte_virtio_pmd_init(void)
1347 {
1348         if (rte_eal_iopl_init() != 0) {
1349                 PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
1350                 return;
1351         }
1352
1353         rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
1354 }
1355
1356 /*
1357  * Configure virtio device
1358  * It returns 0 on success.
1359  */
1360 static int
1361 virtio_dev_configure(struct rte_eth_dev *dev)
1362 {
1363         const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1364         struct virtio_hw *hw = dev->data->dev_private;
1365         uint64_t req_features;
1366         int ret;
1367
1368         PMD_INIT_LOG(DEBUG, "configure");
1369         req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
1370         if (rxmode->hw_ip_checksum)
1371                 req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
1372         if (rxmode->enable_lro)
1373                 req_features |=
1374                         (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
1375                         (1ULL << VIRTIO_NET_F_GUEST_TSO6);
1376
1377         /* if requested features changed, reinitialize the device */
1378         if (req_features != hw->req_guest_features) {
1379                 ret = virtio_init_device(dev, req_features);
1380                 if (ret < 0)
1381                         return ret;
1382         }
1383
1384         if (rxmode->hw_ip_checksum &&
1385                 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
1386                 PMD_DRV_LOG(NOTICE,
1387                         "rx ip checksum not available on this host");
1388                 return -ENOTSUP;
1389         }
1390
1391         if (rxmode->enable_lro &&
1392                 (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
1393                         !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
1394                 PMD_DRV_LOG(NOTICE,
1395                         "lro not available on this host");
1396                 return -ENOTSUP;
1397         }
1398
1399         /* Setup and start control queue */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		ret = virtio_dev_cq_queue_setup(dev,
			hw->max_queue_pairs * 2,
			SOCKET_ID_ANY);
		if (ret < 0)
			return ret;
		virtio_dev_cq_start(dev);
	}

	hw->vlan_strip = rxmode->hw_vlan_strip;

	if (rxmode->hw_vlan_filter
	    && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(NOTICE,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}
	}

	return 0;
}
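
/*
 * Illustration only (not driver code): an application opts in to the Rx
 * offloads negotiated above through its port configuration, for example:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.hw_ip_checksum = 1,
 *			.enable_lro = 1,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * virtio_dev_configure() then returns -ENOTSUP if the host cannot satisfy
 * the request.
 */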
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq __rte_unused;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}

		if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Initialize link state */
	virtio_dev_link_update(dev, 0);

	/* On restart after stop, do not touch the queues */
	if (hw->started)
		return 0;

	/* Do final configuration before rx/tx engine starts */
	virtio_dev_rxtx_start(dev);
	vtpci_reinit_complete(hw);

	hw->started = 1;

	/*
	 * Notify the backend. Otherwise the tap backend may already have
	 * stopped its queue due to fullness, and the vhost backend would
	 * have no chance to be woken up.
	 */
	nb_queues = dev->data->nb_rx_queues;
	if (nb_queues > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < nb_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_notify(rxvq->vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		VIRTQUEUE_DUMP(txvq->vq);
	}

	return 0;
}

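/*
 * Drain and free any mbufs still attached to the virtqueues so that their
 * mempool gets the buffers back; typically used on device teardown.
 */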
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct rte_mbuf *buf;
	int i, mbuf_num = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		PMD_INIT_LOG(DEBUG,
			     "Before freeing rxq[%d] used and unused bufs", i);
		VIRTQUEUE_DUMP(rxvq->vq);

		PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
		mbuf_num = 0;
		while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing rxq[%d] used and unused bufs", i);
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		PMD_INIT_LOG(DEBUG,
			     "Before freeing txq[%d] used and unused bufs",
			     i);
		VIRTQUEUE_DUMP(txvq->vq);

		mbuf_num = 0;
		while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing txq[%d] used and unused bufs", i);
		VIRTQUEUE_DUMP(txvq->vq);
	}
}

/*
 * Stop device: disable interrupt and mark link down.
 * The queues are deliberately left intact so that a later start() can
 * resume without reinitializing them (see the hw->started check there).
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	PMD_INIT_LOG(DEBUG, "stop");

	if (dev->data->dev_conf.intr_conf.lsc)
		rte_intr_disable(&dev->pci_dev->intr_handle);

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_write_link_status(dev, &link);
}

static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct rte_eth_link link, old;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_read_link_status(dev, &link);
	old = link;
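	/* virtio has no real link speed; report a nominal full-duplex 10G */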
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed  = SPEED_10G;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = ETH_LINK_DOWN;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = ETH_LINK_UP;
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		link.link_status = ETH_LINK_UP;
	}
	virtio_dev_atomic_write_link_status(dev, &link);

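	/* ethdev convention: return 0 if the link status changed, -1 otherwise */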
	return (old.link_status == link.link_status) ? -1 : 0;
}

static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	uint64_t tso_mask;
	struct virtio_hw *hw = dev->data->dev_private;

	if (dev->pci_dev)
		dev_info->driver_name = dev->driver->pci_drv.driver.name;
	else
		dev_info->driver_name = "virtio_user PMD";
	dev_info->max_rx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
	dev_info->max_tx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
	};
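	/*
	 * Tx offloads are disabled by default (ETH_TXQ_FLAGS_NOOFFLOADS);
	 * applications that need checksum or TSO offload clear these flags
	 * at tx queue setup time.
	 */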
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO;
	dev_info->tx_offload_capa = 0;

	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;
	}

	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6);
	if ((hw->guest_features & tso_mask) == tso_mask)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}

/*
 * The virtio PMD always maintains per-queue statistics, so the stats
 * mapping is a no-op; returning success lets applications such as
 * testpmd collect per-queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
	return 0;
}
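/*
 * The exported name and PCI table let external tools (e.g. dpdk-pmdinfo)
 * discover the driver and the PCI IDs it supports.
 */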
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);