net/virtio: support modern device id
dpdk.git: drivers/net/virtio/virtio_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int  virtio_dev_configure(struct rte_eth_dev *dev);
static int  virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static void virtio_dev_stats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                                       struct rte_eth_xstat_name *xstats_names,
                                       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id, int on);
static void virtio_mac_addr_add(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr);

static int virtio_dev_queue_stats_mapping_set(
        __rte_unused struct rte_eth_dev *eth_dev,
        __rte_unused uint16_t queue_id,
        __rte_unused uint8_t stat_idx,
        __rte_unused uint8_t is_rx);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
        { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
        { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
        { .vendor_id = 0, /* sentinel */ },
};
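
/*
 * Note on the two device IDs above (per the virtio spec): a transitional
 * ("legacy") network device uses PCI device ID 0x1000, while a modern
 * (virtio 1.0) device uses 0x1040 plus the virtio device type, i.e.
 * 0x1041 for a network device. VIRTIO_PCI_LEGACY_DEVICEID_NET and
 * VIRTIO_PCI_MODERN_DEVICEID_NET in virtio_pci.h are expected to carry
 * those values; the vendor ID is 0x1af4 in both cases.
 */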

struct rte_virtio_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
        {"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
        {"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
        {"errors",                 offsetof(struct virtnet_rx, stats.errors)},
        {"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
        {"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
        {"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
        {"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
        {"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
        {"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
        {"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
        {"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
        {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
        {"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
        {"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
        {"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
        {"errors",                 offsetof(struct virtnet_tx, stats.errors)},
        {"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
        {"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
        {"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
        {"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
        {"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
        {"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
        {"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
        {"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
        {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
        {"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
                            sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
                            sizeof(rte_virtio_txq_stat_strings[0]))
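
/*
 * With the prefixing done in virtio_dev_xstats_get_names() below, the
 * exported names end up looking like, e.g. for RX queue 0:
 *   rx_q0_good_packets, rx_q0_good_bytes, ..., rx_q0_size_1519_max_packets
 * and correspondingly tx_q0_* for TX queue 0.
 */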

static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
                int *dlen, int pkt_num)
{
        uint32_t head, i;
        int k, sum = 0;
        virtio_net_ctrl_ack status = ~0;
        struct virtio_pmd_ctrl result;
        struct virtqueue *vq;

        ctrl->status = status;

        if (!cvq || !cvq->vq) {
                PMD_INIT_LOG(ERR, "Control queue is not supported.");
                return -1;
        }
        vq = cvq->vq;
        head = vq->vq_desc_head_idx;

        PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
                "vq->hw->cvq = %p vq = %p",
                vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

        if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
                return -1;

        memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
                sizeof(struct virtio_pmd_ctrl));

        /*
         * Format is enforced in qemu code:
         * One TX packet for header;
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */
        vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
        vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
        vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        i = vq->vq_ring.desc[head].next;

        for (k = 0; k < pkt_num; k++) {
                vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
                vq->vq_ring.desc[i].len = dlen[k];
                sum += dlen[k];
                vq->vq_free_cnt--;
                i = vq->vq_ring.desc[i].next;
        }

        vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
        vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_ring.desc[i].len = sizeof(ctrl->status);
        vq->vq_free_cnt--;

        vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

        vq_update_avail_ring(vq, head);
        vq_update_avail_idx(vq);

        PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

        virtqueue_notify(vq);

        rte_rmb();
        while (VIRTQUEUE_NUSED(vq) == 0) {
                rte_rmb();
                usleep(100);
        }

        while (VIRTQUEUE_NUSED(vq)) {
                uint32_t idx, desc_idx, used_idx;
                struct vring_used_elem *uep;

                used_idx = (uint32_t)(vq->vq_used_cons_idx
                                & (vq->vq_nentries - 1));
                uep = &vq->vq_ring.used->ring[used_idx];
                idx = (uint32_t) uep->id;
                desc_idx = idx;

                while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
                        desc_idx = vq->vq_ring.desc[desc_idx].next;
                        vq->vq_free_cnt++;
                }

                vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
                vq->vq_desc_head_idx = idx;

                vq->vq_used_cons_idx++;
                vq->vq_free_cnt++;
        }

        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
                        vq->vq_free_cnt, vq->vq_desc_head_idx);

        memcpy(&result, cvq->virtio_net_hdr_mz->addr,
                        sizeof(struct virtio_pmd_ctrl));

        return result.status;
}
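
/*
 * Layout of the descriptor chain built above for a control command with
 * pkt_num data segments (hence the vq_free_cnt < pkt_num + 2 check);
 * e.g. for pkt_num == 1:
 *
 *   desc[head] -> ctrl header        (VRING_DESC_F_NEXT, device-readable)
 *   desc[i]    -> ctrl data, dlen[0] (VRING_DESC_F_NEXT, device-readable)
 *   desc[j]    -> ctrl status ack    (VRING_DESC_F_WRITE, device-writable)
 *
 * See virtio_set_multiple_queues() below for a minimal caller.
 */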

static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
        memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

        dlen[0] = sizeof(uint16_t);

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
                          "failed, this is too late now...");
                return -EINVAL;
        }

        return 0;
}

void
virtio_dev_queue_release(struct virtqueue *vq)
{
        struct virtio_hw *hw;

        if (vq) {
                hw = vq->hw;
                if (vq->configured)
                        hw->vtpci_ops->del_queue(hw, vq);

                rte_free(vq->sw_ring);
                rte_free(vq);
        }
}

int virtio_dev_queue_setup(struct rte_eth_dev *dev,
                        int queue_type,
                        uint16_t queue_idx,
                        uint16_t vtpci_queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id,
                        void **pvq)
{
        char vq_name[VIRTQUEUE_MAX_NAME_SZ];
        char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
        const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
        unsigned int vq_size, size;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtnet_rx *rxvq = NULL;
        struct virtnet_tx *txvq = NULL;
        struct virtnet_ctl *cvq = NULL;
        struct virtqueue *vq;
        const char *queue_names[] = {"rvq", "txq", "cvq"};
        size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
        void *sw_ring = NULL;
        int ret;

        PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);

        /*
         * Read the virtqueue size from the Queue Size field.
         * It is always a power of 2; if 0, the virtqueue does not exist.
         */
        vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
        PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
        if (vq_size == 0) {
                PMD_INIT_LOG(ERR, "virtqueue does not exist");
                return -EINVAL;
        }

        if (!rte_is_power_of_2(vq_size)) {
                PMD_INIT_LOG(ERR, "virtqueue size is not power of 2");
                return -EINVAL;
        }

        snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
                 dev->data->port_id, queue_names[queue_type], queue_idx);

        sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
                                vq_size * sizeof(struct vq_desc_extra),
                                RTE_CACHE_LINE_SIZE);
        if (queue_type == VTNET_RQ) {
                sz_q = sz_vq + sizeof(*rxvq);
        } else if (queue_type == VTNET_TQ) {
                sz_q = sz_vq + sizeof(*txvq);
                /*
                 * For each xmit packet, allocate a virtio_net_hdr
                 * and indirect ring elements
                 */
                sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
        } else if (queue_type == VTNET_CQ) {
                sz_q = sz_vq + sizeof(*cvq);
                /* Allocate a page for control vq command, data and status */
                sz_hdr_mz = PAGE_SIZE;
        }

        vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                PMD_INIT_LOG(ERR, "can not allocate vq");
                return -ENOMEM;
        }
        vq->hw = hw;
        vq->vq_queue_index = vtpci_queue_idx;
        vq->vq_nentries = vq_size;

        if (nb_desc == 0 || nb_desc > vq_size)
                nb_desc = vq_size;
        vq->vq_free_cnt = nb_desc;

        /*
         * Reserve a memzone for vring elements
         */
        size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
        vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
        PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
                     size, vq->vq_ring_size);

        mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
                                         0, VIRTIO_PCI_VRING_ALIGN);
        if (mz == NULL) {
                if (rte_errno == EEXIST)
                        mz = rte_memzone_lookup(vq_name);
                if (mz == NULL) {
                        ret = -ENOMEM;
                        goto fail_q_alloc;
                }
        }

        memset(mz->addr, 0, mz->len);

        vq->vq_ring_mem = mz->phys_addr;
        vq->vq_ring_virt_mem = mz->addr;
        PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%" PRIx64,
                     (uint64_t)mz->phys_addr);
        PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
                     (uint64_t)(uintptr_t)mz->addr);

        if (sz_hdr_mz) {
                snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
                         dev->data->port_id, queue_names[queue_type],
                         queue_idx);
                hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
                                                     socket_id, 0,
                                                     RTE_CACHE_LINE_SIZE);
                if (hdr_mz == NULL) {
                        if (rte_errno == EEXIST)
                                hdr_mz = rte_memzone_lookup(vq_hdr_name);
                        if (hdr_mz == NULL) {
                                ret = -ENOMEM;
                                goto fail_q_alloc;
                        }
                }
        }

        if (queue_type == VTNET_RQ) {
                size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
                               sizeof(vq->sw_ring[0]);

                sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
                                             RTE_CACHE_LINE_SIZE, socket_id);
                if (!sw_ring) {
                        PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
                        ret = -ENOMEM;
                        goto fail_q_alloc;
                }

                vq->sw_ring = sw_ring;
                rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
                rxvq->vq = vq;
                rxvq->port_id = dev->data->port_id;
                rxvq->queue_id = queue_idx;
                rxvq->mz = mz;
                *pvq = rxvq;
        } else if (queue_type == VTNET_TQ) {
                txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
                txvq->vq = vq;
                txvq->port_id = dev->data->port_id;
                txvq->queue_id = queue_idx;
                txvq->mz = mz;
                txvq->virtio_net_hdr_mz = hdr_mz;
                txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;

                *pvq = txvq;
        } else if (queue_type == VTNET_CQ) {
                cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
                cvq->vq = vq;
                cvq->mz = mz;
                cvq->virtio_net_hdr_mz = hdr_mz;
                cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
                memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
                *pvq = cvq;
        }

        /* For the virtio_user case (i.e. when dev->pci_dev is NULL), we use
         * the virtual address, and we need to set _offset_ properly; see
         * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
         */
        if (dev->pci_dev)
                vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
        else {
                vq->vq_ring_mem = (uintptr_t)mz->addr;
                vq->offset = offsetof(struct rte_mbuf, buf_addr);
                if (queue_type == VTNET_TQ)
                        txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
                else if (queue_type == VTNET_CQ)
                        cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
        }

        if (queue_type == VTNET_TQ) {
                struct virtio_tx_region *txr;
                unsigned int i;

                txr = hdr_mz->addr;
                memset(txr, 0, vq_size * sizeof(*txr));
                for (i = 0; i < vq_size; i++) {
                        struct vring_desc *start_dp = txr[i].tx_indir;

                        vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));

                        /* first indirect descriptor is always the tx header */
                        start_dp->addr = txvq->virtio_net_hdr_mem
                                + i * sizeof(*txr)
                                + offsetof(struct virtio_tx_region, tx_hdr);

                        start_dp->len = hw->vtnet_hdr_size;
                        start_dp->flags = VRING_DESC_F_NEXT;
                }
        }

        if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
                PMD_INIT_LOG(ERR, "setup_queue failed");
                virtio_dev_queue_release(vq);
                return -EINVAL;
        }

        vq->configured = 1;
        return 0;

fail_q_alloc:
        rte_free(sw_ring);
        rte_memzone_free(hdr_mz);
        rte_memzone_free(mz);
        rte_free(vq);

        return ret;
}
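
/*
 * Memory layout produced by virtio_dev_queue_setup() (sketch):
 *
 *   +------------------------------------------------+ <- vq
 *   | struct virtqueue + vq_size vq_desc_extra slots |
 *   +------------------------------------------------+ <- vq + sz_vq
 *   | struct virtnet_rx / virtnet_tx / virtnet_ctl   |
 *   +------------------------------------------------+
 *
 * RTE_PTR_ADD(vq, sz_vq) recovers the per-type queue structure, while the
 * vring descriptors themselves live in the separate "mz" memzone and the
 * TX/control headers in "hdr_mz".
 */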

static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
                uint32_t socket_id)
{
        struct virtnet_ctl *cvq;
        int ret;
        struct virtio_hw *hw = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();
        ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
                        vtpci_queue_idx, 0, socket_id, (void **)&cvq);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "control vq initialization failed");
                return ret;
        }

        hw->cvq = cvq;
        return 0;
}

static void
virtio_free_queues(struct rte_eth_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                virtio_dev_rx_queue_release(dev->data->rx_queues[i]);

        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                virtio_dev_tx_queue_release(dev->data->tx_queues[i]);

        dev->data->nb_tx_queues = 0;
}

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        PMD_INIT_LOG(DEBUG, "virtio_dev_close");

        if (hw->started == 1)
                virtio_dev_stop(dev);

        /* reset the NIC */
        if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
        vtpci_reset(hw);
        virtio_dev_free_mbufs(dev);
        virtio_free_queues(dev);
}

static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
        ctrl.data[0] = 1;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to enable promisc");
}

static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
        ctrl.data[0] = 0;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to disable promisc");
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
        ctrl.data[0] = 1;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
        ctrl.data[0] = 0;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}

#define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct virtio_hw *hw = dev->data->dev_private;
        uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
                                 hw->vtnet_hdr_size;
        uint32_t frame_size = mtu + ether_hdr_len;

        if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
                PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
                        ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
                return -EINVAL;
        }
        return 0;
}
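
/*
 * Worked example for the bound above, assuming mergeable RX buffers
 * (vtnet_hdr_size == sizeof(struct virtio_net_hdr_mrg_rxbuf) == 12):
 *
 *   ether_hdr_len = ETHER_HDR_LEN (14) + VLAN_TAG_LEN (4) + 12 = 30
 *   largest accepted MTU = VIRTIO_MAX_RX_PKTLEN - 30
 *
 * With the legacy 10-byte struct virtio_net_hdr the limit is 2 bytes
 * higher.
 */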

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
        .dev_configure           = virtio_dev_configure,
        .dev_start               = virtio_dev_start,
        .dev_stop                = virtio_dev_stop,
        .dev_close               = virtio_dev_close,
        .promiscuous_enable      = virtio_dev_promiscuous_enable,
        .promiscuous_disable     = virtio_dev_promiscuous_disable,
        .allmulticast_enable     = virtio_dev_allmulticast_enable,
        .allmulticast_disable    = virtio_dev_allmulticast_disable,
        .mtu_set                 = virtio_mtu_set,
        .dev_infos_get           = virtio_dev_info_get,
        .stats_get               = virtio_dev_stats_get,
        .xstats_get              = virtio_dev_xstats_get,
        .xstats_get_names        = virtio_dev_xstats_get_names,
        .stats_reset             = virtio_dev_stats_reset,
        .xstats_reset            = virtio_dev_stats_reset,
        .link_update             = virtio_dev_link_update,
        .rx_queue_setup          = virtio_dev_rx_queue_setup,
        .rx_queue_release        = virtio_dev_rx_queue_release,
        .tx_queue_setup          = virtio_dev_tx_queue_setup,
        .tx_queue_release        = virtio_dev_tx_queue_release,
        /* collect stats per queue */
        .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
        .vlan_filter_set         = virtio_vlan_filter_set,
        .mac_addr_add            = virtio_mac_addr_add,
        .mac_addr_remove         = virtio_mac_addr_remove,
        .mac_addr_set            = virtio_mac_addr_set,
};

static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * rte_eth_dev structure.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to write.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
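
/*
 * Both helpers above assume struct rte_eth_link packs into 64 bits so
 * that a single rte_atomic64_cmpset() can publish or snapshot it without
 * a lock. The "expected" argument is the current destination value, so
 * the cmpset only fails against a concurrent writer, which is reported
 * as -1.
 */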

static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                const struct virtnet_tx *txvq = dev->data->tx_queues[i];
                if (txvq == NULL)
                        continue;

                stats->opackets += txvq->stats.packets;
                stats->obytes += txvq->stats.bytes;
                stats->oerrors += txvq->stats.errors;

                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                        stats->q_opackets[i] = txvq->stats.packets;
                        stats->q_obytes[i] = txvq->stats.bytes;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                if (rxvq == NULL)
                        continue;

                stats->ipackets += rxvq->stats.packets;
                stats->ibytes += rxvq->stats.bytes;
                stats->ierrors += rxvq->stats.errors;

                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                        stats->q_ipackets[i] = rxvq->stats.packets;
                        stats->q_ibytes[i] = rxvq->stats.bytes;
                }
        }

        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                                       struct rte_eth_xstat_name *xstats_names,
                                       __rte_unused unsigned limit)
{
        unsigned i;
        unsigned count = 0;
        unsigned t;

        unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
                dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_get_names() */

                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                        if (rxvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", i,
                                        rte_virtio_rxq_stat_strings[t].name);
                                count++;
                        }
                }

                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct virtnet_tx *txvq = dev->data->tx_queues[i];
                        if (txvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", i,
                                        rte_virtio_txq_stat_strings[t].name);
                                count++;
                        }
                }
                return count;
        }
        return nstats;
}

static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                      unsigned n)
{
        unsigned i;
        unsigned count = 0;

        unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
                dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

        if (n < nstats)
                return nstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];

                if (rxvq == NULL)
                        continue;

                unsigned t;

                for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)rxvq) +
                                rte_virtio_rxq_stat_strings[t].offset);
                        count++;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];

                if (txvq == NULL)
                        continue;

                unsigned t;

                for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)txvq) +
                                rte_virtio_txq_stat_strings[t].offset);
                        count++;
                }
        }

        return count;
}

static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        virtio_update_stats(dev, stats);
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];
                if (txvq == NULL)
                        continue;

                txvq->stats.packets = 0;
                txvq->stats.bytes = 0;
                txvq->stats.errors = 0;
                txvq->stats.multicast = 0;
                txvq->stats.broadcast = 0;
                memset(txvq->stats.size_bins, 0,
                       sizeof(txvq->stats.size_bins[0]) * 8);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                if (rxvq == NULL)
                        continue;

                rxvq->stats.packets = 0;
                rxvq->stats.bytes = 0;
                rxvq->stats.errors = 0;
                rxvq->stats.multicast = 0;
                rxvq->stats.broadcast = 0;
                memset(rxvq->stats.size_bins, 0,
                       sizeof(rxvq->stats.size_bins[0]) * 8);
        }
}

static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
        vtpci_write_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        &hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
                vtpci_read_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        &hw->mac_addr, ETHER_ADDR_LEN);
        } else {
                eth_random_addr(&hw->mac_addr[0]);
                virtio_set_hwaddr(hw);
        }
}

static void
virtio_mac_table_set(struct virtio_hw *hw,
                     const struct virtio_net_ctrl_mac *uc,
                     const struct virtio_net_ctrl_mac *mc)
{
        struct virtio_pmd_ctrl ctrl;
        int err, len[2];

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                PMD_DRV_LOG(INFO, "host does not support mac table");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

        len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
        memcpy(ctrl.data, uc, len[0]);

        len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
        memcpy(ctrl.data + len[0], mc, len[1]);

        err = virtio_send_command(hw->cvq, &ctrl, len, 2);
        if (err != 0)
                PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
}
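
/*
 * Wire format of the VIRTIO_NET_CTRL_MAC_TABLE_SET payload built above:
 * two back-to-back tables, each a 32-bit entry count followed by that
 * many 6-byte addresses, unicast table first:
 *
 *   | uc->entries | uc->macs[0..n-1] | mc->entries | mc->macs[0..m-1] |
 *
 * which is why two len[] segments are handed to virtio_send_command().
 */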

static void
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                    uint32_t index, uint32_t vmdq __rte_unused)
{
        struct virtio_hw *hw = dev->data->dev_private;
        const struct ether_addr *addrs = dev->data->mac_addrs;
        unsigned int i;
        struct virtio_net_ctrl_mac *uc, *mc;

        if (index >= VIRTIO_MAX_MAC_ADDRS) {
                PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
                return;
        }

        uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
        uc->entries = 0;
        mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
        mc->entries = 0;

        for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
                const struct ether_addr *addr
                        = (i == index) ? mac_addr : addrs + i;
                struct virtio_net_ctrl_mac *tbl
                        = is_multicast_ether_addr(addr) ? mc : uc;

                memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
        }

        virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct ether_addr *addrs = dev->data->mac_addrs;
        struct virtio_net_ctrl_mac *uc, *mc;
        unsigned int i;

        if (index >= VIRTIO_MAX_MAC_ADDRS) {
                PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
                return;
        }

        uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
        uc->entries = 0;
        mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
        mc->entries = 0;

        for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
                struct virtio_net_ctrl_mac *tbl;

                if (i == index || is_zero_ether_addr(addrs + i))
                        continue;

                tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
                memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
        }

        virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct virtio_hw *hw = dev->data->dev_private;

        memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

        /* Use atomic update if available */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                struct virtio_pmd_ctrl ctrl;
                int len = ETHER_ADDR_LEN;

                ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
                ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

                memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
                virtio_send_command(hw->cvq, &ctrl, &len, 1);
        } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
                virtio_set_hwaddr(hw);
}

static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int len;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
                return -ENOTSUP;

        ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
        ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
        memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
        len = sizeof(vlan_id);

        return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}

static int
virtio_negotiate_features(struct virtio_hw *hw)
{
        uint64_t host_features;

        /* Prepare guest_features: features that the driver wants to support */
        hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
        PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
                hw->guest_features);

        /* Read device (host) feature bits */
        host_features = hw->vtpci_ops->get_features(hw);
        PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
                host_features);

        /*
         * Negotiate features: a subset of the device's feature bits is
         * written back as the guest feature bits.
         */
        hw->guest_features = vtpci_negotiate_features(hw, host_features);
        PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
                hw->guest_features);

        if (hw->modern) {
                if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
                        PMD_INIT_LOG(ERR,
                                "VIRTIO_F_VERSION_1 feature is not enabled.");
                        return -1;
                }
                vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
                if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
                        PMD_INIT_LOG(ERR,
                                "failed to set FEATURES_OK status!");
                        return -1;
                }
        }

        return 0;
}
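
/*
 * For a modern (virtio 1.0) device, the status handshake driven from
 * eth_virtio_dev_init() and virtio_dev_start() below is roughly:
 *
 *   vtpci_reset(hw);                                    status <- 0
 *   vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);     device noticed
 *   vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);  driver present
 *   virtio_negotiate_features(hw);                      features, FEATURES_OK
 *   ... queue setup ...
 *   vtpci_reinit_complete(hw);                          DRIVER_OK
 *
 * FEATURES_OK must be read back after being written: a device that
 * rejects the negotiated feature set clears the bit, which is exactly
 * what the check above detects.
 */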

/*
 * Process virtio config changed interrupt and call the callback
 * if link state changed.
 */
static void
virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                         void *param)
{
        struct rte_eth_dev *dev = param;
        struct virtio_hw *hw = dev->data->dev_private;
        uint8_t isr;

        /* Read interrupt status which clears interrupt */
        isr = vtpci_isr(hw);
        PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

        if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
                PMD_DRV_LOG(ERR, "interrupt enable failed");

        if (isr & VIRTIO_PCI_ISR_CONFIG) {
                if (virtio_dev_link_update(dev, 0) == 0)
                        _rte_eth_dev_callback_process(dev,
                                                      RTE_ETH_EVENT_INTR_LSC);
        }
}

static void
rx_func_get(struct rte_eth_dev *eth_dev)
{
        struct virtio_hw *hw = eth_dev->data->dev_private;

        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
                eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
        else
                eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
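
/*
 * VIRTIO_NET_F_MRG_RXBUF selects the receive path because it changes the
 * per-packet header: virtio_net_hdr_mrg_rxbuf adds a num_buffers field
 * so one packet may span several descriptor chains, which
 * virtio_recv_mergeable_pkts() reassembles into a chained mbuf.
 */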

/*
 * This function is based on probe() function in virtio_pci.c
 * It returns 0 on success.
 */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
        struct virtio_hw *hw = eth_dev->data->dev_private;
        struct virtio_net_config *config;
        struct virtio_net_config local_config;
        struct rte_pci_device *pci_dev;
        uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
        int ret;

        RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));

        eth_dev->dev_ops = &virtio_eth_dev_ops;
        eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                rx_func_get(eth_dev);
                return 0;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        pci_dev = eth_dev->pci_dev;

        if (pci_dev) {
                ret = vtpci_init(pci_dev, hw, &dev_flags);
                if (ret)
                        return ret;
        }

        /* Reset the device, although not strictly necessary at startup */
        vtpci_reset(hw);

        /* Tell the host we've noticed this device. */
        vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

        /* Tell the host we know how to drive the device. */
        vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
        if (virtio_negotiate_features(hw) < 0)
                return -1;

        /* If host does not support status then disable LSC */
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
                dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags = dev_flags;

        rx_func_get(eth_dev);

        /* Setting up rx_header size for the device */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
            vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
                hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

        /* Copy the permanent MAC address to: virtio_hw */
        virtio_get_hwaddr(hw);
        ether_addr_copy((struct ether_addr *) hw->mac_addr,
                        &eth_dev->data->mac_addrs[0]);
        PMD_INIT_LOG(DEBUG,
                     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
                config = &local_config;

                vtpci_read_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        &config->mac, sizeof(config->mac));

                if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
                        vtpci_read_dev_config(hw,
                                offsetof(struct virtio_net_config, status),
                                &config->status, sizeof(config->status));
                } else {
                        PMD_INIT_LOG(DEBUG,
                                     "VIRTIO_NET_F_STATUS is not supported");
                        config->status = 0;
                }

                if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
                        vtpci_read_dev_config(hw,
                                offsetof(struct virtio_net_config, max_virtqueue_pairs),
                                &config->max_virtqueue_pairs,
                                sizeof(config->max_virtqueue_pairs));
                } else {
                        PMD_INIT_LOG(DEBUG,
                                     "VIRTIO_NET_F_MQ is not supported");
                        config->max_virtqueue_pairs = 1;
                }

                hw->max_rx_queues =
                        (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
                        VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
                hw->max_tx_queues =
                        (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
                        VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;

                virtio_dev_cq_queue_setup(eth_dev,
                                        config->max_virtqueue_pairs * 2,
                                        SOCKET_ID_ANY);

                PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
                                config->max_virtqueue_pairs);
                PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
                PMD_INIT_LOG(DEBUG,
                                "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
                                config->mac[0], config->mac[1],
                                config->mac[2], config->mac[3],
                                config->mac[4], config->mac[5]);
        } else {
                hw->max_rx_queues = 1;
                hw->max_tx_queues = 1;
        }

        PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d   hw->max_tx_queues=%d",
                        hw->max_rx_queues, hw->max_tx_queues);
        if (pci_dev)
                PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

        /* Setup interrupt callback  */
        if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                rte_intr_callback_register(&pci_dev->intr_handle,
                                   virtio_interrupt_handler, eth_dev);

        virtio_dev_cq_start(eth_dev);

        return 0;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct virtio_hw *hw = eth_dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return -EPERM;

        /* Close it anyway since there's no way to know if it was closed */
        virtio_dev_close(eth_dev);

        pci_dev = eth_dev->pci_dev;

        eth_dev->dev_ops = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->rx_pkt_burst = NULL;

        if (hw->cvq)
                virtio_dev_queue_release(hw->cvq->vq);

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        /* reset interrupt callback  */
        if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                rte_intr_callback_unregister(&pci_dev->intr_handle,
                                                virtio_interrupt_handler,
                                                eth_dev);
        rte_eal_pci_unmap_device(pci_dev);

        PMD_INIT_LOG(DEBUG, "dev_uninit completed");

        return 0;
}

static struct eth_driver rte_virtio_pmd = {
        .pci_drv = {
                .driver = {
                        .name = "net_virtio",
                },
                .id_table = pci_id_virtio_map,
                .drv_flags = RTE_PCI_DRV_DETACHABLE,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_virtio_dev_init,
        .eth_dev_uninit = eth_virtio_dev_uninit,
        .dev_private_size = sizeof(struct virtio_hw),
};

RTE_INIT(rte_virtio_pmd_init);
static void
rte_virtio_pmd_init(void)
{
        if (rte_eal_iopl_init() != 0) {
                PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
                return;
        }

        rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
}

/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
        const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct virtio_hw *hw = dev->data->dev_private;

        PMD_INIT_LOG(DEBUG, "configure");

        if (rxmode->hw_ip_checksum) {
                PMD_DRV_LOG(ERR, "HW IP checksum not supported");
                return -EINVAL;
        }

        hw->vlan_strip = rxmode->hw_vlan_strip;

        if (rxmode->hw_vlan_filter
            && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
                PMD_DRV_LOG(NOTICE,
                            "vlan filtering not available on this host");
                return -ENOTSUP;
        }

        if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
                        PMD_DRV_LOG(ERR, "failed to set config vector");
                        return -EBUSY;
                }

        return 0;
}

static int
virtio_dev_start(struct rte_eth_dev *dev)
{
        uint16_t nb_queues, i;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtnet_rx *rxvq;
        struct virtnet_tx *txvq __rte_unused;

        /* check if lsc interrupt feature is enabled */
        if (dev->data->dev_conf.intr_conf.lsc) {
                if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
                        PMD_DRV_LOG(ERR, "link status not supported by host");
                        return -ENOTSUP;
                }

                if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
                        PMD_DRV_LOG(ERR, "interrupt enable failed");
                        return -EIO;
                }
        }

        /* Initialize Link state */
        virtio_dev_link_update(dev, 0);

        /* On restart after stop do not touch queues */
        if (hw->started)
                return 0;

        /* Do final configuration before rx/tx engine starts */
        virtio_dev_rxtx_start(dev);
        vtpci_reinit_complete(hw);

        hw->started = 1;

        /*
         * Notify the backend.
         * Otherwise the tap backend might already stop its queue due to
         * fullness; the vhost backend would then have no chance to be
         * woken up.
         */
        nb_queues = dev->data->nb_rx_queues;
        if (nb_queues > 1) {
                if (virtio_set_multiple_queues(dev, nb_queues) != 0)
                        return -EINVAL;
        }

        PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

        for (i = 0; i < nb_queues; i++) {
                rxvq = dev->data->rx_queues[i];
                virtqueue_notify(rxvq->vq);
        }

        PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxvq = dev->data->rx_queues[i];
                VIRTQUEUE_DUMP(rxvq->vq);
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txvq = dev->data->tx_queues[i];
                VIRTQUEUE_DUMP(txvq->vq);
        }

        return 0;
}

static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
        struct rte_mbuf *buf;
        int i, mbuf_num = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];

                PMD_INIT_LOG(DEBUG,
                             "Before freeing rxq[%d] used and unused buf", i);
                VIRTQUEUE_DUMP(rxvq->vq);

                PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
                while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
                        rte_pktmbuf_free(buf);
                        mbuf_num++;
                }

                PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
                PMD_INIT_LOG(DEBUG,
                             "After freeing rxq[%d] used and unused buf", i);
                VIRTQUEUE_DUMP(rxvq->vq);
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];

                PMD_INIT_LOG(DEBUG,
                             "Before freeing txq[%d] used and unused bufs",
                             i);
                VIRTQUEUE_DUMP(txvq->vq);

                mbuf_num = 0;
                while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
                        rte_pktmbuf_free(buf);
                        mbuf_num++;
                }

                PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
                PMD_INIT_LOG(DEBUG,
                             "After freeing txq[%d] used and unused buf", i);
                VIRTQUEUE_DUMP(txvq->vq);
        }
}

/*
 * Stop device: disable interrupt and mark link down
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct virtio_hw *hw = dev->data->dev_private;

        PMD_INIT_LOG(DEBUG, "stop");

        hw->started = 0;

        if (dev->data->dev_conf.intr_conf.lsc)
                rte_intr_disable(&dev->pci_dev->intr_handle);

        memset(&link, 0, sizeof(link));
        virtio_dev_atomic_write_link_status(dev, &link);
}

static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct rte_eth_link link, old;
        uint16_t status;
        struct virtio_hw *hw = dev->data->dev_private;

        memset(&link, 0, sizeof(link));
        virtio_dev_atomic_read_link_status(dev, &link);
        old = link;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
        link.link_speed  = SPEED_10G;

        if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
                PMD_INIT_LOG(DEBUG, "Get link status from hw");
                vtpci_read_dev_config(hw,
                                offsetof(struct virtio_net_config, status),
                                &status, sizeof(status));
                if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
                        link.link_status = ETH_LINK_DOWN;
                        PMD_INIT_LOG(DEBUG, "Port %d is down",
                                     dev->data->port_id);
                } else {
                        link.link_status = ETH_LINK_UP;
                        PMD_INIT_LOG(DEBUG, "Port %d is up",
                                     dev->data->port_id);
                }
        } else {
                link.link_status = ETH_LINK_UP;
        }
        virtio_dev_atomic_write_link_status(dev, &link);

        return (old.link_status == link.link_status) ? -1 : 0;
}
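
/*
 * Return convention above follows the legacy link_update contract:
 * 0 when the link status changed, -1 when it did not.
 * virtio_interrupt_handler() relies on the 0 case to deliver the
 * RTE_ETH_EVENT_INTR_LSC callback only on an actual change.
 */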

static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct virtio_hw *hw = dev->data->dev_private;

        if (dev->pci_dev)
                dev_info->driver_name = dev->driver->pci_drv.driver.name;
        else
                dev_info->driver_name = "virtio_user PMD";
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
        dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
        dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
        };
}

/*
 * It enables testpmd to collect per queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
        return 0;
}

DRIVER_EXPORT_NAME(net_virtio, __COUNTER__);
DRIVER_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);