net/virtio: validate features at bus level
dpdk.git: drivers/net/virtio/virtio_pci.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; we can't
 * simply include that header here, as it does not exist on non-Linux
 * platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
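 * Per the legacy (virtio 0.9.5) layout, the common header is 20 bytes;
 * with MSI-X enabled, two extra 16-bit vector registers follow it, so
 * the device-specific space then starts at offset 24.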
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts 32 bit page frame numbers.
	 * Check if the allocated physical memory exceeds 16TB.
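	 * (With VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12, any ring that ends at
	 * or above 1ULL << 44, i.e. 16TB, no longer fits the register.)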
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

#define PCI_MSIX_ENABLE 0x8000

static enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return VIRTIO_MSIX_NONE;
	}

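	/* Walk the capability list: byte 0 of each entry is the cap ID,
	 * byte 1 the offset of the next entry; for the MSI-X cap, the
	 * Message Control word at offset 2 carries the enable bit.
	 */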
	while (pos) {
		uint8_t cap[2];

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap[0] == PCI_CAP_ID_MSIX) {
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + sizeof(cap));
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				return VIRTIO_MSIX_ENABLED;
			else
				return VIRTIO_MSIX_DISABLED;
		}

		pos = cap[1];
	}

	return VIRTIO_MSIX_NONE;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, QEMU assumes the CPU is
 * big endian and enforces this for the virtio-net device.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

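/* Legacy (virtio 0.9.5) devices only have 32 feature bits, exposed via
 * the GUEST_FEATURES I/O register, hence the high-bits check below.
 */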
static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
		VIRTIO_PCI_GUEST_FEATURES);
}

static int
legacy_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_NOTIFY);
}

static void
legacy_intr_detect(struct virtio_hw *hw)
{
	hw->use_msix = vtpci_msix_detect(VTPCI_DEV(hw));
}

static int
legacy_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);
	rte_pci_ioport_unmap(VTPCI_IO(hw));

	return 0;
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg   = legacy_read_dev_config,
	.write_dev_cfg  = legacy_write_dev_config,
	.get_status     = legacy_get_status,
	.set_status     = legacy_set_status,
	.get_features   = legacy_get_features,
	.set_features   = legacy_set_features,
	.features_ok    = legacy_features_ok,
	.get_isr        = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.set_queue_irq  = legacy_set_queue_irq,
	.get_queue_num  = legacy_get_queue_num,
	.setup_queue    = legacy_setup_queue,
	.del_queue      = legacy_del_queue,
	.notify_queue   = legacy_notify_queue,
	.intr_detect    = legacy_intr_detect,
	.dev_close      = legacy_dev_close,
};

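/* The virtio 1.0 common config exposes 64-bit queue addresses as pairs
 * of 32-bit registers; this helper splits a 64-bit value across them.
 */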
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32,		      hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

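	/* Per virtio 1.0, config_generation changes whenever the device
	 * updates its config space; retry until a stable generation is
	 * observed so multi-byte fields are never read torn.
	 */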
	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0;  i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0;  i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static int
modern_features_ok(struct virtio_hw *hw)
{
	if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
		PMD_INIT_LOG(ERR, "Version 1+ required with modern devices");
		return -1;
	}

	return 0;
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

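	/* Split-ring layout: the descriptor table is followed by the
	 * avail ring, then the used ring aligned to VIRTIO_PCI_VRING_ALIGN.
	 */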
	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
				      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
				       &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
				      &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			      &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			      &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t notify_data;

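	/* Without VIRTIO_F_NOTIFICATION_DATA only the queue index is
	 * written; with it, the avail index (and, for packed rings, the
	 * wrap counter) is included so the device need not fetch it.
	 */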
	if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

static void
modern_intr_detect(struct virtio_hw *hw)
{
	hw->use_msix = vtpci_msix_detect(VTPCI_DEV(hw));
}

static int
modern_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);

	return 0;
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg   = modern_read_dev_config,
	.write_dev_cfg  = modern_write_dev_config,
	.get_status     = modern_get_status,
	.set_status     = modern_set_status,
	.get_features   = modern_get_features,
	.set_features   = modern_set_features,
	.features_ok    = modern_features_ok,
	.get_isr        = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq  = modern_set_queue_irq,
	.get_queue_num  = modern_get_queue_num,
	.setup_queue    = modern_setup_queue,
	.del_queue      = modern_del_queue,
	.notify_queue   = modern_notify_queue,
	.intr_detect    = modern_intr_detect,
	.dev_close      = modern_dev_close,
};

void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
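	/* Status bits are cumulative; only a reset (writing 0) clears
	 * them, so OR the new bit into the current device status.
	 */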
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return -1;
	}

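	/* Scan the capability list for the virtio vendor-specific caps,
	 * one per virtio 1.0 config structure (common, notify, ISR and
	 * device config), noting the MSI-X state along the way.
	 */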
	while (pos) {
		ret = rte_pci_read_config(dev, &cap, 2, pos);
		if (ret != 2) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices also expose this capability,
			 * which is why we check whether MSI-X is enabled.
			 * The 1st byte is the cap ID; the 2nd byte is the
			 * position of the next cap; the next two bytes are
			 * the flags.
			 */
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + 2);
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev,
					&hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				PMD_INIT_LOG(DEBUG,
					"failed to read notify_off_multiplier, ret %d",
					ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL    || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if the ioport map fails when the driver type is KDRV_NONE.
 *   if the device is marked allowed but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
{
	struct virtio_hw *hw = &dev->hw;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_pci_dev, hw) != 0);

	dev->pci_dev = pci_dev;

	/*
	 * Check whether we can read the virtio PCI caps, which exist only
	 * on modern PCI devices. If that fails, fall back to legacy
	 * virtio handling.
	 */
	if (virtio_read_caps(pci_dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->bus_type = VIRTIO_BUS_PCI_MODERN;
		dev->modern = true;
		goto msix_detect;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0) {
		rte_pci_unmap_device(pci_dev);
		if (pci_dev->kdrv == RTE_PCI_KDRV_UNKNOWN &&
		    (!pci_dev->device.devargs ||
		     pci_dev->device.devargs->bus !=
		     rte_bus_find_by_name("pci"))) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->bus_type = VIRTIO_BUS_PCI_LEGACY;
	dev->modern = false;

msix_detect:
	VTPCI_OPS(hw)->intr_detect(hw);

	return 0;
}