ethdev: change promiscuous callbacks to return status
drivers/net/virtio/virtio_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_arp.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"

static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int  virtio_dev_configure(struct rte_eth_dev *dev);
static int  virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int virtio_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
        int wait_to_complete);
static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static int virtio_dev_stats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                                       struct rte_eth_xstat_name *xstats_names,
                                       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int virtio_mac_addr_set(struct rte_eth_dev *dev,
                                struct rte_ether_addr *mac_addr);

static int virtio_intr_disable(struct rte_eth_dev *dev);

static int virtio_dev_queue_stats_mapping_set(
        struct rte_eth_dev *eth_dev,
        uint16_t queue_id,
        uint8_t stat_idx,
        uint8_t is_rx);

int virtio_logtype_init;
int virtio_logtype_driver;

static void virtio_notify_peers(struct rte_eth_dev *dev);
static void virtio_ack_link_announce(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
        { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
        { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
        { .vendor_id = 0, /* sentinel */ },
};

struct rte_virtio_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
        {"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
        {"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
        {"errors",                 offsetof(struct virtnet_rx, stats.errors)},
        {"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
        {"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
        {"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
        {"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
        {"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
        {"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
        {"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
        {"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
        {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
        {"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
        {"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
        {"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
        {"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
        {"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
        {"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
        {"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
        {"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
        {"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
        {"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
        {"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
        {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
        {"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
                            sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
                            sizeof(rte_virtio_txq_stat_strings[0]))

struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];

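/*
 * Send a control command on a packed virtqueue. The layout mirrors the
 * split-ring variant below: one descriptor for the request header, one
 * per data buffer, and a final device-writable descriptor for the ack
 * status. The chain is made available with a write barrier, the device
 * is notified, and the caller busy-waits until the head is used.
 */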
static struct virtio_pmd_ctrl *
virtio_send_command_packed(struct virtnet_ctl *cvq,
                           struct virtio_pmd_ctrl *ctrl,
                           int *dlen, int pkt_num)
{
        struct virtqueue *vq = cvq->vq;
        int head;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct virtio_pmd_ctrl *result;
        uint16_t flags;
        int sum = 0;
        int nb_descs = 0;
        int k;

        /*
         * Format is enforced in qemu code:
         * One TX packet for header;
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */
        head = vq->vq_avail_idx;
        flags = vq->vq_packed.cached_flags;
        desc[head].addr = cvq->virtio_net_hdr_mem;
        desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
        }

        for (k = 0; k < pkt_num; k++) {
                desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
                desc[vq->vq_avail_idx].len = dlen[k];
                desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
                        vq->vq_packed.cached_flags;
                sum += dlen[k];
                vq->vq_free_cnt--;
                nb_descs++;
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
                + sizeof(struct virtio_net_ctrl_hdr);
        desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
        desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
                vq->vq_packed.cached_flags;
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
        }

        virtio_wmb(vq->hw->weak_barriers);
        desc[head].flags = VRING_DESC_F_NEXT | flags;

        virtio_wmb(vq->hw->weak_barriers);
        virtqueue_notify(vq);

        /* wait for used descriptors in virtqueue */
        while (!desc_is_used(&desc[head], vq))
                usleep(100);

        virtio_rmb(vq->hw->weak_barriers);

        /* now get used descriptors */
        vq->vq_free_cnt += nb_descs;
        vq->vq_used_cons_idx += nb_descs;
        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                vq->vq_used_cons_idx -= vq->vq_nentries;
                vq->vq_packed.used_wrap_counter ^= 1;
        }

        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
                        "vq->vq_avail_idx=%d\n"
                        "vq->vq_used_cons_idx=%d\n"
                        "vq->vq_packed.cached_flags=0x%x\n"
                        "vq->vq_packed.used_wrap_counter=%d\n",
                        vq->vq_free_cnt,
                        vq->vq_avail_idx,
                        vq->vq_used_cons_idx,
                        vq->vq_packed.cached_flags,
                        vq->vq_packed.used_wrap_counter);

        result = cvq->virtio_net_hdr_mz->addr;
        return result;
}

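/*
 * Send a control command on a split virtqueue: chain a header descriptor,
 * one descriptor per data buffer and a device-writable status descriptor,
 * expose the chain via the avail ring, then poll the used ring until the
 * device consumes it and recycle the descriptors afterwards.
 */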
static struct virtio_pmd_ctrl *
virtio_send_command_split(struct virtnet_ctl *cvq,
                          struct virtio_pmd_ctrl *ctrl,
                          int *dlen, int pkt_num)
{
        struct virtio_pmd_ctrl *result;
        struct virtqueue *vq = cvq->vq;
        uint32_t head, i;
        int k, sum = 0;

        head = vq->vq_desc_head_idx;

        /*
         * Format is enforced in qemu code:
         * One TX packet for header;
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */
        vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
        vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
        vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        i = vq->vq_split.ring.desc[head].next;

        for (k = 0; k < pkt_num; k++) {
                vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
                vq->vq_split.ring.desc[i].len = dlen[k];
                sum += dlen[k];
                vq->vq_free_cnt--;
                i = vq->vq_split.ring.desc[i].next;
        }

        vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
        vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
        vq->vq_free_cnt--;

        vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;

        vq_update_avail_ring(vq, head);
        vq_update_avail_idx(vq);

        PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

        virtqueue_notify(vq);

        rte_rmb();
        while (VIRTQUEUE_NUSED(vq) == 0) {
                rte_rmb();
                usleep(100);
        }

        while (VIRTQUEUE_NUSED(vq)) {
                uint32_t idx, desc_idx, used_idx;
                struct vring_used_elem *uep;

                used_idx = (uint32_t)(vq->vq_used_cons_idx
                                & (vq->vq_nentries - 1));
                uep = &vq->vq_split.ring.used->ring[used_idx];
                idx = (uint32_t) uep->id;
                desc_idx = idx;

                while (vq->vq_split.ring.desc[desc_idx].flags &
                                VRING_DESC_F_NEXT) {
                        desc_idx = vq->vq_split.ring.desc[desc_idx].next;
                        vq->vq_free_cnt++;
                }

                vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
                vq->vq_desc_head_idx = idx;

                vq->vq_used_cons_idx++;
                vq->vq_free_cnt++;
        }

        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
                        vq->vq_free_cnt, vq->vq_desc_head_idx);

        result = cvq->virtio_net_hdr_mz->addr;
        return result;
}

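/*
 * Common entry point for control-queue commands. The request (header,
 * payload and status byte) is staged in the control queue's header
 * memzone under cvq->lock, dispatched through the packed or split
 * variant above, and the status written back by the device is returned.
 */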
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
                    int *dlen, int pkt_num)
{
        virtio_net_ctrl_ack status = ~0;
        struct virtio_pmd_ctrl *result;
        struct virtqueue *vq;

        ctrl->status = status;

        if (!cvq || !cvq->vq) {
                PMD_INIT_LOG(ERR, "Control queue is not supported.");
                return -1;
        }

        rte_spinlock_lock(&cvq->lock);
        vq = cvq->vq;

        PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
                "vq->hw->cvq = %p vq = %p",
                vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

        if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
                rte_spinlock_unlock(&cvq->lock);
                return -1;
        }

        memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
                sizeof(struct virtio_pmd_ctrl));

        if (vtpci_packed_queue(vq->hw))
                result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
        else
                result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);

        rte_spinlock_unlock(&cvq->lock);
        return result->status;
}

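/*
 * Tell the device how many queue pairs to use via
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; typically invoked when the
 * application configures more than one Rx/Tx queue pair.
 */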
static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
        memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

        dlen[0] = sizeof(uint16_t);

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret) {
                PMD_INIT_LOG(ERR,
                        "Multiqueue configured but send command failed");
                return -EINVAL;
        }

        return 0;
}

static void
virtio_dev_queue_release(void *queue __rte_unused)
{
        /* do nothing */
}

static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
        uint16_t nr_vq = hw->max_queue_pairs * 2;

        if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
                nr_vq += 1;

        return nr_vq;
}

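/*
 * Reset the virtqueue bookkeeping (indexes, free count, per-descriptor
 * state) and (re)initialize the ring memory in either packed or split
 * layout, leaving device-to-guest interrupts disabled.
 */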
static void
virtio_init_vring(struct virtqueue *vq)
{
        int size = vq->vq_nentries;
        uint8_t *ring_mem = vq->vq_ring_virt_mem;

        PMD_INIT_FUNC_TRACE();

        memset(ring_mem, 0, vq->vq_ring_size);

        vq->vq_used_cons_idx = 0;
        vq->vq_desc_head_idx = 0;
        vq->vq_avail_idx = 0;
        vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
        vq->vq_free_cnt = vq->vq_nentries;
        memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
        if (vtpci_packed_queue(vq->hw)) {
                vring_init_packed(&vq->vq_packed.ring, ring_mem,
                                  VIRTIO_PCI_VRING_ALIGN, size);
                vring_desc_init_packed(vq, size);
        } else {
                struct vring *vr = &vq->vq_split.ring;

                vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
                vring_desc_init_split(vr->desc, size);
        }
        /*
         * Disable the device (host) from interrupting the guest.
         */
        virtqueue_disable_intr(vq);
}

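/*
 * Allocate and set up one virtqueue: read its size from the device,
 * reserve IOVA-contiguous memzones for the vring (and, for Tx/control
 * queues, for the virtio-net headers), initialize the ring, and hand
 * the queue to the device via setup_queue().
 */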
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
{
        char vq_name[VIRTQUEUE_MAX_NAME_SZ];
        char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
        const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
        unsigned int vq_size, size;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtnet_rx *rxvq = NULL;
        struct virtnet_tx *txvq = NULL;
        struct virtnet_ctl *cvq = NULL;
        struct virtqueue *vq;
        size_t sz_hdr_mz = 0;
        void *sw_ring = NULL;
        int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
        int ret;
        int numa_node = dev->device->numa_node;

        PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
                        vtpci_queue_idx, numa_node);

        /*
         * Read the virtqueue size from the Queue Size field.
         * It is always a power of 2; if it is 0, the virtqueue does not exist.
         */
        vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
        PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
        if (vq_size == 0) {
                PMD_INIT_LOG(ERR, "virtqueue does not exist");
                return -EINVAL;
        }

        if (!rte_is_power_of_2(vq_size)) {
                PMD_INIT_LOG(ERR, "virtqueue size is not power of 2");
                return -EINVAL;
        }

        snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
                 dev->data->port_id, vtpci_queue_idx);

        size = RTE_ALIGN_CEIL(sizeof(*vq) +
                                vq_size * sizeof(struct vq_desc_extra),
                                RTE_CACHE_LINE_SIZE);
        if (queue_type == VTNET_TQ) {
                /*
                 * For each xmit packet, allocate a virtio_net_hdr
                 * and indirect ring elements
                 */
                sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
        } else if (queue_type == VTNET_CQ) {
                /* Allocate a page for control vq command, data and status */
                sz_hdr_mz = PAGE_SIZE;
        }

        vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
                                numa_node);
        if (vq == NULL) {
                PMD_INIT_LOG(ERR, "cannot allocate vq");
                return -ENOMEM;
        }
        hw->vqs[vtpci_queue_idx] = vq;

        vq->hw = hw;
        vq->vq_queue_index = vtpci_queue_idx;
        vq->vq_nentries = vq_size;
        if (vtpci_packed_queue(hw)) {
                vq->vq_packed.used_wrap_counter = 1;
                vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
                vq->vq_packed.event_flags_shadow = 0;
                if (queue_type == VTNET_RQ)
                        vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
        }

        /*
         * Reserve a memzone for vring elements
         */
        size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
        vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
        PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
                     size, vq->vq_ring_size);

        mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
                        numa_node, RTE_MEMZONE_IOVA_CONTIG,
                        VIRTIO_PCI_VRING_ALIGN);
        if (mz == NULL) {
                if (rte_errno == EEXIST)
                        mz = rte_memzone_lookup(vq_name);
                if (mz == NULL) {
                        ret = -ENOMEM;
                        goto fail_q_alloc;
                }
        }

        memset(mz->addr, 0, mz->len);

        vq->vq_ring_mem = mz->iova;
        vq->vq_ring_virt_mem = mz->addr;
        PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%" PRIx64,
                     (uint64_t)mz->iova);
        PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
                     (uint64_t)(uintptr_t)mz->addr);

        virtio_init_vring(vq);

        if (sz_hdr_mz) {
                snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
                         dev->data->port_id, vtpci_queue_idx);
                hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
                                numa_node, RTE_MEMZONE_IOVA_CONTIG,
                                RTE_CACHE_LINE_SIZE);
                if (hdr_mz == NULL) {
                        if (rte_errno == EEXIST)
                                hdr_mz = rte_memzone_lookup(vq_hdr_name);
                        if (hdr_mz == NULL) {
                                ret = -ENOMEM;
                                goto fail_q_alloc;
                        }
                }
        }

        if (queue_type == VTNET_RQ) {
                size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
                               sizeof(vq->sw_ring[0]);

                sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
                                RTE_CACHE_LINE_SIZE, numa_node);
                if (!sw_ring) {
                        PMD_INIT_LOG(ERR, "cannot allocate RX soft ring");
                        ret = -ENOMEM;
                        goto fail_q_alloc;
                }

                vq->sw_ring = sw_ring;
                rxvq = &vq->rxq;
                rxvq->vq = vq;
                rxvq->port_id = dev->data->port_id;
                rxvq->mz = mz;
        } else if (queue_type == VTNET_TQ) {
                txvq = &vq->txq;
                txvq->vq = vq;
                txvq->port_id = dev->data->port_id;
                txvq->mz = mz;
                txvq->virtio_net_hdr_mz = hdr_mz;
                txvq->virtio_net_hdr_mem = hdr_mz->iova;
        } else if (queue_type == VTNET_CQ) {
                cvq = &vq->cq;
                cvq->vq = vq;
                cvq->mz = mz;
                cvq->virtio_net_hdr_mz = hdr_mz;
                cvq->virtio_net_hdr_mem = hdr_mz->iova;
                memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);

                hw->cvq = cvq;
        }

        /* For the virtio_user case (that is, when hw->dev is NULL), we use the
         * virtual address, and we need to set _offset_ properly; see
         * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
         */
        if (!hw->virtio_user_dev)
                vq->offset = offsetof(struct rte_mbuf, buf_iova);
        else {
                vq->vq_ring_mem = (uintptr_t)mz->addr;
                vq->offset = offsetof(struct rte_mbuf, buf_addr);
                if (queue_type == VTNET_TQ)
                        txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
                else if (queue_type == VTNET_CQ)
                        cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
        }

        if (queue_type == VTNET_TQ) {
                struct virtio_tx_region *txr;
                unsigned int i;

                txr = hdr_mz->addr;
                memset(txr, 0, vq_size * sizeof(*txr));
                for (i = 0; i < vq_size; i++) {
                        struct vring_desc *start_dp = txr[i].tx_indir;

                        /* first indirect descriptor is always the tx header */
                        if (!vtpci_packed_queue(hw)) {
                                vring_desc_init_split(start_dp,
                                                      RTE_DIM(txr[i].tx_indir));
                                start_dp->addr = txvq->virtio_net_hdr_mem
                                        + i * sizeof(*txr)
                                        + offsetof(struct virtio_tx_region,
                                                   tx_hdr);
                                start_dp->len = hw->vtnet_hdr_size;
                                start_dp->flags = VRING_DESC_F_NEXT;
                        }
                }
        }

        if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
                PMD_INIT_LOG(ERR, "setup_queue failed");
                return -EINVAL;
        }

        return 0;

fail_q_alloc:
        rte_free(sw_ring);
        rte_memzone_free(hdr_mz);
        rte_memzone_free(mz);
        rte_free(vq);

        return ret;
}

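/*
 * Release every virtqueue with its memzones and the hw->vqs array
 * itself; safe to call when hw->vqs is already NULL.
 */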
static void
virtio_free_queues(struct virtio_hw *hw)
{
        uint16_t nr_vq = virtio_get_nr_vq(hw);
        struct virtqueue *vq;
        int queue_type;
        uint16_t i;

        if (hw->vqs == NULL)
                return;

        for (i = 0; i < nr_vq; i++) {
                vq = hw->vqs[i];
                if (!vq)
                        continue;

                queue_type = virtio_get_queue_type(hw, i);
                if (queue_type == VTNET_RQ) {
                        rte_free(vq->sw_ring);
                        rte_memzone_free(vq->rxq.mz);
                } else if (queue_type == VTNET_TQ) {
                        rte_memzone_free(vq->txq.mz);
                        rte_memzone_free(vq->txq.virtio_net_hdr_mz);
                } else {
                        rte_memzone_free(vq->cq.mz);
                        rte_memzone_free(vq->cq.virtio_net_hdr_mz);
                }

                rte_free(vq);
                hw->vqs[i] = NULL;
        }

        rte_free(hw->vqs);
        hw->vqs = NULL;
}

static int
virtio_alloc_queues(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        uint16_t nr_vq = virtio_get_nr_vq(hw);
        uint16_t i;
        int ret;

        hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
        if (!hw->vqs) {
                PMD_INIT_LOG(ERR, "failed to allocate vqs");
                return -ENOMEM;
        }

        for (i = 0; i < nr_vq; i++) {
                ret = virtio_init_queue(dev, i);
                if (ret < 0) {
                        virtio_free_queues(hw);
                        return ret;
                }
        }

        return 0;
}

static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

        PMD_INIT_LOG(DEBUG, "virtio_dev_close");

        if (!hw->opened)
                return;
        hw->opened = false;

        /* reset the NIC */
        if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
        if (intr_conf->rxq)
                virtio_queues_unbind_intr(dev);

        if (intr_conf->lsc || intr_conf->rxq) {
                virtio_intr_disable(dev);
                rte_intr_efd_disable(dev->intr_handle);
                rte_free(dev->intr_handle->intr_vec);
                dev->intr_handle->intr_vec = NULL;
        }

        vtpci_reset(hw);
        virtio_dev_free_mbufs(dev);
        virtio_free_queues(hw);

#ifdef RTE_VIRTIO_USER
        if (hw->virtio_user_dev)
                virtio_user_dev_uninit(hw->virtio_user_dev);
        else
#endif
        if (dev->device) {
                rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(dev));
                if (!hw->modern)
                        rte_pci_ioport_unmap(VTPCI_IO(hw));
        }
}

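/*
 * As of this change, the promiscuous callbacks return a status instead
 * of void: -ENOTSUP when the host did not negotiate
 * VIRTIO_NET_F_CTRL_RX, -EAGAIN when the control command fails, and 0
 * on success. The ethdev layer propagates this status, so applications
 * can check it; a minimal sketch (hypothetical application code, not
 * part of this driver):
 *
 *	int ret = rte_eth_promiscuous_enable(port_id);
 *	if (ret != 0)
 *		printf("promiscuous enable failed: %s\n",
 *			rte_strerror(-ret));
 */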
static int
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return -ENOTSUP;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
        ctrl.data[0] = 1;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to enable promisc");
                return -EAGAIN;
        }

        return 0;
}

static int
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return -ENOTSUP;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
        ctrl.data[0] = 0;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to disable promisc");
                return -EAGAIN;
        }

        return 0;
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
        ctrl.data[0] = 1;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
                PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
        ctrl.data[0] = 0;
        dlen[0] = 1;

        ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}

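/*
 * Validate a requested MTU: the resulting frame (MTU plus Ethernet
 * header, VLAN tag and virtio-net header) must fit within the device
 * maximum, itself capped at VIRTIO_MAX_RX_PKTLEN.
 */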
#define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct virtio_hw *hw = dev->data->dev_private;
        uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
                                 hw->vtnet_hdr_size;
        uint32_t frame_size = mtu + ether_hdr_len;
        uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;

        max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);

        if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
                PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
                        RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
                return -EINVAL;
        }
        return 0;
}

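/*
 * Per-queue Rx interrupt control. After re-enabling interrupts, a full
 * memory barrier makes the change visible before the caller re-checks
 * the ring, so a completion arriving in the meantime is not missed.
 */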
static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
        struct virtqueue *vq = rxvq->vq;

        virtqueue_enable_intr(vq);
        virtio_mb(hw->weak_barriers);
        return 0;
}

static int
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
        struct virtqueue *vq = rxvq->vq;

        virtqueue_disable_intr(vq);
        return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
        .dev_configure           = virtio_dev_configure,
        .dev_start               = virtio_dev_start,
        .dev_stop                = virtio_dev_stop,
        .dev_close               = virtio_dev_close,
        .promiscuous_enable      = virtio_dev_promiscuous_enable,
        .promiscuous_disable     = virtio_dev_promiscuous_disable,
        .allmulticast_enable     = virtio_dev_allmulticast_enable,
        .allmulticast_disable    = virtio_dev_allmulticast_disable,
        .mtu_set                 = virtio_mtu_set,
        .dev_infos_get           = virtio_dev_info_get,
        .stats_get               = virtio_dev_stats_get,
        .xstats_get              = virtio_dev_xstats_get,
        .xstats_get_names        = virtio_dev_xstats_get_names,
        .stats_reset             = virtio_dev_stats_reset,
        .xstats_reset            = virtio_dev_stats_reset,
        .link_update             = virtio_dev_link_update,
        .vlan_offload_set        = virtio_dev_vlan_offload_set,
        .rx_queue_setup          = virtio_dev_rx_queue_setup,
        .rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
        .rx_queue_release        = virtio_dev_queue_release,
        .rx_descriptor_done      = virtio_dev_rx_queue_done,
        .tx_queue_setup          = virtio_dev_tx_queue_setup,
        .tx_queue_release        = virtio_dev_queue_release,
        /* collect stats per queue */
        .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
        .vlan_filter_set         = virtio_vlan_filter_set,
        .mac_addr_add            = virtio_mac_addr_add,
        .mac_addr_remove         = virtio_mac_addr_remove,
        .mac_addr_set            = virtio_mac_addr_set,
};

/*
 * dev_ops for virtio-user in secondary processes; only limited
 * functionality is currently supported.
 */
const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
        .dev_infos_get           = virtio_dev_info_get,
        .stats_get               = virtio_dev_stats_get,
        .xstats_get              = virtio_dev_xstats_get,
        .xstats_get_names        = virtio_dev_xstats_get_names,
        .stats_reset             = virtio_dev_stats_reset,
        .xstats_reset            = virtio_dev_stats_reset,
        /* collect stats per queue */
        .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
};

static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                const struct virtnet_tx *txvq = dev->data->tx_queues[i];
                if (txvq == NULL)
                        continue;

                stats->opackets += txvq->stats.packets;
                stats->obytes += txvq->stats.bytes;

                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                        stats->q_opackets[i] = txvq->stats.packets;
                        stats->q_obytes[i] = txvq->stats.bytes;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                if (rxvq == NULL)
                        continue;

                stats->ipackets += rxvq->stats.packets;
                stats->ibytes += rxvq->stats.bytes;
                stats->ierrors += rxvq->stats.errors;

                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                        stats->q_ipackets[i] = rxvq->stats.packets;
                        stats->q_ibytes[i] = rxvq->stats.bytes;
                }
        }

        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                                       struct rte_eth_xstat_name *xstats_names,
                                       __rte_unused unsigned limit)
{
        unsigned i;
        unsigned count = 0;
        unsigned t;

        unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
                dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_names() */

                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                        if (rxvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", i,
                                        rte_virtio_rxq_stat_strings[t].name);
                                count++;
                        }
                }

                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct virtnet_tx *txvq = dev->data->tx_queues[i];
                        if (txvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", i,
                                        rte_virtio_txq_stat_strings[t].name);
                                count++;
                        }
                }
                return count;
        }
        return nstats;
}

static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                      unsigned n)
{
        unsigned i;
        unsigned count = 0;

        unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
                dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

        if (n < nstats)
                return nstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];

                if (rxvq == NULL)
                        continue;

                unsigned t;

                for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)rxvq) +
                                rte_virtio_rxq_stat_strings[t].offset);
                        xstats[count].id = count;
                        count++;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];

                if (txvq == NULL)
                        continue;

                unsigned t;

                for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)txvq) +
                                rte_virtio_txq_stat_strings[t].offset);
                        xstats[count].id = count;
                        count++;
                }
        }

        return count;
}

static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        virtio_update_stats(dev, stats);

        return 0;
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];
                if (txvq == NULL)
                        continue;

                txvq->stats.packets = 0;
                txvq->stats.bytes = 0;
                txvq->stats.multicast = 0;
                txvq->stats.broadcast = 0;
                memset(txvq->stats.size_bins, 0,
                       sizeof(txvq->stats.size_bins[0]) * 8);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                if (rxvq == NULL)
                        continue;

                rxvq->stats.packets = 0;
                rxvq->stats.bytes = 0;
                rxvq->stats.errors = 0;
                rxvq->stats.multicast = 0;
                rxvq->stats.broadcast = 0;
                memset(rxvq->stats.size_bins, 0,
                       sizeof(rxvq->stats.size_bins[0]) * 8);
        }
}

static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
        vtpci_write_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        &hw->mac_addr, RTE_ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
                vtpci_read_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        &hw->mac_addr, RTE_ETHER_ADDR_LEN);
        } else {
                rte_eth_random_addr(&hw->mac_addr[0]);
                virtio_set_hwaddr(hw);
        }
}

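/*
 * Program the device MAC filter with VIRTIO_NET_CTRL_MAC_TABLE_SET: one
 * control command carrying two buffers, the unicast table followed by
 * the multicast table.
 */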
static int
virtio_mac_table_set(struct virtio_hw *hw,
                     const struct virtio_net_ctrl_mac *uc,
                     const struct virtio_net_ctrl_mac *mc)
{
        struct virtio_pmd_ctrl ctrl;
        int err, len[2];

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                PMD_DRV_LOG(INFO, "host does not support mac table");
                return -1;
        }

        ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
        ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

        len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
        memcpy(ctrl.data, uc, len[0]);

        len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
        memcpy(ctrl.data + len[0], mc, len[1]);

        err = virtio_send_command(hw->cvq, &ctrl, len, 2);
        if (err != 0)
                PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
        return err;
}

static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                    uint32_t index, uint32_t vmdq __rte_unused)
{
        struct virtio_hw *hw = dev->data->dev_private;
        const struct rte_ether_addr *addrs = dev->data->mac_addrs;
        unsigned int i;
        struct virtio_net_ctrl_mac *uc, *mc;

        if (index >= VIRTIO_MAX_MAC_ADDRS) {
                PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
                return -EINVAL;
        }

        uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
                sizeof(uc->entries));
        uc->entries = 0;
        mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
                sizeof(mc->entries));
        mc->entries = 0;

        for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
                const struct rte_ether_addr *addr
                        = (i == index) ? mac_addr : addrs + i;
                struct virtio_net_ctrl_mac *tbl
                        = rte_is_multicast_ether_addr(addr) ? mc : uc;

                memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
        }

        return virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct rte_ether_addr *addrs = dev->data->mac_addrs;
        struct virtio_net_ctrl_mac *uc, *mc;
        unsigned int i;

        if (index >= VIRTIO_MAX_MAC_ADDRS) {
                PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
                return;
        }

        uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
                sizeof(uc->entries));
        uc->entries = 0;
        mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
                sizeof(mc->entries));
        mc->entries = 0;

        for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
                struct virtio_net_ctrl_mac *tbl;

                if (i == index || rte_is_zero_ether_addr(addrs + i))
                        continue;

                tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
                memcpy(&tbl->macs[tbl->entries++], addrs + i,
                        RTE_ETHER_ADDR_LEN);
        }

        virtio_mac_table_set(hw, uc, mc);
}

static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
        struct virtio_hw *hw = dev->data->dev_private;

        memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);

        /* Use atomic update if available */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                struct virtio_pmd_ctrl ctrl;
                int len = RTE_ETHER_ADDR_LEN;

                ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
                ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

                memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
                return virtio_send_command(hw->cvq, &ctrl, &len, 1);
        }

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
                return -ENOTSUP;

        virtio_set_hwaddr(hw);
        return 0;
}

static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtio_pmd_ctrl ctrl;
        int len;

        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
                return -ENOTSUP;

        ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
        ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
        memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
        len = sizeof(vlan_id);

        return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}

static int
virtio_intr_unmask(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        if (rte_intr_ack(dev->intr_handle) < 0)
                return -1;

        if (!hw->virtio_user_dev)
                hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

        return 0;
}

static int
virtio_intr_enable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        if (rte_intr_enable(dev->intr_handle) < 0)
                return -1;

        if (!hw->virtio_user_dev)
                hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

        return 0;
}

static int
virtio_intr_disable(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        if (rte_intr_disable(dev->intr_handle) < 0)
                return -1;

        if (!hw->virtio_user_dev)
                hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

        return 0;
}

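/*
 * Feature negotiation: intersect the driver's requested features with
 * what the host offers (dropping VIRTIO_NET_F_MTU if the device reports
 * an invalid MTU), then, for modern devices, require VIRTIO_F_VERSION_1
 * and confirm the selection with the FEATURES_OK status bit.
 */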
static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
        uint64_t host_features;

        /* Prepare guest_features: the features the driver wants to support */
        PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
                req_features);

        /* Read the device (host) feature bits */
        host_features = VTPCI_OPS(hw)->get_features(hw);
        PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
                host_features);

        /* If supported, ensure MTU value is valid before acknowledging it. */
        if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
                struct virtio_net_config config;

                vtpci_read_dev_config(hw,
                        offsetof(struct virtio_net_config, mtu),
                        &config.mtu, sizeof(config.mtu));

                if (config.mtu < RTE_ETHER_MIN_MTU)
                        req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
        }

        /*
         * Negotiate features: a subset of the device feature bits is
         * written back as the guest feature bits.
         */
        hw->guest_features = req_features;
        hw->guest_features = vtpci_negotiate_features(hw, host_features);
        PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
                hw->guest_features);

        if (hw->modern) {
                if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
                        PMD_INIT_LOG(ERR,
                                "VIRTIO_F_VERSION_1 feature is not enabled.");
                        return -1;
                }
                vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
                if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
                        PMD_INIT_LOG(ERR,
                                "failed to set FEATURES_OK status!");
                        return -1;
                }
        }

        hw->req_guest_features = req_features;

        return 0;
}

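/*
 * Locking contract (not obvious from the signatures): virtio_dev_pause()
 * returns with hw->state_lock held on success, and virtio_dev_resume()
 * releases it, so the two must always be used as a pair around
 * virtio_inject_pkts().
 */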
int
virtio_dev_pause(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        rte_spinlock_lock(&hw->state_lock);

        if (hw->started == 0) {
                /* Device is just stopped. */
                rte_spinlock_unlock(&hw->state_lock);
                return -1;
        }
        hw->started = 0;
        /*
         * Prevent the worker threads from touching queues to avoid
         * contention; 1 ms should be enough for an ongoing Tx function
         * to finish.
         */
        rte_delay_ms(1);
        return 0;
}

/*
 * Recover hw state to let the worker threads continue.
 */
void
virtio_dev_resume(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        hw->started = 1;
        rte_spinlock_unlock(&hw->state_lock);
}

/*
 * Should be called only after the device is paused.
 */
int
virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
                int nb_pkts)
{
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtnet_tx *txvq = dev->data->tx_queues[0];
        int ret;

        hw->inject_pkts = tx_pkts;
        ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
        hw->inject_pkts = NULL;

        return ret;
}

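/*
 * Used for announcements after events such as live migration: build a
 * RARP packet carrying the port's MAC address, pause the datapath,
 * inject the packet through the Tx burst function and resume.
 */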
1419 static void
1420 virtio_notify_peers(struct rte_eth_dev *dev)
1421 {
1422         struct virtio_hw *hw = dev->data->dev_private;
1423         struct virtnet_rx *rxvq;
1424         struct rte_mbuf *rarp_mbuf;
1425
1426         if (!dev->data->rx_queues)
1427                 return;
1428
1429         rxvq = dev->data->rx_queues[0];
1430         if (!rxvq)
1431                 return;
1432
1433         rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
1434                         (struct rte_ether_addr *)hw->mac_addr);
1435         if (rarp_mbuf == NULL) {
1436                 PMD_DRV_LOG(ERR, "failed to make RARP packet.");
1437                 return;
1438         }
1439
1440         /* If virtio port just stopped, no need to send RARP */
1441         if (virtio_dev_pause(dev) < 0) {
1442                 rte_pktmbuf_free(rarp_mbuf);
1443                 return;
1444         }
1445
1446         virtio_inject_pkts(dev, &rarp_mbuf, 1);
1447         virtio_dev_resume(dev);
1448 }
1449
1450 static void
1451 virtio_ack_link_announce(struct rte_eth_dev *dev)
1452 {
1453         struct virtio_hw *hw = dev->data->dev_private;
1454         struct virtio_pmd_ctrl ctrl;
1455
1456         ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
1457         ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
1458
1459         virtio_send_command(hw->cvq, &ctrl, NULL, 0);
1460 }

/*
 * Process the virtio config-changed interrupt.  Call the LSC callback
 * if the link state changed, and generate a gratuitous RARP packet if
 * the status indicates an ANNOUNCE.
 */
void
virtio_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;
	uint16_t status;

	/* Read the interrupt status, which also clears the interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	if (virtio_intr_unmask(dev) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
			if (status & VIRTIO_NET_S_ANNOUNCE) {
				virtio_notify_peers(dev);
				if (hw->cvq)
					virtio_ack_link_announce(dev);
			}
		}
	}
}
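
/*
 * Example (sketch, not part of the upstream driver): how an application
 * can observe the RTE_ETH_EVENT_INTR_LSC events raised by the handler
 * above.  The callback name is hypothetical.
 */
static __rte_unused int
example_lsc_callback(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
	PMD_DRV_LOG(INFO, "port %u: link state change event %d",
		port_id, event);
	return 0;
}
/*
 * Registered once at application init time with:
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 example_lsc_callback, NULL);
 */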

/* Set the Rx and Tx handlers according to what the device supports */
static void
set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
	if (vtpci_packed_queue(hw)) {
		PMD_INIT_LOG(INFO,
			"virtio: using packed ring %s Tx path on port %u",
			hw->use_inorder_tx ? "inorder" : "standard",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
	} else {
		if (hw->use_inorder_tx) {
			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
		}
	}

	if (vtpci_packed_queue(hw)) {
		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst =
				&virtio_recv_mergeable_pkts_packed;
		} else {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
		}
	} else {
		if (hw->use_simple_rx) {
			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
		} else if (hw->use_inorder_rx) {
			PMD_INIT_LOG(INFO,
				"virtio: using inorder Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
		}
	}
}
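
/*
 * Example (sketch): the burst handlers selected above are what the
 * generic rte_eth_rx_burst()/rte_eth_tx_burst() calls dispatch to.
 * A minimal application-side forwarding step might look like this;
 * the burst size is an arbitrary choice of this sketch.
 */
static __rte_unused uint16_t
example_fwd_burst(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
	/* Free whatever the Tx ring could not absorb */
	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);
	return nb_rx;
}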

/* Only support 1:1 queue/interrupt mapping so far.
 * TODO: support n:1 queue/interrupt mapping when there is a limited
 * number of interrupt vectors (<N+1).
 */
static int
virtio_queues_bind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt binding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		dev->intr_handle->intr_vec[i] = i + 1;
		if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
						 VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set queue vector");
			return -EBUSY;
		}
	}

	return 0;
}

static void
virtio_queues_unbind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i)
		VTPCI_OPS(hw)->set_queue_irq(hw,
					     hw->vqs[i * VTNET_CQ],
					     VIRTIO_MSI_NO_VECTOR);
}

static int
virtio_configure_intr(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
		return -ENOTSUP;
	}

	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "Failed to create eventfd");
		return -1;
	}

	if (!dev->intr_handle->intr_vec) {
		dev->intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->max_queue_pairs * sizeof(int), 0);
		if (!dev->intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
				     hw->max_queue_pairs);
			return -ENOMEM;
		}
	}

	/* Re-register the callback to update max_intr */
	rte_intr_callback_unregister(dev->intr_handle,
				     virtio_interrupt_handler,
				     dev);
	rte_intr_callback_register(dev->intr_handle,
				   virtio_interrupt_handler,
				   dev);

	/* DO NOT try to remove this!  This function enables MSI-X;
	 * without it, QEMU will encounter a SIGSEGV when DRIVER_OK is
	 * sent.
	 * For legacy devices, this must also be done before queue/vector
	 * binding, to grow the config space from 20 to 24 bytes; otherwise
	 * VIRTIO_MSI_QUEUE_VECTOR (offset 22) will be ignored.
	 */
	if (virtio_intr_enable(dev) < 0) {
		PMD_DRV_LOG(ERR, "interrupt enable failed");
		return -1;
	}

	if (virtio_queues_bind_intr(dev) < 0) {
		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
		return -1;
	}

	return 0;
}
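
/*
 * Example (sketch): opting in to the Rx-queue interrupt path serviced
 * by the function above.  An application sets intr_conf.rxq before
 * configuring the port and then arms the interrupt per queue; queue
 * counts here are arbitrary.
 */
static __rte_unused int
example_enable_rxq_intr(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.intr_conf.rxq = 1;	/* request per-queue Rx interrupts */
	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
		return -1;
	/* ... queue setup and rte_eth_dev_start() omitted ... */
	return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}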

/* Reset the device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev = NULL;
	int ret;

	/* Reset the device, although not strictly necessary at startup */
	vtpci_reset(hw);

	if (hw->vqs) {
		virtio_dev_free_mbufs(eth_dev);
		virtio_free_queues(hw);
	}

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);

	if (!hw->virtio_user_dev)
		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* LSC works only if the host supports both the status feature and MSI-X */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
	    hw->use_msix != VIRTIO_MSIX_NONE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	/* Set up the Rx header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address into virtio_hw */
	virtio_get_hwaddr(hw);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, mtu),
				&config->mtu,
				sizeof(config->mtu));

			/*
			 * The MTU value has already been checked at
			 * negotiation time, but check again in case it has
			 * changed since then, which should not happen.
			 */
			if (config->mtu < RTE_ETHER_MIN_MTU) {
				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
						config->mtu);
				return -1;
			}

			hw->max_mtu = config->mtu;
			/* Set the initial MTU to the maximum supported by vhost */
			eth_dev->data->mtu = config->mtu;

		} else {
			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
				VLAN_TAG_LEN - hw->vtnet_hdr_size;
		}

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
	}

	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;

	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
			virtio_free_queues(hw);
			return -1;
		}
	}

	vtpci_reinit_complete(hw);

	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

/*
 * Remap the PCI device again (IO port map for legacy devices and
 * memory map for modern devices), so that the secondary process has
 * the PCI device initialized correctly.
 */
static int
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	if (hw->modern) {
		/*
		 * We don't have to re-parse the PCI config space, since
		 * rte_pci_map_device() makes sure the address mapped in
		 * the secondary process equals the one mapped in the
		 * primary process: an error is returned if that
		 * requirement is not met.
		 *
		 * This means we can simply reuse all the cap pointers
		 * (such as dev_cfg, common_cfg, etc.) parsed by the
		 * primary process, which are stored in shared memory.
		 */
		if (rte_pci_map_device(pci_dev)) {
			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
			return -1;
		}
	} else {
		if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
			return -1;
	}

	return 0;
}

static void
virtio_set_vtpci_ops(struct virtio_hw *hw)
{
#ifdef RTE_VIRTIO_USER
	if (hw->virtio_user_dev)
		VTPCI_OPS(hw) = &virtio_user_ops;
	else
#endif
	if (hw->modern)
		VTPCI_OPS(hw) = &modern_ops;
	else
		VTPCI_OPS(hw) = &legacy_ops;
}

/*
 * This function is based on the probe() function in virtio_pci.c.
 * It returns 0 on success.
 */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	int ret;

	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR,
			"Not enough headroom: required = %d, avail = %d",
			(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
			RTE_PKTMBUF_HEADROOM);

		return -1;
	}

	eth_dev->dev_ops = &virtio_eth_dev_ops;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (!hw->virtio_user_dev) {
			ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
			if (ret)
				return ret;
		}

		virtio_set_vtpci_ops(hw);
		set_rxtx_funcs(eth_dev);

		return 0;
	}

	/*
	 * Tell rte_eth_dev_close() that it should also release the
	 * private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	hw->port_id = eth_dev->data->port_id;
	/* For the virtio_user case, hw->virtio_user_dev is populated by
	 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
	 */
	if (!hw->virtio_user_dev) {
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret)
			goto err_vtpci_init;
	}

	/* Reset the device and negotiate default features */
	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	if (ret < 0)
		goto err_virtio_init;

	hw->opened = true;

	return 0;

err_virtio_init:
	if (!hw->virtio_user_dev) {
		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
		if (!hw->modern)
			rte_pci_ioport_unmap(VTPCI_IO(hw));
	}
err_vtpci_init:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	return ret;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return 0;
}

static int vdpa_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
vdpa_mode_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "vdpa";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* vdpa mode is selected when there's a key-value pair: vdpa=1 */
	if (rte_kvargs_process(kvlist, key,
				vdpa_check_handler, NULL) < 0) {
		goto exit;
	}
	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
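
/*
 * Example (illustrative only): selecting vDPA mode from the EAL command
 * line, which makes the probe function below skip this driver.  The PCI
 * address is hypothetical:
 *
 *   testpmd -w 0000:00:04.0,vdpa=1 -- -i
 */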

static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return 1;
	}

	/* The virtio PMD skips probe if the device needs to work in vdpa mode */
	if (vdpa_mode_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
		eth_virtio_dev_init);
}

static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
	/* Port has already been released by close. */
	if (ret == -ENODEV)
		ret = 0;
	return ret;
}

static struct rte_pci_driver rte_virtio_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};

RTE_INIT(rte_virtio_pmd_init)
{
	rte_eal_iopl_init();
	rte_pci_register(&rte_virtio_pmd);
}

static bool
rx_offload_enabled(struct virtio_hw *hw)
{
	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
}

static bool
tx_offload_enabled(struct virtio_hw *hw)
{
	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
}

/*
 * Configure the virtio device.
 * It returns 0 on success.
 */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
		hw->vtnet_hdr_size;
	uint64_t rx_offloads = rxmode->offloads;
	uint64_t tx_offloads = txmode->offloads;
	uint64_t req_features;
	int ret;

	PMD_INIT_LOG(DEBUG, "configure");
	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;

	if (dev->data->dev_conf.intr_conf.rxq) {
		ret = virtio_init_device(dev, hw->req_guest_features);
		if (ret < 0)
			return ret;
	}

	if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		req_features |=
			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_CSUM);

	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
		req_features |=
			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
			(1ULL << VIRTIO_NET_F_HOST_TSO6);

	/* If the requested features changed, reinitialize the device */
	if (req_features != hw->req_guest_features) {
		ret = virtio_init_device(dev, req_features);
		if (ret < 0)
			return ret;
	}

	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
		!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
		PMD_DRV_LOG(ERR,
			"rx checksum not available on this host");
		return -ENOTSUP;
	}

	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
		(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
		PMD_DRV_LOG(ERR,
			"Large Receive Offload not available on this host");
		return -ENOTSUP;
	}

	/* Start the control queue */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_start(dev);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		hw->vlan_strip = 1;

	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
	    && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(ERR,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	hw->has_tx_offload = tx_offload_enabled(hw);
	hw->has_rx_offload = rx_offload_enabled(hw);

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		/* Enable vector (0) for Link State Interrupt */
		if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
				VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	rte_spinlock_init(&hw->state_lock);

	hw->use_simple_rx = 1;

	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
		hw->use_inorder_tx = 1;
		hw->use_inorder_rx = 1;
		hw->use_simple_rx = 0;
	}

	if (vtpci_packed_queue(hw)) {
		hw->use_simple_rx = 0;
		hw->use_inorder_rx = 0;
	}

#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
		hw->use_simple_rx = 0;
	}
#endif
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		hw->use_simple_rx = 0;
	}

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_LRO |
			   DEV_RX_OFFLOAD_VLAN_STRIP))
		hw->use_simple_rx = 0;

	return 0;
}
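
/*
 * Example (sketch): an application-side configuration that makes the
 * PMD above request the VIRTIO_NET_F_GUEST_TSO4/TSO6 features (LRO).
 * Queue counts are arbitrary choices of this sketch.
 */
static __rte_unused int
example_configure_lro(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}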
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq __rte_unused;
	struct virtio_hw *hw = dev->data->dev_private;
	int ret;

	/* Finish the initialization of the queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = virtio_dev_rx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = virtio_dev_tx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}

	/* Check whether the LSC interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}
	}

	/* Enable uio/vfio intr/eventfd mapping: although we already did
	 * this in device configure, it could have been unmapped when the
	 * device was stopped.
	 */
	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rxq) {
		virtio_intr_disable(dev);

		/* Set up the interrupt callback */
		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			rte_intr_callback_register(dev->intr_handle,
						   virtio_interrupt_handler,
						   dev);

		if (virtio_intr_enable(dev) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Notify the backend.
	 * Otherwise the tap backend might already have stopped its queue
	 * due to fullness, and the vhost backend would have no chance to
	 * be woken up.
	 */
	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (hw->max_queue_pairs > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		/* Flush the old packets */
		virtqueue_rxvq_flush(rxvq->vq);
		virtqueue_notify(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_notify(txvq->vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		VIRTQUEUE_DUMP(txvq->vq);
	}

	set_rxtx_funcs(dev);
	hw->started = true;

	/* Initialize link state */
	virtio_dev_link_update(dev, 0);

	return 0;
}
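
/*
 * Example (sketch): the generic ethdev bring-up sequence that ends in
 * virtio_dev_start() above.  Descriptor counts and the caller-provided
 * mempool are assumptions of this sketch.
 */
static __rte_unused int
example_bring_up(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
		return -1;
	if (rte_eth_rx_queue_setup(port_id, 0, 256,
			rte_eth_dev_socket_id(port_id), NULL, mp) != 0)
		return -1;
	if (rte_eth_tx_queue_setup(port_id, 0, 256,
			rte_eth_dev_socket_id(port_id), NULL) != 0)
		return -1;
	return rte_eth_dev_start(port_id);
}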

static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	const char *type __rte_unused;
	unsigned int i, mbuf_num = 0;
	struct virtqueue *vq;
	struct rte_mbuf *buf;
	int queue_type;

	if (hw->vqs == NULL)
		return;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		if (!vq)
			continue;

		queue_type = virtio_get_queue_type(hw, i);
		if (queue_type == VTNET_RQ)
			type = "rxq";
		else if (queue_type == VTNET_TQ)
			type = "txq";
		else
			continue;

		PMD_INIT_LOG(DEBUG,
			"Before freeing %s[%d] used and unused buf",
			type, i);
		VIRTQUEUE_DUMP(vq);

		while ((buf = virtqueue_detach_unused(vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG,
			"After freeing %s[%d] used and unused buf",
			type, i);
		VIRTQUEUE_DUMP(vq);
	}

	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
}

/*
 * Stop device: disable interrupts and mark the link down
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "stop");

	rte_spinlock_lock(&hw->state_lock);
	if (!hw->started)
		goto out_unlock;
	hw->started = false;

	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);

		/* Unregister the interrupt callback */
		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
			rte_intr_callback_unregister(dev->intr_handle,
						     virtio_interrupt_handler,
						     dev);
		}
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
out_unlock:
	rte_spinlock_unlock(&hw->state_lock);
}

static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct rte_eth_link link;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;

	if (!hw->started) {
		link.link_status = ETH_LINK_DOWN;
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = ETH_LINK_DOWN;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = ETH_LINK_UP;
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		link.link_status = ETH_LINK_UP;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
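
/*
 * Example (sketch): polling the link status computed above from an
 * application until the port reports up.
 */
static __rte_unused void
example_wait_link_up(uint16_t port_id)
{
	struct rte_eth_link link;

	do {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
	} while (link.link_status == ETH_LINK_DOWN);
}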

static int
virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint64_t offloads = rxmode->offloads;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
				!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
			PMD_DRV_LOG(NOTICE,
				"vlan filtering not available on this host");
			return -ENOTSUP;
		}
	}

	if (mask & ETH_VLAN_STRIP_MASK)
		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	return 0;
}

static int
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	uint64_t tso_mask, host_features;
	struct virtio_hw *hw = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */

	dev_info->max_rx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
	dev_info->max_tx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;

	host_features = VTPCI_OPS(hw)->get_features(hw);
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM;
	}
	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
	if ((host_features & tso_mask) == tso_mask)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;

	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
				    DEV_TX_OFFLOAD_VLAN_INSERT;
	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;
	}
	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6);
	if ((host_features & tso_mask) == tso_mask)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

	return 0;
}
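
/*
 * Example (sketch): checking the capabilities reported above before
 * requesting an offload at configure time.
 */
static __rte_unused int
example_lro_supported(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) != 0;
}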

/*
 * This enables testpmd to collect per-queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
	return 0;
}

RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(virtio_init_log)
{
	virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
	if (virtio_logtype_init >= 0)
		rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
	virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
	if (virtio_logtype_driver >= 0)
		rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
}