net/vhost: remove limit of vhost Rx burst size
[dpdk.git] / drivers / net / vhost / rte_eth_vhost.c
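With this change, the Rx/Tx burst handlers below loop over the vhost virtqueue in chunks of
VHOST_MAX_PKT_BURST (32), so a caller is no longer limited to 32 packets per burst call. A
minimal, hypothetical polling sketch from the application side (the port id, queue id and
MAX_BURST value are illustrative; the port is assumed to be already configured and started):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define MAX_BURST 512        /* larger than the old 32-packet cap; illustrative value */

static void
poll_vhost_queue(uint8_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[MAX_BURST];
        uint16_t nb_rx, i;

        /* The PMD now dequeues internally in 32-packet chunks until the
         * request is satisfied or the guest TX ring is drained.
         */
        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST);

        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);        /* application processing would go here */
}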
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 IGEL Co., Ltd.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of IGEL Co.,Ltd. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #include <unistd.h>
34 #include <pthread.h>
35 #include <stdbool.h>
36 #ifdef RTE_LIBRTE_VHOST_NUMA
37 #include <numaif.h>
38 #endif
39
40 #include <rte_mbuf.h>
41 #include <rte_ethdev.h>
42 #include <rte_malloc.h>
43 #include <rte_memcpy.h>
44 #include <rte_vdev.h>
45 #include <rte_kvargs.h>
46 #include <rte_virtio_net.h>
47 #include <rte_spinlock.h>
48
49 #include "rte_eth_vhost.h"
50
51 #define ETH_VHOST_IFACE_ARG             "iface"
52 #define ETH_VHOST_QUEUES_ARG            "queues"
53 #define ETH_VHOST_CLIENT_ARG            "client"
54 #define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
55 #define VHOST_MAX_PKT_BURST 32
56
57 static const char *valid_arguments[] = {
58         ETH_VHOST_IFACE_ARG,
59         ETH_VHOST_QUEUES_ARG,
60         ETH_VHOST_CLIENT_ARG,
61         ETH_VHOST_DEQUEUE_ZERO_COPY,
62         NULL
63 };
64
65 static struct ether_addr base_eth_addr = {
66         .addr_bytes = {
67                 0x56 /* V */,
68                 0x48 /* H */,
69                 0x4F /* O */,
70                 0x53 /* S */,
71                 0x54 /* T */,
72                 0x00
73         }
74 };
75
76 enum vhost_xstats_pkts {
77         VHOST_UNDERSIZE_PKT = 0,
78         VHOST_64_PKT,
79         VHOST_65_TO_127_PKT,
80         VHOST_128_TO_255_PKT,
81         VHOST_256_TO_511_PKT,
82         VHOST_512_TO_1023_PKT,
83         VHOST_1024_TO_1522_PKT,
84         VHOST_1523_TO_MAX_PKT,
85         VHOST_BROADCAST_PKT,
86         VHOST_MULTICAST_PKT,
87         VHOST_UNICAST_PKT,
88         VHOST_ERRORS_PKT,
89         VHOST_ERRORS_FRAGMENTED,
90         VHOST_ERRORS_JABBER,
91         VHOST_UNKNOWN_PROTOCOL,
92         VHOST_XSTATS_MAX,
93 };
94
95 struct vhost_stats {
96         uint64_t pkts;
97         uint64_t bytes;
98         uint64_t missed_pkts;
99         uint64_t xstats[VHOST_XSTATS_MAX];
100 };
101
102 struct vhost_queue {
103         int vid;
104         rte_atomic32_t allow_queuing;
105         rte_atomic32_t while_queuing;
106         struct pmd_internal *internal;
107         struct rte_mempool *mb_pool;
108         uint8_t port;
109         uint16_t virtqueue_id;
110         struct vhost_stats stats;
111 };
112
113 struct pmd_internal {
114         rte_atomic32_t dev_attached;
115         char *dev_name;
116         char *iface_name;
117         uint16_t max_queues;
118         rte_atomic32_t started;
119 };
120
121 struct internal_list {
122         TAILQ_ENTRY(internal_list) next;
123         struct rte_eth_dev *eth_dev;
124 };
125
126 TAILQ_HEAD(internal_list_head, internal_list);
127 static struct internal_list_head internal_list =
128         TAILQ_HEAD_INITIALIZER(internal_list);
129
130 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
131
132 static rte_atomic16_t nb_started_ports;
133 static pthread_t session_th;
134
135 static struct rte_eth_link pmd_link = {
136                 .link_speed = 10000,
137                 .link_duplex = ETH_LINK_FULL_DUPLEX,
138                 .link_status = ETH_LINK_DOWN
139 };
140
141 struct rte_vhost_vring_state {
142         rte_spinlock_t lock;
143
144         bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
145         bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
146         unsigned int index;
147         unsigned int max_vring;
148 };
149
150 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
151
152 #define VHOST_XSTATS_NAME_SIZE 64
153
154 struct vhost_xstats_name_off {
155         char name[VHOST_XSTATS_NAME_SIZE];
156         uint64_t offset;
157 };
158
159 /* [rx]_ is prepended to the name string here */
160 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
161         {"good_packets",
162          offsetof(struct vhost_queue, stats.pkts)},
163         {"total_bytes",
164          offsetof(struct vhost_queue, stats.bytes)},
165         {"missed_pkts",
166          offsetof(struct vhost_queue, stats.missed_pkts)},
167         {"broadcast_packets",
168          offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
169         {"multicast_packets",
170          offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
171         {"unicast_packets",
172          offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
173         {"undersize_packets",
174          offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
175         {"size_64_packets",
176          offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
177         {"size_65_to_127_packets",
178          offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
179         {"size_128_to_255_packets",
180          offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
181         {"size_256_to_511_packets",
182          offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
183         {"size_512_to_1023_packets",
184          offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
185         {"size_1024_to_1522_packets",
186          offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
187         {"size_1523_to_max_packets",
188          offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
189         {"errors_with_bad_CRC",
190          offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
191         {"fragmented_errors",
192          offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
193         {"jabber_errors",
194          offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
195         {"unknown_protos_packets",
196          offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
197 };
198
199 /* [tx]_ is prepended to the name string here */
200 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
201         {"good_packets",
202          offsetof(struct vhost_queue, stats.pkts)},
203         {"total_bytes",
204          offsetof(struct vhost_queue, stats.bytes)},
205         {"missed_pkts",
206          offsetof(struct vhost_queue, stats.missed_pkts)},
207         {"broadcast_packets",
208          offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
209         {"multicast_packets",
210          offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
211         {"unicast_packets",
212          offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
213         {"undersize_packets",
214          offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
215         {"size_64_packets",
216          offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
217         {"size_65_to_127_packets",
218          offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
219         {"size_128_to_255_packets",
220          offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
221         {"size_256_to_511_packets",
222          offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
223         {"size_512_to_1023_packets",
224          offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
225         {"size_1024_to_1522_packets",
226          offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
227         {"size_1523_to_max_packets",
228          offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
229         {"errors_with_bad_CRC",
230          offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
231 };
232
233 #define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
234                                 sizeof(vhost_rxport_stat_strings[0]))
235
236 #define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
237                                 sizeof(vhost_txport_stat_strings[0]))
238
239 static void
240 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
241 {
242         struct vhost_queue *vq = NULL;
243         unsigned int i = 0;
244
245         for (i = 0; i < dev->data->nb_rx_queues; i++) {
246                 vq = dev->data->rx_queues[i];
247                 if (!vq)
248                         continue;
249                 memset(&vq->stats, 0, sizeof(vq->stats));
250         }
251         for (i = 0; i < dev->data->nb_tx_queues; i++) {
252                 vq = dev->data->tx_queues[i];
253                 if (!vq)
254                         continue;
255                 memset(&vq->stats, 0, sizeof(vq->stats));
256         }
257 }
258
259 static int
260 vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
261                            struct rte_eth_xstat_name *xstats_names,
262                            unsigned int limit __rte_unused)
263 {
264         unsigned int t = 0;
265         int count = 0;
266         int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
267
268         if (!xstats_names)
269                 return nstats;
270         for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
271                 snprintf(xstats_names[count].name,
272                          sizeof(xstats_names[count].name),
273                          "rx_%s", vhost_rxport_stat_strings[t].name);
274                 count++;
275         }
276         for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
277                 snprintf(xstats_names[count].name,
278                          sizeof(xstats_names[count].name),
279                          "tx_%s", vhost_txport_stat_strings[t].name);
280                 count++;
281         }
282         return count;
283 }
284
285 static int
286 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
287                      unsigned int n)
288 {
289         unsigned int i;
290         unsigned int t;
291         unsigned int count = 0;
292         struct vhost_queue *vq = NULL;
293         unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
294
295         if (n < nxstats)
296                 return nxstats;
297
298         for (i = 0; i < dev->data->nb_rx_queues; i++) {
299                 vq = dev->data->rx_queues[i];
300                 if (!vq)
301                         continue;
302                 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
303                                 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
304                                 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
305         }
306         for (i = 0; i < dev->data->nb_tx_queues; i++) {
307                 vq = dev->data->tx_queues[i];
308                 if (!vq)
309                         continue;
310                 vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
311                                 + vq->stats.missed_pkts
312                                 - (vq->stats.xstats[VHOST_BROADCAST_PKT]
313                                 + vq->stats.xstats[VHOST_MULTICAST_PKT]);
314         }
315         for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
316                 xstats[count].value = 0;
317                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
318                         vq = dev->data->rx_queues[i];
319                         if (!vq)
320                                 continue;
321                         xstats[count].value +=
322                                 *(uint64_t *)(((char *)vq)
323                                 + vhost_rxport_stat_strings[t].offset);
324                 }
325                 xstats[count].id = count;
326                 count++;
327         }
328         for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
329                 xstats[count].value = 0;
330                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
331                         vq = dev->data->tx_queues[i];
332                         if (!vq)
333                                 continue;
334                         xstats[count].value +=
335                                 *(uint64_t *)(((char *)vq)
336                                 + vhost_txport_stat_strings[t].offset);
337                 }
338                 xstats[count].id = count;
339                 count++;
340         }
341         return count;
342 }
343
344 static inline void
345 vhost_count_multicast_broadcast(struct vhost_queue *vq,
346                                 struct rte_mbuf *mbuf)
347 {
348         struct ether_addr *ea = NULL;
349         struct vhost_stats *pstats = &vq->stats;
350
351         ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
352         if (is_multicast_ether_addr(ea)) {
353                 if (is_broadcast_ether_addr(ea))
354                         pstats->xstats[VHOST_BROADCAST_PKT]++;
355                 else
356                         pstats->xstats[VHOST_MULTICAST_PKT]++;
357         }
358 }
359
360 static void
361 vhost_update_packet_xstats(struct vhost_queue *vq,
362                            struct rte_mbuf **bufs,
363                            uint16_t count)
364 {
365         uint32_t pkt_len = 0;
366         uint64_t i = 0;
367         uint64_t index;
368         struct vhost_stats *pstats = &vq->stats;
369
370         for (i = 0; i < count; i++) {
371                 pkt_len = bufs[i]->pkt_len;
372                 if (pkt_len == 64) {
373                         pstats->xstats[VHOST_64_PKT]++;
374                 } else if (pkt_len > 64 && pkt_len < 1024) {
375                         index = (sizeof(pkt_len) * 8)
376                                 - __builtin_clz(pkt_len) - 5; /* log2 size bucket */
377                         pstats->xstats[index]++;
378                 } else {
379                         if (pkt_len < 64)
380                                 pstats->xstats[VHOST_UNDERSIZE_PKT]++;
381                         else if (pkt_len <= 1522)
382                                 pstats->xstats[VHOST_1024_TO_1522_PKT]++;
383                         else if (pkt_len > 1522)
384                                 pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
385                 }
386                 vhost_count_multicast_broadcast(vq, bufs[i]);
387         }
388 }
389
390 static uint16_t
391 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
392 {
393         struct vhost_queue *r = q;
394         uint16_t i, nb_rx = 0;
395         uint16_t nb_receive = nb_bufs;
396
397         if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
398                 return 0;
399
400         rte_atomic32_set(&r->while_queuing, 1);
401
402         if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
403                 goto out;
404
405         /* Dequeue packets from guest TX queue */
406         while (nb_receive) {
407                 uint16_t nb_pkts;
408                 uint16_t num = (uint16_t)RTE_MIN(nb_receive,
409                                                  VHOST_MAX_PKT_BURST);
410
411                 nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
412                                                   r->mb_pool, &bufs[nb_rx],
413                                                   num);
414
415                 nb_rx += nb_pkts;
416                 nb_receive -= nb_pkts;
417                 if (nb_pkts < num)
418                         break;
419         }
420
421         r->stats.pkts += nb_rx;
422
423         for (i = 0; likely(i < nb_rx); i++) {
424                 bufs[i]->port = r->port;
425                 r->stats.bytes += bufs[i]->pkt_len;
426         }
427
428         vhost_update_packet_xstats(r, bufs, nb_rx);
429
430 out:
431         rte_atomic32_set(&r->while_queuing, 0);
432
433         return nb_rx;
434 }
435
436 static uint16_t
437 eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
438 {
439         struct vhost_queue *r = q;
440         uint16_t i, nb_tx = 0;
441         uint16_t nb_send = nb_bufs;
442
443         if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
444                 return 0;
445
446         rte_atomic32_set(&r->while_queuing, 1);
447
448         if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
449                 goto out;
450
451         /* Enqueue packets to guest RX queue */
452         while (nb_send) {
453                 uint16_t nb_pkts;
454                 uint16_t num = (uint16_t)RTE_MIN(nb_send,
455                                                  VHOST_MAX_PKT_BURST);
456
457                 nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
458                                                   &bufs[nb_tx], num);
459
460                 nb_tx += nb_pkts;
461                 nb_send -= nb_pkts;
462                 if (nb_pkts < num)
463                         break;
464         }
465
466         r->stats.pkts += nb_tx;
467         r->stats.missed_pkts += nb_bufs - nb_tx;
468
469         for (i = 0; likely(i < nb_tx); i++)
470                 r->stats.bytes += bufs[i]->pkt_len;
471
472         vhost_update_packet_xstats(r, bufs, nb_tx);
473
474         /* According to RFC 2863 page 42, ifHCOutMulticastPkts and
475          * ifHCOutBroadcastPkts: the "multicast" and "broadcast" counters
476          * are incremented even for packets not transmitted successfully.
477          */
478         for (i = nb_tx; i < nb_bufs; i++)
479                 vhost_count_multicast_broadcast(r, bufs[i]);
480
481         for (i = 0; likely(i < nb_tx); i++)
482                 rte_pktmbuf_free(bufs[i]);
483 out:
484         rte_atomic32_set(&r->while_queuing, 0);
485
486         return nb_tx;
487 }
488
489 static int
490 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
491 {
492         return 0;
493 }
494
495 static inline struct internal_list *
496 find_internal_resource(char *ifname)
497 {
498         int found = 0;
499         struct internal_list *list;
500         struct pmd_internal *internal;
501
502         if (ifname == NULL)
503                 return NULL;
504
505         pthread_mutex_lock(&internal_list_lock);
506
507         TAILQ_FOREACH(list, &internal_list, next) {
508                 internal = list->eth_dev->data->dev_private;
509                 if (!strcmp(internal->iface_name, ifname)) {
510                         found = 1;
511                         break;
512                 }
513         }
514
515         pthread_mutex_unlock(&internal_list_lock);
516
517         if (!found)
518                 return NULL;
519
520         return list;
521 }
522
523 static void
524 update_queuing_status(struct rte_eth_dev *dev)
525 {
526         struct pmd_internal *internal = dev->data->dev_private;
527         struct vhost_queue *vq;
528         unsigned int i;
529         int allow_queuing = 1;
530
531         if (rte_atomic32_read(&internal->started) == 0 ||
532             rte_atomic32_read(&internal->dev_attached) == 0)
533                 allow_queuing = 0;
534
535         /* Wait until rx/tx_pkt_burst stops accessing vhost device */
536         for (i = 0; i < dev->data->nb_rx_queues; i++) {
537                 vq = dev->data->rx_queues[i];
538                 if (vq == NULL)
539                         continue;
540                 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
541                 while (rte_atomic32_read(&vq->while_queuing))
542                         rte_pause();
543         }
544
545         for (i = 0; i < dev->data->nb_tx_queues; i++) {
546                 vq = dev->data->tx_queues[i];
547                 if (vq == NULL)
548                         continue;
549                 rte_atomic32_set(&vq->allow_queuing, allow_queuing);
550                 while (rte_atomic32_read(&vq->while_queuing))
551                         rte_pause();
552         }
553 }
554
555 static int
556 new_device(int vid)
557 {
558         struct rte_eth_dev *eth_dev;
559         struct internal_list *list;
560         struct pmd_internal *internal;
561         struct vhost_queue *vq;
562         unsigned i;
563         char ifname[PATH_MAX];
564 #ifdef RTE_LIBRTE_VHOST_NUMA
565         int newnode;
566 #endif
567
568         rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
569         list = find_internal_resource(ifname);
570         if (list == NULL) {
571                 RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
572                 return -1;
573         }
574
575         eth_dev = list->eth_dev;
576         internal = eth_dev->data->dev_private;
577
578 #ifdef RTE_LIBRTE_VHOST_NUMA
579         newnode = rte_vhost_get_numa_node(vid);
580         if (newnode >= 0)
581                 eth_dev->data->numa_node = newnode;
582 #endif
583
584         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
585                 vq = eth_dev->data->rx_queues[i];
586                 if (vq == NULL)
587                         continue;
588                 vq->vid = vid;
589                 vq->internal = internal;
590                 vq->port = eth_dev->data->port_id;
591         }
592         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
593                 vq = eth_dev->data->tx_queues[i];
594                 if (vq == NULL)
595                         continue;
596                 vq->vid = vid;
597                 vq->internal = internal;
598                 vq->port = eth_dev->data->port_id;
599         }
600
601         for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
602                 rte_vhost_enable_guest_notification(vid, i, 0);
603
604         eth_dev->data->dev_link.link_status = ETH_LINK_UP;
605
606         rte_atomic32_set(&internal->dev_attached, 1);
607         update_queuing_status(eth_dev);
608
609         RTE_LOG(INFO, PMD, "New connection established\n");
610
611         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
612
613         return 0;
614 }
615
616 static void
617 destroy_device(int vid)
618 {
619         struct rte_eth_dev *eth_dev;
620         struct pmd_internal *internal;
621         struct vhost_queue *vq;
622         struct internal_list *list;
623         char ifname[PATH_MAX];
624         unsigned i;
625         struct rte_vhost_vring_state *state;
626
627         rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
628         list = find_internal_resource(ifname);
629         if (list == NULL) {
630                 RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
631                 return;
632         }
633         eth_dev = list->eth_dev;
634         internal = eth_dev->data->dev_private;
635
636         rte_atomic32_set(&internal->dev_attached, 0);
637         update_queuing_status(eth_dev);
638
639         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
640
641         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
642                 vq = eth_dev->data->rx_queues[i];
643                 if (vq == NULL)
644                         continue;
645                 vq->vid = -1;
646         }
647         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
648                 vq = eth_dev->data->tx_queues[i];
649                 if (vq == NULL)
650                         continue;
651                 vq->vid = -1;
652         }
653
654         state = vring_states[eth_dev->data->port_id];
655         rte_spinlock_lock(&state->lock);
656         for (i = 0; i <= state->max_vring; i++) {
657                 state->cur[i] = false;
658                 state->seen[i] = false;
659         }
660         state->max_vring = 0;
661         rte_spinlock_unlock(&state->lock);
662
663         RTE_LOG(INFO, PMD, "Connection closed\n");
664
665         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
666 }
667
668 static int
669 vring_state_changed(int vid, uint16_t vring, int enable)
670 {
671         struct rte_vhost_vring_state *state;
672         struct rte_eth_dev *eth_dev;
673         struct internal_list *list;
674         char ifname[PATH_MAX];
675
676         rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
677         list = find_internal_resource(ifname);
678         if (list == NULL) {
679                 RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
680                 return -1;
681         }
682
683         eth_dev = list->eth_dev;
684         /* won't be NULL */
685         state = vring_states[eth_dev->data->port_id];
686         rte_spinlock_lock(&state->lock);
687         state->cur[vring] = enable;
688         state->max_vring = RTE_MAX(vring, state->max_vring);
689         rte_spinlock_unlock(&state->lock);
690
691         RTE_LOG(INFO, PMD, "vring%u is %s\n",
692                         vring, enable ? "enabled" : "disabled");
693
694         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
695
696         return 0;
697 }
698
699 int
700 rte_eth_vhost_get_queue_event(uint8_t port_id,
701                 struct rte_eth_vhost_queue_event *event)
702 {
703         struct rte_vhost_vring_state *state;
704         unsigned int i;
705         int idx;
706
707         if (port_id >= RTE_MAX_ETHPORTS) {
708                 RTE_LOG(ERR, PMD, "Invalid port id\n");
709                 return -1;
710         }
711
712         state = vring_states[port_id];
713         if (!state) {
714                 RTE_LOG(ERR, PMD, "Unused port\n");
715                 return -1;
716         }
717
718         rte_spinlock_lock(&state->lock);
719         for (i = 0; i <= state->max_vring; i++) {
720                 idx = state->index++ % (state->max_vring + 1);
721
722                 if (state->cur[idx] != state->seen[idx]) {
723                         state->seen[idx] = state->cur[idx];
724                         event->queue_id = idx / 2;
725                         event->rx = idx & 1;
726                         event->enable = state->cur[idx];
727                         rte_spinlock_unlock(&state->lock);
728                         return 0;
729                 }
730         }
731         rte_spinlock_unlock(&state->lock);
732
733         return -1;
734 }
735
736 int
737 rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
738 {
739         struct internal_list *list;
740         struct rte_eth_dev *eth_dev;
741         struct vhost_queue *vq;
742         int vid = -1;
743
744         if (!rte_eth_dev_is_valid_port(port_id))
745                 return -1;
746
747         pthread_mutex_lock(&internal_list_lock);
748
749         TAILQ_FOREACH(list, &internal_list, next) {
750                 eth_dev = list->eth_dev;
751                 if (eth_dev->data->port_id == port_id) {
752                         vq = eth_dev->data->rx_queues[0];
753                         if (vq) {
754                                 vid = vq->vid;
755                         }
756                         break;
757                 }
758         }
759
760         pthread_mutex_unlock(&internal_list_lock);
761
762         return vid;
763 }
764
765 static void *
766 vhost_driver_session(void *param __rte_unused)
767 {
768         static struct virtio_net_device_ops vhost_ops;
769
770         /* set vhost arguments */
771         vhost_ops.new_device = new_device;
772         vhost_ops.destroy_device = destroy_device;
773         vhost_ops.vring_state_changed = vring_state_changed;
774         if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
775                 RTE_LOG(ERR, PMD, "Can't register callbacks\n");
776
777         /* start event handling */
778         rte_vhost_driver_session_start();
779
780         return NULL;
781 }
782
783 static int
784 vhost_driver_session_start(void)
785 {
786         int ret;
787
788         ret = pthread_create(&session_th,
789                         NULL, vhost_driver_session, NULL);
790         if (ret)
791                 RTE_LOG(ERR, PMD, "Can't create a thread\n");
792
793         return ret;
794 }
795
796 static void
797 vhost_driver_session_stop(void)
798 {
799         int ret;
800
801         ret = pthread_cancel(session_th);
802         if (ret)
803                 RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
804
805         ret = pthread_join(session_th, NULL);
806         if (ret)
807                 RTE_LOG(ERR, PMD, "Can't join the thread\n");
808 }
809
810 static int
811 eth_dev_start(struct rte_eth_dev *dev)
812 {
813         struct pmd_internal *internal = dev->data->dev_private;
814
815         rte_atomic32_set(&internal->started, 1);
816         update_queuing_status(dev);
817
818         return 0;
819 }
820
821 static void
822 eth_dev_stop(struct rte_eth_dev *dev)
823 {
824         struct pmd_internal *internal = dev->data->dev_private;
825
826         rte_atomic32_set(&internal->started, 0);
827         update_queuing_status(dev);
828 }
829
830 static void
831 eth_dev_close(struct rte_eth_dev *dev)
832 {
833         struct pmd_internal *internal;
834         struct internal_list *list;
835
836         internal = dev->data->dev_private;
837         if (!internal)
838                 return;
839
840         rte_vhost_driver_unregister(internal->iface_name);
841
842         list = find_internal_resource(internal->iface_name);
843         if (!list)
844                 return;
845
846         pthread_mutex_lock(&internal_list_lock);
847         TAILQ_REMOVE(&internal_list, list, next);
848         pthread_mutex_unlock(&internal_list_lock);
849         rte_free(list);
850
851         free(internal->dev_name);
852         free(internal->iface_name);
853         rte_free(internal);
854 }
855
856 static int
857 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
858                    uint16_t nb_rx_desc __rte_unused,
859                    unsigned int socket_id,
860                    const struct rte_eth_rxconf *rx_conf __rte_unused,
861                    struct rte_mempool *mb_pool)
862 {
863         struct vhost_queue *vq;
864
865         vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
866                         RTE_CACHE_LINE_SIZE, socket_id);
867         if (vq == NULL) {
868                 RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
869                 return -ENOMEM;
870         }
871
872         vq->mb_pool = mb_pool;
873         vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
874         dev->data->rx_queues[rx_queue_id] = vq;
875
876         return 0;
877 }
878
879 static int
880 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
881                    uint16_t nb_tx_desc __rte_unused,
882                    unsigned int socket_id,
883                    const struct rte_eth_txconf *tx_conf __rte_unused)
884 {
885         struct vhost_queue *vq;
886
887         vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
888                         RTE_CACHE_LINE_SIZE, socket_id);
889         if (vq == NULL) {
890                 RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
891                 return -ENOMEM;
892         }
893
894         vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
895         dev->data->tx_queues[tx_queue_id] = vq;
896
897         return 0;
898 }
899
900 static void
901 eth_dev_info(struct rte_eth_dev *dev,
902              struct rte_eth_dev_info *dev_info)
903 {
904         struct pmd_internal *internal;
905
906         internal = dev->data->dev_private;
907         if (internal == NULL) {
908                 RTE_LOG(ERR, PMD, "Invalid device specified\n");
909                 return;
910         }
911
912         dev_info->max_mac_addrs = 1;
913         dev_info->max_rx_pktlen = (uint32_t)-1;
914         dev_info->max_rx_queues = internal->max_queues;
915         dev_info->max_tx_queues = internal->max_queues;
916         dev_info->min_rx_bufsize = 0;
917 }
918
919 static void
920 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
921 {
922         unsigned i;
923         unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
924         unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
925         struct vhost_queue *vq;
926
927         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
928                         i < dev->data->nb_rx_queues; i++) {
929                 if (dev->data->rx_queues[i] == NULL)
930                         continue;
931                 vq = dev->data->rx_queues[i];
932                 stats->q_ipackets[i] = vq->stats.pkts;
933                 rx_total += stats->q_ipackets[i];
934
935                 stats->q_ibytes[i] = vq->stats.bytes;
936                 rx_total_bytes += stats->q_ibytes[i];
937         }
938
939         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
940                         i < dev->data->nb_tx_queues; i++) {
941                 if (dev->data->tx_queues[i] == NULL)
942                         continue;
943                 vq = dev->data->tx_queues[i];
944                 stats->q_opackets[i] = vq->stats.pkts;
945                 tx_missed_total += vq->stats.missed_pkts;
946                 tx_total += stats->q_opackets[i];
947
948                 stats->q_obytes[i] = vq->stats.bytes;
949                 tx_total_bytes += stats->q_obytes[i];
950         }
951
952         stats->ipackets = rx_total;
953         stats->opackets = tx_total;
954         stats->oerrors = tx_missed_total;
955         stats->ibytes = rx_total_bytes;
956         stats->obytes = tx_total_bytes;
957 }
958
959 static void
960 eth_stats_reset(struct rte_eth_dev *dev)
961 {
962         struct vhost_queue *vq;
963         unsigned i;
964
965         for (i = 0; i < dev->data->nb_rx_queues; i++) {
966                 if (dev->data->rx_queues[i] == NULL)
967                         continue;
968                 vq = dev->data->rx_queues[i];
969                 vq->stats.pkts = 0;
970                 vq->stats.bytes = 0;
971         }
972         for (i = 0; i < dev->data->nb_tx_queues; i++) {
973                 if (dev->data->tx_queues[i] == NULL)
974                         continue;
975                 vq = dev->data->tx_queues[i];
976                 vq->stats.pkts = 0;
977                 vq->stats.bytes = 0;
978                 vq->stats.missed_pkts = 0;
979         }
980 }
981
982 static void
983 eth_queue_release(void *q)
984 {
985         rte_free(q);
986 }
987
988 static int
989 eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
990 {
991         /*
992          * vHost does not hang onto mbufs. eth_vhost_tx() copies the packet
993          * data and frees the mbufs, so there is nothing to clean up.
994          */
995         return 0;
996 }
997
998 static int
999 eth_link_update(struct rte_eth_dev *dev __rte_unused,
1000                 int wait_to_complete __rte_unused)
1001 {
1002         return 0;
1003 }
1004
1005 /**
1006  * Disable features in feature_mask. Returns 0 on success.
1007  */
1008 int
1009 rte_eth_vhost_feature_disable(uint64_t feature_mask)
1010 {
1011         return rte_vhost_feature_disable(feature_mask);
1012 }
1013
1014 /**
1015  * Enable features in feature_mask. Returns 0 on success.
1016  */
1017 int
1018 rte_eth_vhost_feature_enable(uint64_t feature_mask)
1019 {
1020         return rte_vhost_feature_enable(feature_mask);
1021 }
1022
1023 /* Returns currently supported vhost features */
1024 uint64_t
1025 rte_eth_vhost_feature_get(void)
1026 {
1027         return rte_vhost_feature_get();
1028 }
1029
1030 static const struct eth_dev_ops ops = {
1031         .dev_start = eth_dev_start,
1032         .dev_stop = eth_dev_stop,
1033         .dev_close = eth_dev_close,
1034         .dev_configure = eth_dev_configure,
1035         .dev_infos_get = eth_dev_info,
1036         .rx_queue_setup = eth_rx_queue_setup,
1037         .tx_queue_setup = eth_tx_queue_setup,
1038         .rx_queue_release = eth_queue_release,
1039         .tx_queue_release = eth_queue_release,
1040         .tx_done_cleanup = eth_tx_done_cleanup,
1041         .link_update = eth_link_update,
1042         .stats_get = eth_stats_get,
1043         .stats_reset = eth_stats_reset,
1044         .xstats_reset = vhost_dev_xstats_reset,
1045         .xstats_get = vhost_dev_xstats_get,
1046         .xstats_get_names = vhost_dev_xstats_get_names,
1047 };
1048
1049 static struct rte_vdev_driver pmd_vhost_drv;
1050
1051 static int
1052 eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
1053                      const unsigned numa_node, uint64_t flags)
1054 {
1055         struct rte_eth_dev_data *data = NULL;
1056         struct pmd_internal *internal = NULL;
1057         struct rte_eth_dev *eth_dev = NULL;
1058         struct ether_addr *eth_addr = NULL;
1059         struct rte_vhost_vring_state *vring_state = NULL;
1060         struct internal_list *list = NULL;
1061
1062         RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
1063                 numa_node);
1064
1065         /* now do all data allocation - for eth_dev structure
1066          * and internal (private) data
1067          */
1068         data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
1069         if (data == NULL)
1070                 goto error;
1071
1072         internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
1073         if (internal == NULL)
1074                 goto error;
1075
1076         list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
1077         if (list == NULL)
1078                 goto error;
1079
1080         /* reserve an ethdev entry */
1081         eth_dev = rte_eth_dev_allocate(name);
1082         if (eth_dev == NULL)
1083                 goto error;
1084
1085         eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
1086         if (eth_addr == NULL)
1087                 goto error;
1088         *eth_addr = base_eth_addr;
1089         eth_addr->addr_bytes[5] = eth_dev->data->port_id;
1090
1091         vring_state = rte_zmalloc_socket(name,
1092                         sizeof(*vring_state), 0, numa_node);
1093         if (vring_state == NULL)
1094                 goto error;
1095
1096         /* now put it all together
1097          * - store queue data in internal,
1098          * - store numa_node info in ethdev data
1099          * - point eth_dev_data to internals
1100          * - and point eth_dev structure to new eth_dev_data structure
1101          */
1102         internal->dev_name = strdup(name);
1103         if (internal->dev_name == NULL)
1104                 goto error;
1105         internal->iface_name = strdup(iface_name);
1106         if (internal->iface_name == NULL)
1107                 goto error;
1108
1109         list->eth_dev = eth_dev;
1110         pthread_mutex_lock(&internal_list_lock);
1111         TAILQ_INSERT_TAIL(&internal_list, list, next);
1112         pthread_mutex_unlock(&internal_list_lock);
1113
1114         rte_spinlock_init(&vring_state->lock);
1115         vring_states[eth_dev->data->port_id] = vring_state;
1116
1117         data->dev_private = internal;
1118         data->port_id = eth_dev->data->port_id;
1119         memmove(data->name, eth_dev->data->name, sizeof(data->name));
1120         data->nb_rx_queues = queues;
1121         data->nb_tx_queues = queues;
1122         internal->max_queues = queues;
1123         data->dev_link = pmd_link;
1124         data->mac_addrs = eth_addr;
1125
1126         /* We'll replace the 'data' originally allocated by eth_dev. So the
1127          * vhost PMD resources won't be shared across multiple processes.
1128          */
1129         eth_dev->data = data;
1130         eth_dev->dev_ops = &ops;
1131         eth_dev->driver = NULL;
1132         data->dev_flags =
1133                 RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
1134         data->kdrv = RTE_KDRV_NONE;
1135         data->drv_name = pmd_vhost_drv.driver.name;
1136         data->numa_node = numa_node;
1137
1138         /* finally assign rx and tx ops */
1139         eth_dev->rx_pkt_burst = eth_vhost_rx;
1140         eth_dev->tx_pkt_burst = eth_vhost_tx;
1141
1142         if (rte_vhost_driver_register(iface_name, flags))
1143                 goto error;
1144
1145         /* We need only one message handling thread */
1146         if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
1147                 if (vhost_driver_session_start())
1148                         goto error;
1149         }
1150
1151         return data->port_id;
1152
1153 error:
1154         if (internal)
1155                 free(internal->dev_name);
1156         rte_free(vring_state);
1157         rte_free(eth_addr);
1158         if (eth_dev)
1159                 rte_eth_dev_release_port(eth_dev);
1160         rte_free(internal);
1161         rte_free(list);
1162         rte_free(data);
1163
1164         return -1;
1165 }
1166
1167 static inline int
1168 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
1169 {
1170         const char **iface_name = extra_args;
1171
1172         if (value == NULL)
1173                 return -1;
1174
1175         *iface_name = value;
1176
1177         return 0;
1178 }
1179
1180 static inline int
1181 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1182 {
1183         uint16_t *n = extra_args;
1184
1185         if (value == NULL || extra_args == NULL)
1186                 return -EINVAL;
1187
1188         *n = (uint16_t)strtoul(value, NULL, 0);
1189         if (*n == USHRT_MAX && errno == ERANGE)
1190                 return -1;
1191
1192         return 0;
1193 }
1194
1195 static int
1196 rte_pmd_vhost_probe(const char *name, const char *params)
1197 {
1198         struct rte_kvargs *kvlist = NULL;
1199         int ret = 0;
1200         char *iface_name;
1201         uint16_t queues;
1202         uint64_t flags = 0;
1203         int client_mode = 0;
1204         int dequeue_zero_copy = 0;
1205
1206         RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);
1207
1208         kvlist = rte_kvargs_parse(params, valid_arguments);
1209         if (kvlist == NULL)
1210                 return -1;
1211
1212         if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
1213                 ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
1214                                          &open_iface, &iface_name);
1215                 if (ret < 0)
1216                         goto out_free;
1217         } else {
1218                 ret = -1;
1219                 goto out_free;
1220         }
1221
1222         if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
1223                 ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
1224                                          &open_int, &queues);
1225                 if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
1226                         goto out_free;
1227
1228         } else
1229                 queues = 1;
1230
1231         if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
1232                 ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
1233                                          &open_int, &client_mode);
1234                 if (ret < 0)
1235                         goto out_free;
1236
1237                 if (client_mode)
1238                         flags |= RTE_VHOST_USER_CLIENT;
1239         }
1240
1241         if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
1242                 ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
1243                                          &open_int, &dequeue_zero_copy);
1244                 if (ret < 0)
1245                         goto out_free;
1246
1247                 if (dequeue_zero_copy)
1248                         flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1249         }
1250
1251         eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);
1252
1253 out_free:
1254         rte_kvargs_free(kvlist);
1255         return ret;
1256 }
1257
1258 static int
1259 rte_pmd_vhost_remove(const char *name)
1260 {
1261         struct rte_eth_dev *eth_dev = NULL;
1262         unsigned int i;
1263
1264         RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
1265
1266         /* find an ethdev entry */
1267         eth_dev = rte_eth_dev_allocated(name);
1268         if (eth_dev == NULL)
1269                 return -ENODEV;
1270
1271         eth_dev_stop(eth_dev);
1272
1273         eth_dev_close(eth_dev);
1274
1275         if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
1276                 vhost_driver_session_stop();
1277
1278         rte_free(vring_states[eth_dev->data->port_id]);
1279         vring_states[eth_dev->data->port_id] = NULL;
1280
1281         for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
1282                 rte_free(eth_dev->data->rx_queues[i]);
1283         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1284                 rte_free(eth_dev->data->tx_queues[i]);
1285
1286         rte_free(eth_dev->data->mac_addrs);
1287         rte_free(eth_dev->data);
1288
1289         rte_eth_dev_release_port(eth_dev);
1290
1291         return 0;
1292 }
1293
1294 static struct rte_vdev_driver pmd_vhost_drv = {
1295         .probe = rte_pmd_vhost_probe,
1296         .remove = rte_pmd_vhost_remove,
1297 };
1298
1299 RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
1300 RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
1301 RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
1302         "iface=<ifc> "
1303         "queues=<int> "
1304         "client=<0|1> "
1305         "dequeue-zero-copy=<0|1>");
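These parameters are supplied on the EAL command line when the port is created as a virtual
device. A hypothetical invocation with testpmd (socket path, core list and queue count are
illustrative):

    ./testpmd -l 0-1 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=2,client=1' -- -i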